From 8f96044fe9f3f973920199346dbcd3de6751dd40 Mon Sep 17 00:00:00 2001 From: "A. Wilcox" Date: Fri, 16 Feb 2024 00:24:20 -0600 Subject: system/easy-kernel: Update to 6.6.1-mc1 --- system/easy-kernel/0000-README | 46 +- system/easy-kernel/0100-linux-5.15.132.patch | 811450 ------------------ system/easy-kernel/0100-linux-6.6.6.patch | 64421 ++ system/easy-kernel/0120-XATTR_USER_PREFIX.patch | 58 +- .../0122-link-security-restrictions.patch | 20 +- system/easy-kernel/0200-x86-compile.patch | 4 +- system/easy-kernel/0202-fix-gcc13-build.patch | 11 - system/easy-kernel/0202-parisc-disable-prctl.patch | 51 + .../0204-amd-deserialised-MSR-access.patch | 134 + .../easy-kernel/0250-expose-per-process-ksm.patch | 460 + system/easy-kernel/0255-ultra-ksm.patch | 6936 - system/easy-kernel/0262-boot-order.patch | 48 + system/easy-kernel/0302-iwlwifi-rfkill-fix.patch | 170 + .../0400-reduce-pageblock-size-nonhugetlb.patch | 44 + system/easy-kernel/0402-mm-optimise-slub.patch | 84 + system/easy-kernel/0404-page-cache-not-found.patch | 48 + system/easy-kernel/0502-gcc9-kcflags.patch | 246 +- .../easy-kernel/0504-update-zstd-to-v1_5_5.patch | 13822 + system/easy-kernel/1000-version.patch | 24 +- system/easy-kernel/APKBUILD | 64 +- system/easy-kernel/config-aarch64 | 2346 +- system/easy-kernel/config-armv7 | 1846 +- system/easy-kernel/config-m68k | 616 +- system/easy-kernel/config-pmmx | 1856 +- system/easy-kernel/config-ppc | 1557 +- system/easy-kernel/config-ppc64 | 1494 +- system/easy-kernel/config-sparc64 | 1509 +- system/easy-kernel/config-x86_64 | 2130 +- system/easy-kernel/no-require-gnu-tar.patch | 11 +- system/easy-kernel/no-require-lilo.patch | 2 +- 30 files changed, 88122 insertions(+), 823386 deletions(-) delete mode 100644 system/easy-kernel/0100-linux-5.15.132.patch create mode 100644 system/easy-kernel/0100-linux-6.6.6.patch delete mode 100644 system/easy-kernel/0202-fix-gcc13-build.patch create mode 100644 system/easy-kernel/0202-parisc-disable-prctl.patch create mode 100644 system/easy-kernel/0204-amd-deserialised-MSR-access.patch create mode 100644 system/easy-kernel/0250-expose-per-process-ksm.patch delete mode 100644 system/easy-kernel/0255-ultra-ksm.patch create mode 100644 system/easy-kernel/0262-boot-order.patch create mode 100644 system/easy-kernel/0302-iwlwifi-rfkill-fix.patch create mode 100644 system/easy-kernel/0400-reduce-pageblock-size-nonhugetlb.patch create mode 100644 system/easy-kernel/0402-mm-optimise-slub.patch create mode 100644 system/easy-kernel/0404-page-cache-not-found.patch create mode 100644 system/easy-kernel/0504-update-zstd-to-v1_5_5.patch diff --git a/system/easy-kernel/0000-README b/system/easy-kernel/0000-README index ae39cebbe..ba2ebb04d 100644 --- a/system/easy-kernel/0000-README +++ b/system/easy-kernel/0000-README @@ -48,34 +48,62 @@ File: 0200-x86-compile.patch From: Laurent Bercot Desc: Fixes builds on x86 that terminate due to overenthusiastic -Werror -File: 0202-fix-gcc13-build.patch -From: Sam James -Desc: Fixes building with gcc-13 due to plugin ABI mismatch, does not affect older gcc builds +File: 0202-parisc-disable-prctl.patch +From: Helge Deller +Desc: Disables prctl on PA-RISC/HPPA due to this platform needing executable stacks. + +File: 0204-amd-deserialised-MSR-access.patch +From: Borislav Petkov +Desc: Reduces performance penalty on AMD64 processors (Opteron, K8, Athlon64, Sempron) by removing unnecessary synchronisation barrier. 
File: 0210-fix-powerbook-6-5-audio.patch From: Horst Burkhardt -Desc: Enables audio in PowerBook6,4 and PowerBook6,5 iBooks on PowerPC +Desc: Enables audio in PowerBook6,4 and PowerBook6,5 iBooks on PowerPC. -File: 0255-ultraksm.patch -From: https://github.com/sirlucjan/kernel-patches/ -Desc: Ultra Same-Page Merging provides an aggressive KSM implementation to further enhance memory usage over RedHat KSM in mainline +File: 0250-expose-per-process-ksm.patch +From: Oleksandr Natalenko +Desc: Provides a non-prctl interface for per-process KSM to support uksmd. File: 0260-reduce-swappiness.patch From: Horst Burkhardt - originally from -ck patchset by Con Kolivas -Desc: Reduces the proclivity of the kernel to page out memory contents to disk +Desc: Reduces the proclivity of the kernel to page out memory contents to disk. + +File: 0262-boot-order.patch +From: Peter Jung +Desc: Changes graphics bringup to occur after ATA initialisation, saving some time at boot. File: 0300-tmp513-regression-fix.patch From: Mike Pagano Desc: Fix to regression in Kconfig from kernel 5.5.6 to enable tmp513 hardware monitoring module to build. +File: 0302-iwlwifi-rfkill-fix.patch +From: Johannes Berg +Desc: Fix issue where rfkill results in kernel lock-up. + +File: 0400-reduce-pageblock-size-nonhugetlb.patch +From: Sultan Alsawaf +Desc: Reduces latency under memory pressure by reducing pageblock size. + +File: 0402-mm-optimise-slub.patch +From: Jay Patel +Desc: Reduces SLUB memory usage by adjusting operational constants depending on page size. + +File: 0404-page-cache-not-found.patch +From: Yin Fengwei +Desc: Reverts a commit that causes cache misses in readahead. + File: 0500-print-fw-info.patch From: Georgy Yakovlev Desc: Makes kernel print exact firmware file that kernel attempts to load. File: 0502-gcc9-kcflags.patch -From: https://github.com/graysky2/kernel_compiler_patch/ +From: graysky Desc: Enables gcc >=9.1 optimizations for the very latest x86_64 CPUs. +File: 0504-update-zstd-to-v1_5_5.patch +From: Piotr Gorski +Desc: Updates kernel Zstandard compression code to upstream 1.5.5 from Meta. + File: 1000-version.patch From: Horst Burkhardt Desc: Adjust Makefile to represent patchset version, adds cool logo to boot logo options diff --git a/system/easy-kernel/0100-linux-5.15.132.patch b/system/easy-kernel/0100-linux-5.15.132.patch deleted file mode 100644 index 7e12c3aa5..000000000 --- a/system/easy-kernel/0100-linux-5.15.132.patch +++ /dev/null @@ -1,811450 +0,0 @@ -diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc -index 889ed45be4ca6..2d5a5913b5f28 100644 ---- a/Documentation/ABI/testing/configfs-usb-gadget-uvc -+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc -@@ -51,7 +51,7 @@ Date: Dec 2014 - KernelVersion: 4.0 - Description: Default output terminal descriptors - -- All attributes read only: -+ All attributes read only except bSourceID: - - ============== ============================================= - iTerminal index of string descriptor -diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata -index 9ab0ef1dd1c72..299e0d1dc1619 100644 ---- a/Documentation/ABI/testing/sysfs-ata -+++ b/Documentation/ABI/testing/sysfs-ata -@@ -107,13 +107,14 @@ Description: - described in ATA8 7.16 and 7.17. Only valid if - the device is not a PM. - -- pio_mode: (RO) Transfer modes supported by the device when -- in PIO mode. Mostly used by PATA device. -+ pio_mode: (RO) PIO transfer mode used by the device. 
-+ Mostly used by PATA devices. - -- xfer_mode: (RO) Current transfer mode -+ xfer_mode: (RO) Current transfer mode. Mostly used by -+ PATA devices. - -- dma_mode: (RO) Transfer modes supported by the device when -- in DMA mode. Mostly used by PATA device. -+ dma_mode: (RO) DMA transfer mode used by the device. -+ Mostly used by PATA devices. - - class: (RO) Device class. Can be "ata" for disk, - "atapi" for packet device, "pmp" for PM, or -diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio -index 6ad47a67521c7..f41e767e702bd 100644 ---- a/Documentation/ABI/testing/sysfs-bus-iio -+++ b/Documentation/ABI/testing/sysfs-bus-iio -@@ -188,7 +188,7 @@ Description: - Raw capacitance measurement from channel Y. Units after - application of scale and offset are nanofarads. - --What: /sys/.../iio:deviceX/in_capacitanceY-in_capacitanceZ_raw -+What: /sys/.../iio:deviceX/in_capacitanceY-capacitanceZ_raw - KernelVersion: 3.2 - Contact: linux-iio@vger.kernel.org - Description: -diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610 -index 308a6756d3bf3..491ead8044888 100644 ---- a/Documentation/ABI/testing/sysfs-bus-iio-vf610 -+++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610 -@@ -1,4 +1,4 @@ --What: /sys/bus/iio/devices/iio:deviceX/conversion_mode -+What: /sys/bus/iio/devices/iio:deviceX/in_conversion_mode - KernelVersion: 4.2 - Contact: linux-iio@vger.kernel.org - Description: -diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor -index d76cd3946434d..e9ef69aef20b1 100644 ---- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor -+++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor -@@ -5,6 +5,9 @@ Contact: linux-mtd@lists.infradead.org - Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the - flash device. - -+ The attribute is not present if the flash doesn't support -+ the "Read JEDEC ID" command (9Fh). This is the case for -+ non-JEDEC compliant flashes. - - What: /sys/bus/spi/devices/.../spi-nor/manufacturer - Date: April 2021 -diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu -index b46ef147616ab..eecbd16033493 100644 ---- a/Documentation/ABI/testing/sysfs-devices-system-cpu -+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu -@@ -511,15 +511,18 @@ Description: information about CPUs heterogeneity. - cpu_capacity: capacity of cpu#. 
- - What: /sys/devices/system/cpu/vulnerabilities -+ /sys/devices/system/cpu/vulnerabilities/gather_data_sampling -+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit -+ /sys/devices/system/cpu/vulnerabilities/l1tf -+ /sys/devices/system/cpu/vulnerabilities/mds - /sys/devices/system/cpu/vulnerabilities/meltdown -+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data -+ /sys/devices/system/cpu/vulnerabilities/retbleed -+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass - /sys/devices/system/cpu/vulnerabilities/spectre_v1 - /sys/devices/system/cpu/vulnerabilities/spectre_v2 -- /sys/devices/system/cpu/vulnerabilities/spec_store_bypass -- /sys/devices/system/cpu/vulnerabilities/l1tf -- /sys/devices/system/cpu/vulnerabilities/mds - /sys/devices/system/cpu/vulnerabilities/srbds - /sys/devices/system/cpu/vulnerabilities/tsx_async_abort -- /sys/devices/system/cpu/vulnerabilities/itlb_multihit - Date: January 2018 - Contact: Linux kernel mailing list - Description: Information about CPU vulnerabilities -diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback -index ac2947b989504..3d5de44cbbee9 100644 ---- a/Documentation/ABI/testing/sysfs-driver-xen-blkback -+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback -@@ -42,5 +42,5 @@ KernelVersion: 5.10 - Contact: SeongJae Park - Description: - Whether to enable the persistent grants feature or not. Note -- that this option only takes effect on newly created backends. -+ that this option only takes effect on newly connected backends. - The default is Y (enable). -diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkfront b/Documentation/ABI/testing/sysfs-driver-xen-blkfront -index 28008905615f0..1f7659aa085c2 100644 ---- a/Documentation/ABI/testing/sysfs-driver-xen-blkfront -+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkfront -@@ -15,5 +15,5 @@ KernelVersion: 5.10 - Contact: SeongJae Park - Description: - Whether to enable the persistent grants feature or not. Note -- that this option only takes effect on newly created frontends. -+ that this option only takes effect on newly connected frontends. - The default is Y (enable). -diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs -index f627e705e663b..48d41b6696270 100644 ---- a/Documentation/ABI/testing/sysfs-fs-f2fs -+++ b/Documentation/ABI/testing/sysfs-fs-f2fs -@@ -425,6 +425,7 @@ Description: Show status of f2fs superblock in real time. - 0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP - 0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted - 0x2000 SBI_IS_RESIZEFS resizefs is in process -+ 0x4000 SBI_IS_FREEZING freefs is in process - ====== ===================== ================================= - - What: /sys/fs/f2fs//ckpt_thread_ioprio -diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count -new file mode 100644 -index 0000000000000..156cca9dbc960 ---- /dev/null -+++ b/Documentation/ABI/testing/sysfs-kernel-oops_count -@@ -0,0 +1,6 @@ -+What: /sys/kernel/oops_count -+Date: November 2022 -+KernelVersion: 6.2.0 -+Contact: Linux Kernel Hardening List -+Description: -+ Shows how many times the system has Oopsed since last boot. 
-diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count -new file mode 100644 -index 0000000000000..90a029813717d ---- /dev/null -+++ b/Documentation/ABI/testing/sysfs-kernel-warn_count -@@ -0,0 +1,6 @@ -+What: /sys/kernel/warn_count -+Date: November 2022 -+KernelVersion: 6.2.0 -+Contact: Linux Kernel Hardening List -+Description: -+ Shows how many times the system has Warned since last boot. -diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst -index f2b3439edcc2c..5e40b3f437f90 100644 ---- a/Documentation/accounting/psi.rst -+++ b/Documentation/accounting/psi.rst -@@ -37,11 +37,7 @@ Pressure interface - Pressure information for each resource is exported through the - respective file in /proc/pressure/ -- cpu, memory, and io. - --The format for CPU is as such:: -- -- some avg10=0.00 avg60=0.00 avg300=0.00 total=0 -- --and for memory and IO:: -+The format is as such:: - - some avg10=0.00 avg60=0.00 avg300=0.00 total=0 - full avg10=0.00 avg60=0.00 avg300=0.00 total=0 -@@ -58,6 +54,9 @@ situation from a state where some tasks are stalled but the CPU is - still doing productive work. As such, time spent in this subset of the - stall state is tracked separately and exported in the "full" averages. - -+CPU full is undefined at the system level, but has been reported -+since 5.13, so it is set to zero for backward compatibility. -+ - The ratios (in %) are tracked as recent trends over ten, sixty, and - three hundred second windows, which gives insight into short term events - as well as medium and long term trends. The total absolute stall time -@@ -92,7 +91,8 @@ Triggers can be set on more than one psi metric and more than one trigger - for the same psi metric can be specified. However for each trigger a separate - file descriptor is required to be able to poll it separately from others, - therefore for each trigger a separate open() syscall should be made even --when opening the same psi interface file. -+when opening the same psi interface file. Write operations to a file descriptor -+with an already existing psi trigger will fail with EBUSY. - - Monitors activate only when system enters stall state for the monitored - psi metric and deactivates upon exit from the stall state. While system is -diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst -index 41191b5fb69d9..dd913eefbf312 100644 ---- a/Documentation/admin-guide/cgroup-v1/memory.rst -+++ b/Documentation/admin-guide/cgroup-v1/memory.rst -@@ -84,6 +84,8 @@ Brief summary of control files. - memory.swappiness set/show swappiness parameter of vmscan - (See sysctl's vm.swappiness) - memory.move_charge_at_immigrate set/show controls of moving charges -+ This knob is deprecated and shouldn't be -+ used. - memory.oom_control set/show oom controls. - memory.numa_stat show the number of memory usage per numa - node -@@ -723,8 +725,15 @@ NOTE2: - It is recommended to set the soft limit always below the hard limit, - otherwise the hard limit will take precedence. - --8. Move charges at task migration --================================= -+8. Move charges at task migration (DEPRECATED!) -+=============================================== -+ -+THIS IS DEPRECATED! -+ -+It's expensive and unreliable! It's better practice to launch workload -+tasks directly from inside their target cgroup. 
Use dedicated workload -+cgroups to allow fine-grained policy adjustments without having to -+move physical pages between control domains. - - Users can move charges associated with a task along with task migration, that - is, uncharge task's pages from the old cgroup and charge them to the new cgroup. -diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst -index f170d88202588..3766bf8a1c20e 100644 ---- a/Documentation/admin-guide/cifs/usage.rst -+++ b/Documentation/admin-guide/cifs/usage.rst -@@ -734,10 +734,9 @@ SecurityFlags Flags which control security negotiation and - using weaker password hashes is 0x37037 (lanman, - plaintext, ntlm, ntlmv2, signing allowed). Some - SecurityFlags require the corresponding menuconfig -- options to be enabled (lanman and plaintext require -- CONFIG_CIFS_WEAK_PW_HASH for example). Enabling -- plaintext authentication currently requires also -- enabling lanman authentication in the security flags -+ options to be enabled. Enabling plaintext -+ authentication currently requires also enabling -+ lanman authentication in the security flags - because the cifs module only supports sending - laintext passwords using the older lanman dialect - form of the session setup SMB. (e.g. for authentication -diff --git a/Documentation/admin-guide/device-mapper/dm-init.rst b/Documentation/admin-guide/device-mapper/dm-init.rst -index e5242ff17e9b7..981d6a9076994 100644 ---- a/Documentation/admin-guide/device-mapper/dm-init.rst -+++ b/Documentation/admin-guide/device-mapper/dm-init.rst -@@ -123,3 +123,11 @@ Other examples (per target): - 0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256 - fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd - 51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584 -+ -+For setups using device-mapper on top of asynchronously probed block -+devices (MMC, USB, ..), it may be necessary to tell dm-init to -+explicitly wait for them to become available before setting up the -+device-mapper tables. This can be done with the "dm-mod.waitfor=" -+module parameter, which takes a list of devices to wait for:: -+ -+ dm-mod.waitfor=[,..,] -diff --git a/Documentation/admin-guide/device-mapper/writecache.rst b/Documentation/admin-guide/device-mapper/writecache.rst -index 10429779a91ab..724e028d1858b 100644 ---- a/Documentation/admin-guide/device-mapper/writecache.rst -+++ b/Documentation/admin-guide/device-mapper/writecache.rst -@@ -78,16 +78,16 @@ Status: - 2. the number of blocks - 3. the number of free blocks - 4. the number of blocks under writeback --5. the number of read requests --6. the number of read requests that hit the cache --7. the number of write requests --8. the number of write requests that hit uncommitted block --9. the number of write requests that hit committed block --10. the number of write requests that bypass the cache --11. the number of write requests that are allocated in the cache -+5. the number of read blocks -+6. the number of read blocks that hit the cache -+7. the number of write blocks -+8. the number of write blocks that hit uncommitted block -+9. the number of write blocks that hit committed block -+10. the number of write blocks that bypass the cache -+11. the number of write blocks that are allocated in the cache - 12. the number of write requests that are blocked on the freelist - 13. the number of flush requests --14. the number of discard requests -+14. 
the number of discarded blocks - - Messages: - flush -diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt -index 922c23bb4372a..c07dc0ee860e7 100644 ---- a/Documentation/admin-guide/devices.txt -+++ b/Documentation/admin-guide/devices.txt -@@ -2339,13 +2339,7 @@ - disks (see major number 3) except that the limit on - partitions is 31. - -- 162 char Raw block device interface -- 0 = /dev/rawctl Raw I/O control device -- 1 = /dev/raw/raw1 First raw I/O device -- 2 = /dev/raw/raw2 Second raw I/O device -- ... -- max minor number of raw device is set by kernel config -- MAX_RAW_DEVS or raw module parameter 'max_raw_devs' -+ 162 char Used for (now removed) raw block device interface - - 163 char - -diff --git a/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst -new file mode 100644 -index 0000000000000..ec6e9f5bcf9e8 ---- /dev/null -+++ b/Documentation/admin-guide/hw-vuln/cross-thread-rsb.rst -@@ -0,0 +1,92 @@ -+ -+.. SPDX-License-Identifier: GPL-2.0 -+ -+Cross-Thread Return Address Predictions -+======================================= -+ -+Certain AMD and Hygon processors are subject to a cross-thread return address -+predictions vulnerability. When running in SMT mode and one sibling thread -+transitions out of C0 state, the other sibling thread could use return target -+predictions from the sibling thread that transitioned out of C0. -+ -+The Spectre v2 mitigations protect the Linux kernel, as it fills the return -+address prediction entries with safe targets when context switching to the idle -+thread. However, KVM does allow a VMM to prevent exiting guest mode when -+transitioning out of C0. This could result in a guest-controlled return target -+being consumed by the sibling thread. -+ -+Affected processors -+------------------- -+ -+The following CPUs are vulnerable: -+ -+ - AMD Family 17h processors -+ - Hygon Family 18h processors -+ -+Related CVEs -+------------ -+ -+The following CVE entry is related to this issue: -+ -+ ============== ======================================= -+ CVE-2022-27672 Cross-Thread Return Address Predictions -+ ============== ======================================= -+ -+Problem -+------- -+ -+Affected SMT-capable processors support 1T and 2T modes of execution when SMT -+is enabled. In 2T mode, both threads in a core are executing code. For the -+processor core to enter 1T mode, it is required that one of the threads -+requests to transition out of the C0 state. This can be communicated with the -+HLT instruction or with an MWAIT instruction that requests non-C0. -+When the thread re-enters the C0 state, the processor transitions back -+to 2T mode, assuming the other thread is also still in C0 state. -+ -+In affected processors, the return address predictor (RAP) is partitioned -+depending on the SMT mode. For instance, in 2T mode each thread uses a private -+16-entry RAP, but in 1T mode, the active thread uses a 32-entry RAP. Upon -+transition between 1T/2T mode, the RAP contents are not modified but the RAP -+pointers (which control the next return target to use for predictions) may -+change. This behavior may result in return targets from one SMT thread being -+used by RET predictions in the sibling thread following a 1T/2T switch. In -+particular, a RET instruction executed immediately after a transition to 1T may -+use a return target from the thread that just became idle. 
In theory, this -+could lead to information disclosure if the return targets used do not come -+from trustworthy code. -+ -+Attack scenarios -+---------------- -+ -+An attack can be mounted on affected processors by performing a series of CALL -+instructions with targeted return locations and then transitioning out of C0 -+state. -+ -+Mitigation mechanism -+-------------------- -+ -+Before entering idle state, the kernel context switches to the idle thread. The -+context switch fills the RAP entries (referred to as the RSB in Linux) with safe -+targets by performing a sequence of CALL instructions. -+ -+Prevent a guest VM from directly putting the processor into an idle state by -+intercepting HLT and MWAIT instructions. -+ -+Both mitigations are required to fully address this issue. -+ -+Mitigation control on the kernel command line -+--------------------------------------------- -+ -+Use existing Spectre v2 mitigations that will fill the RSB on context switch. -+ -+Mitigation control for KVM - module parameter -+--------------------------------------------- -+ -+By default, the KVM hypervisor mitigates this issue by intercepting guest -+attempts to transition out of C0. A VMM can use the KVM_CAP_X86_DISABLE_EXITS -+capability to override those interceptions, but since this is not common, the -+mitigation that covers this path is not enabled by default. -+ -+The mitigation for the KVM_CAP_X86_DISABLE_EXITS capability can be turned on -+using the boolean module parameter mitigate_smt_rsb, e.g.: -+ kvm.mitigate_smt_rsb=1 -diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst -new file mode 100644 -index 0000000000000..264bfa937f7de ---- /dev/null -+++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst -@@ -0,0 +1,109 @@ -+.. SPDX-License-Identifier: GPL-2.0 -+ -+GDS - Gather Data Sampling -+========================== -+ -+Gather Data Sampling is a hardware vulnerability which allows unprivileged -+speculative access to data which was previously stored in vector registers. -+ -+Problem -+------- -+When a gather instruction performs loads from memory, different data elements -+are merged into the destination vector register. However, when a gather -+instruction that is transiently executed encounters a fault, stale data from -+architectural or internal vector registers may get transiently forwarded to the -+destination vector register instead. This will allow a malicious attacker to -+infer stale data using typical side channel techniques like cache timing -+attacks. GDS is a purely sampling-based attack. -+ -+The attacker uses gather instructions to infer the stale vector register data. -+The victim does not need to do anything special other than use the vector -+registers. The victim does not need to use gather instructions to be -+vulnerable. -+ -+Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks -+are possible. -+ -+Attack scenarios -+---------------- -+Without mitigation, GDS can infer stale data across virtually all -+permission boundaries: -+ -+ Non-enclaves can infer SGX enclave data -+ Userspace can infer kernel data -+ Guests can infer data from hosts -+ Guest can infer guest from other guests -+ Users can infer data from other users -+ -+Because of this, it is important to ensure that the mitigation stays enabled in -+lower-privilege contexts like guests and when running outside SGX enclaves. -+ -+The hardware enforces the mitigation for SGX. 
Likewise, VMMs should ensure -+that guests are not allowed to disable the GDS mitigation. If a host erred and -+allowed this, a guest could theoretically disable GDS mitigation, mount an -+attack, and re-enable it. -+ -+Mitigation mechanism -+-------------------- -+This issue is mitigated in microcode. The microcode defines the following new -+bits: -+ -+ ================================ === ============================ -+ IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability -+ and mitigation support. -+ IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. -+ IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation -+ 0 by default. -+ IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes -+ to GDS_MITG_DIS are ignored -+ Can't be cleared once set. -+ ================================ === ============================ -+ -+GDS can also be mitigated on systems that don't have updated microcode by -+disabling AVX. This can be done by setting gather_data_sampling="force" or -+"clearcpuid=avx" on the kernel command-line. -+ -+If used, these options will disable AVX use by turning off XSAVE YMM support. -+However, the processor will still enumerate AVX support. Userspace that -+does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM -+support will break. -+ -+Mitigation control on the kernel command line -+--------------------------------------------- -+The mitigation can be disabled by setting "gather_data_sampling=off" or -+"mitigations=off" on the kernel command line. Not specifying either will default -+to the mitigation being enabled. Specifying "gather_data_sampling=force" will -+use the microcode mitigation when available or disable AVX on affected systems -+where the microcode hasn't been updated to include the mitigation. -+ -+GDS System Information -+------------------------ -+The kernel provides vulnerability status information through sysfs. For -+GDS this can be accessed by the following sysfs file: -+ -+/sys/devices/system/cpu/vulnerabilities/gather_data_sampling -+ -+The possible values contained in this file are: -+ -+ ============================== ============================================= -+ Not affected Processor not vulnerable. -+ Vulnerable Processor vulnerable and mitigation disabled. -+ Vulnerable: No microcode Processor vulnerable and microcode is missing -+ mitigation. -+ Mitigation: AVX disabled, -+ no microcode Processor is vulnerable and microcode is missing -+ mitigation. AVX disabled as mitigation. -+ Mitigation: Microcode Processor is vulnerable and mitigation is in -+ effect. -+ Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in -+ effect and cannot be disabled. -+ Unknown: Dependent on -+ hypervisor status Running on a virtual guest processor that is -+ affected but with no way to know if host -+ processor is mitigated or vulnerable. -+ ============================== ============================================= -+ -+GDS Default mitigation -+---------------------- -+The updated microcode will enable the mitigation by default. The kernel's -+default action is to leave the mitigation enabled. -diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst -index 8cbc711cda935..6828102baaa7a 100644 ---- a/Documentation/admin-guide/hw-vuln/index.rst -+++ b/Documentation/admin-guide/hw-vuln/index.rst -@@ -17,3 +17,7 @@ are configurable at compile, boot or run time. 
- special-register-buffer-data-sampling.rst - core-scheduling.rst - l1d_flush.rst -+ processor_mmio_stale_data.rst -+ cross-thread-rsb.rst -+ gather_data_sampling.rst -+ srso -diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst -new file mode 100644 -index 0000000000000..c98fd11907cc8 ---- /dev/null -+++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst -@@ -0,0 +1,260 @@ -+========================================= -+Processor MMIO Stale Data Vulnerabilities -+========================================= -+ -+Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O -+(MMIO) vulnerabilities that can expose data. The sequences of operations for -+exposing data range from simple to very complex. Because most of the -+vulnerabilities require the attacker to have access to MMIO, many environments -+are not affected. System environments using virtualization where MMIO access is -+provided to untrusted guests may need mitigation. These vulnerabilities are -+not transient execution attacks. However, these vulnerabilities may propagate -+stale data into core fill buffers where the data can subsequently be inferred -+by an unmitigated transient execution attack. Mitigation for these -+vulnerabilities includes a combination of microcode update and software -+changes, depending on the platform and usage model. Some of these mitigations -+are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or -+those used to mitigate Special Register Buffer Data Sampling (SRBDS). -+ -+Data Propagators -+================ -+Propagators are operations that result in stale data being copied or moved from -+one microarchitectural buffer or register to another. Processor MMIO Stale Data -+Vulnerabilities are operations that may result in stale data being directly -+read into an architectural, software-visible state or sampled from a buffer or -+register. -+ -+Fill Buffer Stale Data Propagator (FBSDP) -+----------------------------------------- -+Stale data may propagate from fill buffers (FB) into the non-coherent portion -+of the uncore on some non-coherent writes. Fill buffer propagation by itself -+does not make stale data architecturally visible. Stale data must be propagated -+to a location where it is subject to reading or sampling. -+ -+Sideband Stale Data Propagator (SSDP) -+------------------------------------- -+The sideband stale data propagator (SSDP) is limited to the client (including -+Intel Xeon server E3) uncore implementation. The sideband response buffer is -+shared by all client cores. For non-coherent reads that go to sideband -+destinations, the uncore logic returns 64 bytes of data to the core, including -+both requested data and unrequested stale data, from a transaction buffer and -+the sideband response buffer. As a result, stale data from the sideband -+response and transaction buffers may now reside in a core fill buffer. -+ -+Primary Stale Data Propagator (PSDP) -+------------------------------------ -+The primary stale data propagator (PSDP) is limited to the client (including -+Intel Xeon server E3) uncore implementation. Similar to the sideband response -+buffer, the primary response buffer is shared by all client cores. For some -+processors, MMIO primary reads will return 64 bytes of data to the core fill -+buffer including both requested data and unrequested stale data. This is -+similar to the sideband stale data propagator. 
-+ -+Vulnerabilities -+=============== -+Device Register Partial Write (DRPW) (CVE-2022-21166) -+----------------------------------------------------- -+Some endpoint MMIO registers incorrectly handle writes that are smaller than -+the register size. Instead of aborting the write or only copying the correct -+subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than -+specified by the write transaction may be written to the register. On -+processors affected by FBSDP, this may expose stale data from the fill buffers -+of the core that created the write transaction. -+ -+Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) -+---------------------------------------------------- -+After propagators may have moved data around the uncore and copied stale data -+into client core fill buffers, processors affected by MFBDS can leak data from -+the fill buffer. It is limited to the client (including Intel Xeon server E3) -+uncore implementation. -+ -+Shared Buffers Data Read (SBDR) (CVE-2022-21123) -+------------------------------------------------ -+It is similar to Shared Buffer Data Sampling (SBDS) except that the data is -+directly read into the architectural software-visible state. It is limited to -+the client (including Intel Xeon server E3) uncore implementation. -+ -+Affected Processors -+=================== -+Not all the CPUs are affected by all the variants. For instance, most -+processors for the server market (excluding Intel Xeon E3 processors) are -+impacted by only Device Register Partial Write (DRPW). -+ -+Below is the list of affected Intel processors [#f1]_: -+ -+ =================== ============ ========= -+ Common name Family_Model Steppings -+ =================== ============ ========= -+ HASWELL_X 06_3FH 2,4 -+ SKYLAKE_L 06_4EH 3 -+ BROADWELL_X 06_4FH All -+ SKYLAKE_X 06_55H 3,4,6,7,11 -+ BROADWELL_D 06_56H 3,4,5 -+ SKYLAKE 06_5EH 3 -+ ICELAKE_X 06_6AH 4,5,6 -+ ICELAKE_D 06_6CH 1 -+ ICELAKE_L 06_7EH 5 -+ ATOM_TREMONT_D 06_86H All -+ LAKEFIELD 06_8AH 1 -+ KABYLAKE_L 06_8EH 9 to 12 -+ ATOM_TREMONT 06_96H 1 -+ ATOM_TREMONT_L 06_9CH 0 -+ KABYLAKE 06_9EH 9 to 13 -+ COMETLAKE 06_A5H 2,3,5 -+ COMETLAKE_L 06_A6H 0,1 -+ ROCKETLAKE 06_A7H 1 -+ =================== ============ ========= -+ -+If a CPU is in the affected processor list, but not affected by a variant, it -+is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later -+section, mitigation largely remains the same for all the variants, i.e. to -+clear the CPU fill buffers via VERW instruction. -+ -+New bits in MSRs -+================ -+Newer processors and microcode update on existing affected processors added new -+bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate -+specific variants of Processor MMIO Stale Data vulnerabilities and mitigation -+capability. -+ -+MSR IA32_ARCH_CAPABILITIES -+-------------------------- -+Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the -+ Shared Buffers Data Read (SBDR) vulnerability or the sideband stale -+ data propagator (SSDP). -+Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer -+ Stale Data Propagator (FBSDP). -+Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data -+ Propagator (PSDP). -+Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer -+ values as part of MD_CLEAR operations. 
Processors that do not -+ enumerate MDS_NO (meaning they are affected by MDS) but that do -+ enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate -+ FB_CLEAR as part of their MD_CLEAR support. -+Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR -+ IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS -+ bit can be set to cause the VERW instruction to not perform the -+ FB_CLEAR action. Not all processors that support FB_CLEAR will support -+ FB_CLEAR_CTRL. -+ -+MSR IA32_MCU_OPT_CTRL -+--------------------- -+Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR -+action. This may be useful to reduce the performance impact of FB_CLEAR in -+cases where system software deems it warranted (for example, when performance -+is more critical, or the untrusted software has no MMIO access). Note that -+FB_CLEAR_DIS has no impact on enumeration (for example, it does not change -+FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors -+that enumerate FB_CLEAR. -+ -+Mitigation -+========== -+Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the -+same mitigation strategy to force the CPU to clear the affected buffers before -+an attacker can extract the secrets. -+ -+This is achieved by using the otherwise unused and obsolete VERW instruction in -+combination with a microcode update. The microcode clears the affected CPU -+buffers when the VERW instruction is executed. -+ -+Kernel reuses the MDS function to invoke the buffer clearing: -+ -+ mds_clear_cpu_buffers() -+ -+On MDS affected CPUs, the kernel already invokes CPU buffer clear on -+kernel/userspace, hypervisor/guest and C-state (idle) transitions. No -+additional mitigation is needed on such CPUs. -+ -+For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker -+with MMIO capability. Therefore, VERW is not required for kernel/userspace. For -+virtualization case, VERW is only needed at VMENTER for a guest with MMIO -+capability. -+ -+Mitigation points -+----------------- -+Return to user space -+^^^^^^^^^^^^^^^^^^^^ -+Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation -+needed. -+ -+C-State transition -+^^^^^^^^^^^^^^^^^^ -+Control register writes by CPU during C-state transition can propagate data -+from fill buffer to uncore buffers. Execute VERW before C-state transition to -+clear CPU fill buffers. -+ -+Guest entry point -+^^^^^^^^^^^^^^^^^ -+Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise -+execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by -+MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO -+Stale Data vulnerabilities, so there is no need to execute VERW for such guests. -+ -+Mitigation control on the kernel command line -+--------------------------------------------- -+The kernel command line allows to control the Processor MMIO Stale Data -+mitigations at boot time with the option "mmio_stale_data=". The valid -+arguments for this option are: -+ -+ ========== ================================================================= -+ full If the CPU is vulnerable, enable mitigation; CPU buffer clearing -+ on exit to userspace and when entering a VM. Idle transitions are -+ protected as well. It does not automatically disable SMT. -+ full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the -+ complete mitigation. -+ off Disables mitigation completely. 
-+ ========== ================================================================= -+ -+If the CPU is affected and mmio_stale_data=off is not supplied on the kernel -+command line, then the kernel selects the appropriate mitigation. -+ -+Mitigation status information -+----------------------------- -+The Linux kernel provides a sysfs interface to enumerate the current -+vulnerability status of the system: whether the system is vulnerable, and -+which mitigations are active. The relevant sysfs file is: -+ -+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data -+ -+The possible values in this file are: -+ -+ .. list-table:: -+ -+ * - 'Not affected' -+ - The processor is not vulnerable -+ * - 'Vulnerable' -+ - The processor is vulnerable, but no mitigation enabled -+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode' -+ - The processor is vulnerable, but microcode is not updated. The -+ mitigation is enabled on a best effort basis. -+ * - 'Mitigation: Clear CPU buffers' -+ - The processor is vulnerable and the CPU buffer clearing mitigation is -+ enabled. -+ * - 'Unknown: No mitigations' -+ - The processor vulnerability status is unknown because it is -+ out of Servicing period. Mitigation is not attempted. -+ -+Definitions: -+------------ -+ -+Servicing period: The process of providing functional and security updates to -+Intel processors or platforms, utilizing the Intel Platform Update (IPU) -+process or other similar mechanisms. -+ -+End of Servicing Updates (ESU): ESU is the date at which Intel will no -+longer provide Servicing, such as through IPU or other similar update -+processes. ESU dates will typically be aligned to end of quarter. -+ -+If the processor is vulnerable then the following information is appended to -+the above information: -+ -+ ======================== =========================================== -+ 'SMT vulnerable' SMT is enabled -+ 'SMT disabled' SMT is disabled -+ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown -+ ======================== =========================================== -+ -+References -+---------- -+.. [#f1] Affected Processors -+ https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html -diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst -index e05e581af5cfe..0fba3758d0da8 100644 ---- a/Documentation/admin-guide/hw-vuln/spectre.rst -+++ b/Documentation/admin-guide/hw-vuln/spectre.rst -@@ -60,8 +60,8 @@ privileged data touched during the speculative execution. - Spectre variant 1 attacks take advantage of speculative execution of - conditional branches, while Spectre variant 2 attacks use speculative - execution of indirect branches to leak privileged memory. --See :ref:`[1] ` :ref:`[5] ` :ref:`[7] ` --:ref:`[10] ` :ref:`[11] `. -+See :ref:`[1] ` :ref:`[5] ` :ref:`[6] ` -+:ref:`[7] ` :ref:`[10] ` :ref:`[11] `. - - Spectre variant 1 (Bounds Check Bypass) - --------------------------------------- -@@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the - speculative execution's side effects left in level 1 cache to infer the - victim's data. - -+Yet another variant 2 attack vector is for the attacker to poison the -+Branch History Buffer (BHB) to speculatively steer an indirect branch -+to a specific Branch Target Buffer (BTB) entry, even if the entry isn't -+associated with the source address of the indirect branch. 
Specifically, -+the BHB might be shared across privilege levels even in the presence of -+Enhanced IBRS. -+ -+Currently the only known real-world BHB attack vector is via -+unprivileged eBPF. Therefore, it's highly recommended to not enable -+unprivileged eBPF, especially when eIBRS is used (without retpolines). -+For a full mitigation against BHB attacks, it's recommended to use -+retpolines (or eIBRS combined with retpolines). -+ - Attack scenarios - ---------------- - -@@ -364,13 +377,15 @@ The possible values in this file are: - - - Kernel status: - -- ==================================== ================================= -- 'Not affected' The processor is not vulnerable -- 'Vulnerable' Vulnerable, no mitigation -- 'Mitigation: Full generic retpoline' Software-focused mitigation -- 'Mitigation: Full AMD retpoline' AMD-specific software mitigation -- 'Mitigation: Enhanced IBRS' Hardware-focused mitigation -- ==================================== ================================= -+ ======================================== ================================= -+ 'Not affected' The processor is not vulnerable -+ 'Mitigation: None' Vulnerable, no mitigation -+ 'Mitigation: Retpolines' Use Retpoline thunks -+ 'Mitigation: LFENCE' Use LFENCE instructions -+ 'Mitigation: Enhanced IBRS' Hardware-focused mitigation -+ 'Mitigation: Enhanced IBRS + Retpolines' Hardware-focused + Retpolines -+ 'Mitigation: Enhanced IBRS + LFENCE' Hardware-focused + LFENCE -+ ======================================== ================================= - - - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is - used to protect against Spectre variant 2 attacks when calling firmware (x86 only). -@@ -407,6 +422,14 @@ The possible values in this file are: - 'RSB filling' Protection of RSB on context switch enabled - ============= =========================================== - -+ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status: -+ -+ =========================== ======================================================= -+ 'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled -+ 'PBRSB-eIBRS: Vulnerable' CPU is vulnerable -+ 'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB -+ =========================== ======================================================= -+ - Full mitigation might require a microcode update from the CPU - vendor. When the necessary microcode is not available, the kernel will - report vulnerability. -@@ -456,8 +479,16 @@ Spectre variant 2 - On Intel Skylake-era systems the mitigation covers most, but not all, - cases. See :ref:`[3] ` for more details. - -- On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced -- IBRS on x86), retpoline is automatically disabled at run time. -+ On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS -+ or enhanced IBRS on x86), retpoline is automatically disabled at run time. -+ -+ Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at -+ boot, by setting the IBRS bit, and they're automatically protected against -+ Spectre v2 variant attacks, including cross-thread branch target injections -+ on SMT systems (STIBP). In other words, eIBRS enables STIBP too. -+ -+ Legacy IBRS systems clear the IBRS bit on exit to userspace and -+ therefore explicitly enable STIBP for that - - The retpoline mitigation is turned on by default on vulnerable - CPUs. 
It can be forced on or off by the administrator -@@ -468,7 +499,7 @@ Spectre variant 2 - before invoking any firmware code to prevent Spectre variant 2 exploits - using the firmware. - -- Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y -+ Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y - and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes - attacks on the kernel generally more difficult. - -@@ -481,9 +512,12 @@ Spectre variant 2 - For Spectre variant 2 mitigation, individual user programs - can be compiled with return trampolines for indirect branches. - This protects them from consuming poisoned entries in the branch -- target buffer left by malicious software. Alternatively, the -- programs can disable their indirect branch speculation via prctl() -- (See :ref:`Documentation/userspace-api/spec_ctrl.rst `). -+ target buffer left by malicious software. -+ -+ On legacy IBRS systems, at return to userspace, implicit STIBP is disabled -+ because the kernel clears the IBRS bit. In this case, the userspace programs -+ can disable indirect branch speculation via prctl() (See -+ :ref:`Documentation/userspace-api/spec_ctrl.rst `). - On x86, this will turn on STIBP to guard against attacks from the - sibling thread when the user program is running, and use IBPB to - flush the branch target buffer when switching to/from the program. -@@ -584,12 +618,13 @@ kernel command line. - - Specific mitigations can also be selected manually: - -- retpoline -- replace indirect branches -- retpoline,generic -- google's original retpoline -- retpoline,amd -- AMD-specific minimal thunk -+ retpoline auto pick between generic,lfence -+ retpoline,generic Retpolines -+ retpoline,lfence LFENCE; indirect branch -+ retpoline,amd alias for retpoline,lfence -+ eibrs enhanced IBRS -+ eibrs,retpoline enhanced IBRS + Retpolines -+ eibrs,lfence enhanced IBRS + LFENCE - - Not specifying this option is equivalent to - spectre_v2=auto. -@@ -730,7 +765,7 @@ AMD white papers: - - .. _spec_ref6: - --[6] `Software techniques for managing speculation on AMD processors `_. -+[6] `Software techniques for managing speculation on AMD processors `_. - - ARM white papers: - -diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst -new file mode 100644 -index 0000000000000..f79cb11b080f6 ---- /dev/null -+++ b/Documentation/admin-guide/hw-vuln/srso.rst -@@ -0,0 +1,133 @@ -+.. SPDX-License-Identifier: GPL-2.0 -+ -+Speculative Return Stack Overflow (SRSO) -+======================================== -+ -+This is a mitigation for the speculative return stack overflow (SRSO) -+vulnerability found on AMD processors. The mechanism is by now the well -+known scenario of poisoning CPU functional units - the Branch Target -+Buffer (BTB) and Return Address Predictor (RAP) in this case - and then -+tricking the elevated privilege domain (the kernel) into leaking -+sensitive data. -+ -+AMD CPUs predict RET instructions using a Return Address Predictor (aka -+Return Address Stack/Return Stack Buffer). In some cases, a non-architectural -+CALL instruction (i.e., an instruction predicted to be a CALL but is -+not actually a CALL) can create an entry in the RAP which may be used -+to predict the target of a subsequent RET instruction. 
-+ -+The specific circumstances that lead to this varies by microarchitecture -+but the concern is that an attacker can mis-train the CPU BTB to predict -+non-architectural CALL instructions in kernel space and use this to -+control the speculative target of a subsequent kernel RET, potentially -+leading to information disclosure via a speculative side-channel. -+ -+The issue is tracked under CVE-2023-20569. -+ -+Affected processors -+------------------- -+ -+AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older -+processors have not been investigated. -+ -+System information and options -+------------------------------ -+ -+First of all, it is required that the latest microcode be loaded for -+mitigations to be effective. -+ -+The sysfs file showing SRSO mitigation status is: -+ -+ /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow -+ -+The possible values in this file are: -+ -+ - 'Not affected' The processor is not vulnerable -+ -+ - 'Vulnerable: no microcode' The processor is vulnerable, no -+ microcode extending IBPB functionality -+ to address the vulnerability has been -+ applied. -+ -+ - 'Mitigation: microcode' Extended IBPB functionality microcode -+ patch has been applied. It does not -+ address User->Kernel and Guest->Host -+ transitions protection but it does -+ address User->User and VM->VM attack -+ vectors. -+ -+ (spec_rstack_overflow=microcode) -+ -+ - 'Mitigation: safe RET' Software-only mitigation. It complements -+ the extended IBPB microcode patch -+ functionality by addressing User->Kernel -+ and Guest->Host transitions protection. -+ -+ Selected by default or by -+ spec_rstack_overflow=safe-ret -+ -+ - 'Mitigation: IBPB' Similar protection as "safe RET" above -+ but employs an IBPB barrier on privilege -+ domain crossings (User->Kernel, -+ Guest->Host). -+ -+ (spec_rstack_overflow=ibpb) -+ -+ - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider -+ scenario - the Guest->Host transitions -+ only. -+ -+ (spec_rstack_overflow=ibpb-vmexit) -+ -+In order to exploit vulnerability, an attacker needs to: -+ -+ - gain local access on the machine -+ -+ - break kASLR -+ -+ - find gadgets in the running kernel in order to use them in the exploit -+ -+ - potentially create and pin an additional workload on the sibling -+ thread, depending on the microarchitecture (not necessary on fam 0x19) -+ -+ - run the exploit -+ -+Considering the performance implications of each mitigation type, the -+default one is 'Mitigation: safe RET' which should take care of most -+attack vectors, including the local User->Kernel one. -+ -+As always, the user is advised to keep her/his system up-to-date by -+applying software updates regularly. -+ -+The default setting will be reevaluated when needed and especially when -+new attack vectors appear. -+ -+As one can surmise, 'Mitigation: safe RET' does come at the cost of some -+performance depending on the workload. If one trusts her/his userspace -+and does not want to suffer the performance impact, one can always -+disable the mitigation with spec_rstack_overflow=off. -+ -+Similarly, 'Mitigation: IBPB' is another full mitigation type employing -+an indrect branch prediction barrier after having applied the required -+microcode patch for one's system. This mitigation comes also at -+a performance cost. 
-+ -+Mitigation: safe RET -+-------------------- -+ -+The mitigation works by ensuring all RET instructions speculate to -+a controlled location, similar to how speculation is controlled in the -+retpoline sequence. To accomplish this, the __x86_return_thunk forces -+the CPU to mispredict every function return using a 'safe return' -+sequence. -+ -+To ensure the safety of this mitigation, the kernel must ensure that the -+safe return sequence is itself free from attacker interference. In Zen3 -+and Zen4, this is accomplished by creating a BTB alias between the -+untraining function srso_alias_untrain_ret() and the safe return -+function srso_alias_safe_ret() which results in evicting a potentially -+poisoned BTB entry and using that safe one for all function returns. -+ -+In older Zen1 and Zen2, this is accomplished using a reinterpretation -+technique similar to Retbleed one: srso_untrain_ret() and -+srso_safe_ret(). -diff --git a/Documentation/admin-guide/kdump/gdbmacros.txt b/Documentation/admin-guide/kdump/gdbmacros.txt -index 82aecdcae8a6c..030de95e3e6b2 100644 ---- a/Documentation/admin-guide/kdump/gdbmacros.txt -+++ b/Documentation/admin-guide/kdump/gdbmacros.txt -@@ -312,10 +312,10 @@ define dmesg - set var $prev_flags = $info->flags - end - -- set var $id = ($id + 1) & $id_mask - if ($id == $end_id) - loop_break - end -+ set var $id = ($id + 1) & $id_mask - end - end - document dmesg -diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 43dc35fe5bc03..2cd4d66ab64c2 100644 ---- a/Documentation/admin-guide/kernel-parameters.txt -+++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -916,10 +916,6 @@ - - debugpat [X86] Enable PAT debugging - -- decnet.addr= [HW,NET] -- Format: [,] -- See also Documentation/networking/decnet.rst. -- - default_hugepagesz= - [HW] The size of the default HugeTLB page. This is - the size represented by the legacy /proc/ hugepages -@@ -1505,6 +1501,26 @@ - Format: off | on - default: on - -+ gather_data_sampling= -+ [X86,INTEL] Control the Gather Data Sampling (GDS) -+ mitigation. -+ -+ Gather Data Sampling is a hardware vulnerability which -+ allows unprivileged speculative access to data which was -+ previously stored in vector registers. -+ -+ This issue is mitigated by default in updated microcode. -+ The mitigation may have a performance impact but can be -+ disabled. On systems without the microcode mitigation -+ disabling AVX serves as a mitigation. -+ -+ force: Disable AVX to mitigate systems without -+ microcode mitigation. No effect if the microcode -+ mitigation is present. Known to cause crashes in -+ userspace with buggy AVX enumeration. -+ -+ off: Disable GDS mitigation. -+ - gcov_persist= [GCOV] When non-zero (default), profiling data for - kernel modules is saved and remains accessible via - debugfs, even when the module is unloaded/reloaded. -@@ -1690,6 +1706,8 @@ - architectures force reset to be always executed - i8042.unlock [HW] Unlock (ignore) the keylock - i8042.kbdreset [HW] Reset device connected to KBD port -+ i8042.probe_defer -+ [HW] Allow deferred probing upon i8042 probe errors - - i810= [HW,DRM] - -@@ -2198,24 +2216,57 @@ - - ivrs_ioapic [HW,X86-64] - Provide an override to the IOAPIC-ID<->DEVICE-ID -- mapping provided in the IVRS ACPI table. For -- example, to map IOAPIC-ID decimal 10 to -- PCI device 00:14.0 write the parameter as: -+ mapping provided in the IVRS ACPI table. -+ By default, PCI segment is 0, and can be omitted. 
-+ -+ For example, to map IOAPIC-ID decimal 10 to -+ PCI segment 0x1 and PCI device 00:14.0, -+ write the parameter as: -+ ivrs_ioapic=10@0001:00:14.0 -+ -+ Deprecated formats: -+ * To map IOAPIC-ID decimal 10 to PCI device 00:14.0 -+ write the parameter as: - ivrs_ioapic[10]=00:14.0 -+ * To map IOAPIC-ID decimal 10 to PCI segment 0x1 and -+ PCI device 00:14.0 write the parameter as: -+ ivrs_ioapic[10]=0001:00:14.0 - - ivrs_hpet [HW,X86-64] - Provide an override to the HPET-ID<->DEVICE-ID -- mapping provided in the IVRS ACPI table. For -- example, to map HPET-ID decimal 0 to -- PCI device 00:14.0 write the parameter as: -+ mapping provided in the IVRS ACPI table. -+ By default, PCI segment is 0, and can be omitted. -+ -+ For example, to map HPET-ID decimal 10 to -+ PCI segment 0x1 and PCI device 00:14.0, -+ write the parameter as: -+ ivrs_hpet=10@0001:00:14.0 -+ -+ Deprecated formats: -+ * To map HPET-ID decimal 0 to PCI device 00:14.0 -+ write the parameter as: - ivrs_hpet[0]=00:14.0 -+ * To map HPET-ID decimal 10 to PCI segment 0x1 and -+ PCI device 00:14.0 write the parameter as: -+ ivrs_ioapic[10]=0001:00:14.0 - - ivrs_acpihid [HW,X86-64] - Provide an override to the ACPI-HID:UID<->DEVICE-ID -- mapping provided in the IVRS ACPI table. For -- example, to map UART-HID:UID AMD0020:0 to -- PCI device 00:14.5 write the parameter as: -+ mapping provided in the IVRS ACPI table. -+ By default, PCI segment is 0, and can be omitted. -+ -+ For example, to map UART-HID:UID AMD0020:0 to -+ PCI segment 0x1 and PCI device ID 00:14.5, -+ write the parameter as: -+ ivrs_acpihid=AMD0020:0@0001:00:14.5 -+ -+ Deprecated formats: -+ * To map UART-HID:UID AMD0020:0 to PCI segment is 0, -+ PCI device ID 00:14.5, write the parameter as: - ivrs_acpihid[00:14.5]=AMD0020:0 -+ * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and -+ PCI device ID 00:14.5, write the parameter as: -+ ivrs_acpihid[0001:00:14.5]=AMD0020:0 - - js= [HW,JOY] Analog joystick - See Documentation/input/joydev/joystick.rst. -@@ -2403,8 +2454,12 @@ - Default is 1 (enabled) - - kvm-intel.emulate_invalid_guest_state= -- [KVM,Intel] Enable emulation of invalid guest states -- Default is 0 (disabled) -+ [KVM,Intel] Disable emulation of invalid guest state. -+ Ignored if kvm-intel.enable_unrestricted_guest=1, as -+ guest state is never invalid for unrestricted guests. -+ This param doesn't apply to nested guests (L2), as KVM -+ never emulates invalid L2 guest state. -+ Default is 1 (enabled) - - kvm-intel.flexpriority= - [KVM,Intel] Disable FlexPriority feature (TPR shadow). -@@ -2999,20 +3054,23 @@ - Disable all optional CPU mitigations. This - improves system performance, but it may also - expose users to several CPU vulnerabilities. 
-- Equivalent to: nopti [X86,PPC] -+ Equivalent to: gather_data_sampling=off [X86] - kpti=0 [ARM64] -- nospectre_v1 [X86,PPC] -+ kvm.nx_huge_pages=off [X86] -+ l1tf=off [X86] -+ mds=off [X86] -+ mmio_stale_data=off [X86] -+ no_entry_flush [PPC] -+ no_uaccess_flush [PPC] - nobp=0 [S390] -+ nopti [X86,PPC] -+ nospectre_v1 [X86,PPC] - nospectre_v2 [X86,PPC,S390,ARM64] -- spectre_v2_user=off [X86] -+ retbleed=off [X86] - spec_store_bypass_disable=off [X86,PPC] -+ spectre_v2_user=off [X86] - ssbd=force-off [ARM64] -- l1tf=off [X86] -- mds=off [X86] - tsx_async_abort=off [X86] -- kvm.nx_huge_pages=off [X86] -- no_entry_flush [PPC] -- no_uaccess_flush [PPC] - - Exceptions: - This does not have any effect on -@@ -3034,6 +3092,8 @@ - Equivalent to: l1tf=flush,nosmt [X86] - mds=full,nosmt [X86] - tsx_async_abort=full,nosmt [X86] -+ mmio_stale_data=full,nosmt [X86] -+ retbleed=auto,nosmt [X86] - - mminit_loglevel= - [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this -@@ -3043,6 +3103,40 @@ - log everything. Information is printed at KERN_DEBUG - so loglevel=8 may also need to be specified. - -+ mmio_stale_data= -+ [X86,INTEL] Control mitigation for the Processor -+ MMIO Stale Data vulnerabilities. -+ -+ Processor MMIO Stale Data is a class of -+ vulnerabilities that may expose data after an MMIO -+ operation. Exposed data could originate or end in -+ the same CPU buffers as affected by MDS and TAA. -+ Therefore, similar to MDS and TAA, the mitigation -+ is to clear the affected CPU buffers. -+ -+ This parameter controls the mitigation. The -+ options are: -+ -+ full - Enable mitigation on vulnerable CPUs -+ -+ full,nosmt - Enable mitigation and disable SMT on -+ vulnerable CPUs. -+ -+ off - Unconditionally disable mitigation -+ -+ On MDS or TAA affected machines, -+ mmio_stale_data=off can be prevented by an active -+ MDS or TAA mitigation as these vulnerabilities are -+ mitigated with the same mechanism so in order to -+ disable this mitigation, you need to specify -+ mds=off and tsx_async_abort=off too. -+ -+ Not specifying this option is equivalent to -+ mmio_stale_data=full. -+ -+ For details see: -+ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst -+ - module.sig_enforce - [KNL] When CONFIG_MODULE_SIG is set, this means that - modules without (valid) signatures will fail to load. -@@ -3446,8 +3540,7 @@ - difficult since unequal pointers can no longer be - compared. However, if this command-line option is - specified, then all normal pointers will have their true -- value printed. Pointers printed via %pK may still be -- hashed. This option should only be specified when -+ value printed. This option should only be specified when - debugging the kernel. Please do not use on production - kernels. - -@@ -4302,6 +4395,12 @@ - fully seed the kernel's CRNG. Default is controlled - by CONFIG_RANDOM_TRUST_CPU. - -+ random.trust_bootloader={on,off} -+ [KNL] Enable or disable trusting the use of a -+ seed passed by the bootloader (if available) to -+ fully seed the kernel's CRNG. Default is controlled -+ by CONFIG_RANDOM_TRUST_BOOTLOADER. -+ - randomize_kstack_offset= - [KNL] Enable or disable kernel stack offset - randomization, which provides roughly 5 bits of -@@ -4921,6 +5020,43 @@ - - retain_initrd [RAM] Keep initrd memory after extraction - -+ retbleed= [X86] Control mitigation of RETBleed (Arbitrary -+ Speculative Code Execution with Return Instructions) -+ vulnerability. 
-+ -+ AMD-based UNRET and IBPB mitigations alone do not stop -+ sibling threads from influencing the predictions of other -+ sibling threads. For that reason, STIBP is used on pro- -+ cessors that support it, and mitigate SMT on processors -+ that don't. -+ -+ off - no mitigation -+ auto - automatically select a migitation -+ auto,nosmt - automatically select a mitigation, -+ disabling SMT if necessary for -+ the full mitigation (only on Zen1 -+ and older without STIBP). -+ ibpb - On AMD, mitigate short speculation -+ windows on basic block boundaries too. -+ Safe, highest perf impact. It also -+ enables STIBP if present. Not suitable -+ on Intel. -+ ibpb,nosmt - Like "ibpb" above but will disable SMT -+ when STIBP is not available. This is -+ the alternative for systems which do not -+ have STIBP. -+ unret - Force enable untrained return thunks, -+ only effective on AMD f15h-f17h based -+ systems. -+ unret,nosmt - Like unret, but will disable SMT when STIBP -+ is not available. This is the alternative for -+ systems which do not have STIBP. -+ -+ Selecting 'auto' will choose a mitigation method at run -+ time according to the CPU. -+ -+ Not specifying this option is equivalent to retbleed=auto. -+ - rfkill.default_state= - 0 "airplane mode". All wifi, bluetooth, wimax, gps, fm, - etc. communication is blocked by default. -@@ -5261,8 +5397,13 @@ - Specific mitigations can also be selected manually: - - retpoline - replace indirect branches -- retpoline,generic - google's original retpoline -- retpoline,amd - AMD-specific minimal thunk -+ retpoline,generic - Retpolines -+ retpoline,lfence - LFENCE; indirect branch -+ retpoline,amd - alias for retpoline,lfence -+ eibrs - enhanced IBRS -+ eibrs,retpoline - enhanced IBRS + Retpolines -+ eibrs,lfence - enhanced IBRS + LFENCE -+ ibrs - use IBRS to protect kernel - - Not specifying this option is equivalent to - spectre_v2=auto. -@@ -5309,6 +5450,17 @@ - Not specifying this option is equivalent to - spectre_v2_user=auto. - -+ spec_rstack_overflow= -+ [X86] Control RAS overflow mitigation on AMD Zen CPUs -+ -+ off - Disable mitigation -+ microcode - Enable microcode mitigation only -+ safe-ret - Enable sw-only safe RET mitigation (default) -+ ibpb - Enable mitigation by issuing IBPB on -+ kernel entry -+ ibpb-vmexit - Issue IBPB only on VMEXIT -+ (cloud-specific mitigation) -+ - spec_store_bypass_disable= - [HW] Control Speculative Store Bypass (SSB) Disable mitigation - (Speculative Store Bypass vulnerability) -@@ -5618,10 +5770,6 @@ - -1: disable all critical trip points in all thermal zones - : override all critical trip points - -- thermal.nocrt= [HW,ACPI] -- Set to disable actions on ACPI thermal zone -- critical and hot trip points. -- - thermal.off= [HW,ACPI] - 1: disable ACPI thermal control - -@@ -6349,6 +6497,13 @@ - improve timer resolution at the expense of processing - more timer interrupts. - -+ xen.balloon_boot_timeout= [XEN] -+ The time (in seconds) to wait before giving up to boot -+ in case initial ballooning fails to free enough memory. -+ Applies only when running as HVM or PVH guest and -+ started with less memory configured than allowed at -+ max. Default is 180. -+ - xen.event_eoi_delay= [XEN] - How long to delay EOI handling in case of event - storms (jiffies). Default is 10. 
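Whether any of the mitigation selections described above (for example
spec_rstack_overflow=, retbleed= or mmio_stale_data=) was actually passed at
boot can be checked from user space by scanning the standard /proc/cmdline
interface. A small C sketch; the default option name is only an example and
nothing beyond procfs is assumed::

    /* Report whether a given boot option was passed, e.g. "retbleed=". */
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
            const char *opt = argc > 1 ? argv[1] : "retbleed=";
            char cmdline[4096];
            FILE *f = fopen("/proc/cmdline", "r");

            if (!f || !fgets(cmdline, sizeof(cmdline), f)) {
                    perror("/proc/cmdline");
                    return 1;
            }
            fclose(f);

            for (char *tok = strtok(cmdline, " \n"); tok; tok = strtok(NULL, " \n")) {
                    if (!strncmp(tok, opt, strlen(opt))) {
                            printf("%s\n", tok);
                            return 0;
                    }
            }
            printf("%s not set (kernel default applies)\n", opt);
            return 0;
    }

If the option is absent, the kernel's default policy for that mitigation, as
documented above, is in effect.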
-diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst -index fb578fbbb76ca..49857ce1cd03e 100644 ---- a/Documentation/admin-guide/mm/pagemap.rst -+++ b/Documentation/admin-guide/mm/pagemap.rst -@@ -23,7 +23,7 @@ There are four components to pagemap: - * Bit 56 page exclusively mapped (since 4.2) - * Bit 57 pte is uffd-wp write-protected (since 5.13) (see - :ref:`Documentation/admin-guide/mm/userfaultfd.rst `) -- * Bits 57-60 zero -+ * Bits 58-60 zero - * Bit 61 page is file-page or shared-anon (since 3.5) - * Bit 62 page swapped - * Bit 63 page present -diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst -index aec2cd2aaea73..19754beb5a4e6 100644 ---- a/Documentation/admin-guide/pm/cpuidle.rst -+++ b/Documentation/admin-guide/pm/cpuidle.rst -@@ -612,8 +612,8 @@ the ``menu`` governor to be used on the systems that use the ``ladder`` governor - by default this way, for example. - - The other kernel command line parameters controlling CPU idle time management --described below are only relevant for the *x86* architecture and some of --them affect Intel processors only. -+described below are only relevant for the *x86* architecture and references -+to ``intel_idle`` affect Intel processors only. - - The *x86* architecture support code recognizes three kernel command line - options related to CPU idle time management: ``idle=poll``, ``idle=halt``, -@@ -635,10 +635,13 @@ idle, so it very well may hurt single-thread computations performance as well as - energy-efficiency. Thus using it for performance reasons may not be a good idea - at all.] - --The ``idle=nomwait`` option disables the ``intel_idle`` driver and causes --``acpi_idle`` to be used (as long as all of the information needed by it is --there in the system's ACPI tables), but it is not allowed to use the --``MWAIT`` instruction of the CPUs to ask the hardware to enter idle states. -+The ``idle=nomwait`` option prevents the use of ``MWAIT`` instruction of -+the CPU to enter idle states. When this option is used, the ``acpi_idle`` -+driver will use the ``HLT`` instruction instead of ``MWAIT``. On systems -+running Intel processors, this option disables the ``intel_idle`` driver -+and forces the use of the ``acpi_idle`` driver instead. Note that in either -+case, ``acpi_idle`` driver will function only if all the information needed -+by it is in the system's ACPI tables. - - In addition to the architecture-level kernel command line options affecting CPU - idle time management, there are parameters affecting individual ``CPUIdle`` -diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst -index 82e29837d5898..5a6993795bd26 100644 ---- a/Documentation/admin-guide/security-bugs.rst -+++ b/Documentation/admin-guide/security-bugs.rst -@@ -63,31 +63,28 @@ information submitted to the security list and any followup discussions - of the report are treated confidentially even after the embargo has been - lifted, in perpetuity. - --Coordination -------------- -- --Fixes for sensitive bugs, such as those that might lead to privilege --escalations, may need to be coordinated with the private -- mailing list so that distribution vendors --are well prepared to issue a fixed kernel upon public disclosure of the --upstream fix. Distros will need some time to test the proposed patch and --will generally request at least a few days of embargo, and vendor update --publication prefers to happen Tuesday through Thursday. 
When appropriate, --the security team can assist with this coordination, or the reporter can --include linux-distros from the start. In this case, remember to prefix --the email Subject line with "[vs]" as described in the linux-distros wiki: -- -+Coordination with other groups -+------------------------------ -+ -+The kernel security team strongly recommends that reporters of potential -+security issues NEVER contact the "linux-distros" mailing list until -+AFTER discussing it with the kernel security team. Do not Cc: both -+lists at once. You may contact the linux-distros mailing list after a -+fix has been agreed on and you fully understand the requirements that -+doing so will impose on you and the kernel community. -+ -+The different lists have different goals and the linux-distros rules do -+not contribute to actually fixing any potential security problems. - - CVE assignment - -------------- - --The security team does not normally assign CVEs, nor do we require them --for reports or fixes, as this can needlessly complicate the process and --may delay the bug handling. If a reporter wishes to have a CVE identifier --assigned ahead of public disclosure, they will need to contact the private --linux-distros list, described above. When such a CVE identifier is known --before a patch is provided, it is desirable to mention it in the commit --message if the reporter agrees. -+The security team does not assign CVEs, nor do we require them for -+reports or fixes, as this can needlessly complicate the process and may -+delay the bug handling. If a reporter wishes to have a CVE identifier -+assigned, they should find one by themselves, for example by contacting -+MITRE directly. However under no circumstances will a patch inclusion -+be delayed to wait for a CVE identifier to arrive. - - Non-disclosure agreements - ------------------------- -diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst -index 426162009ce99..48b91c485c993 100644 ---- a/Documentation/admin-guide/sysctl/kernel.rst -+++ b/Documentation/admin-guide/sysctl/kernel.rst -@@ -671,6 +671,15 @@ This is the default behavior. - an oops event is detected. - - -+oops_limit -+========== -+ -+Number of kernel oopses after which the kernel should panic when -+``panic_on_oops`` is not set. Setting this to 0 disables checking -+the count. Setting this to 1 has the same effect as setting -+``panic_on_oops=1``. The default value is 10000. -+ -+ - osrelease, ostype & version - =========================== - -@@ -795,6 +804,7 @@ bit 1 print system memory info - bit 2 print timer info - bit 3 print locks info if ``CONFIG_LOCKDEP`` is on - bit 4 print ftrace buffer -+bit 5 print all printk messages in buffer - ===== ============================================ - - So for example to print tasks and memory info on panic, user can:: -@@ -1013,28 +1023,22 @@ This is a directory, with the following entries: - * ``boot_id``: a UUID generated the first time this is retrieved, and - unvarying after that; - -+* ``uuid``: a UUID generated every time this is retrieved (this can -+ thus be used to generate UUIDs at will); -+ - * ``entropy_avail``: the pool's entropy count, in bits; - - * ``poolsize``: the entropy pool size, in bits; - - * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum -- number of seconds between urandom pool reseeding). 
-- --* ``uuid``: a UUID generated every time this is retrieved (this can -- thus be used to generate UUIDs at will); -+ number of seconds between urandom pool reseeding). This file is -+ writable for compatibility purposes, but writing to it has no effect -+ on any RNG behavior; - - * ``write_wakeup_threshold``: when the entropy count drops below this - (as a number of bits), processes waiting to write to ``/dev/random`` -- are woken up. -- --If ``drivers/char/random.c`` is built with ``ADD_INTERRUPT_BENCH`` --defined, these additional entries are present: -- --* ``add_interrupt_avg_cycles``: the average number of cycles between -- interrupts used to feed the pool; -- --* ``add_interrupt_avg_deviation``: the standard deviation seen on the -- number of cycles between interrupts used to feed the pool. -+ are woken up. This file is writable for compatibility purposes, but -+ writing to it has no effect on any RNG behavior. - - - randomize_va_space -@@ -1099,7 +1103,7 @@ task_delayacct - =============== - - Enables/disables task delay accounting (see --:doc:`accounting/delay-accounting.rst`). Enabling this feature incurs -+Documentation/accounting/delay-accounting.rst. Enabling this feature incurs - a small amount of overhead in the scheduler but is useful for debugging - and performance tuning. It is required by some tools such as iotop. - -@@ -1490,6 +1494,16 @@ entry will default to 2 instead of 0. - 2 Unprivileged calls to ``bpf()`` are disabled - = ============================================================= - -+ -+warn_limit -+========== -+ -+Number of kernel warnings after which the kernel should panic when -+``panic_on_warn`` is not set. Setting this to 0 disables checking -+the warning count. Setting this to 1 has the same effect as setting -+``panic_on_warn=1``. The default value is 0. -+ -+ - watchdog - ======== - -diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst -index 4150f74c521a8..806ecd5957a4d 100644 ---- a/Documentation/admin-guide/sysctl/net.rst -+++ b/Documentation/admin-guide/sysctl/net.rst -@@ -34,13 +34,14 @@ Table : Subdirectories in /proc/sys/net - ========= =================== = ========== ================== - Directory Content Directory Content - ========= =================== = ========== ================== -- core General parameter appletalk Appletalk protocol -- unix Unix domain sockets netrom NET/ROM -- 802 E802 protocol ax25 AX25 -- ethernet Ethernet protocol rose X.25 PLP layer -- ipv4 IP version 4 x25 X.25 protocol -- bridge Bridging decnet DEC net -- ipv6 IP version 6 tipc TIPC -+ 802 E802 protocol mptcp Multipath TCP -+ appletalk Appletalk protocol netfilter Network Filter -+ ax25 AX25 netrom NET/ROM -+ bridge Bridging rose X.25 PLP layer -+ core General parameter tipc TIPC -+ ethernet Ethernet protocol unix Unix domain sockets -+ ipv4 IP version 4 x25 X.25 protocol -+ ipv6 IP version 6 - ========= =================== = ========== ================== - - 1. /proc/sys/net/core - Network core options -@@ -271,7 +272,7 @@ poll cycle or the number of packets processed reaches netdev_budget. - netdev_max_backlog - ------------------ - --Maximum number of packets, queued on the INPUT side, when the interface -+Maximum number of packets, queued on the INPUT side, when the interface - receives packets faster than kernel can process them. 
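Like the other sysctl knobs in this document, netdev_max_backlog is exposed
under /proc/sys and can be inspected programmatically rather than via the
sysctl(8) tool. A short C sketch, assuming only the standard
/proc/sys/net/core/netdev_max_backlog path::

    /* Read the current netdev_max_backlog sysctl value. */
    #include <stdio.h>

    int main(void)
    {
            long val;
            FILE *f = fopen("/proc/sys/net/core/netdev_max_backlog", "r");

            if (!f || fscanf(f, "%ld", &val) != 1) {
                    perror("netdev_max_backlog");
                    return 1;
            }
            fclose(f);
            printf("netdev_max_backlog = %ld\n", val);
            return 0;
    }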
- - netdev_rss_key -diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst -index 5e795202111f2..f4804ce37c58b 100644 ---- a/Documentation/admin-guide/sysctl/vm.rst -+++ b/Documentation/admin-guide/sysctl/vm.rst -@@ -948,7 +948,7 @@ how much memory needs to be free before kswapd goes back to sleep. - - The unit is in fractions of 10,000. The default value of 10 means the - distances between watermarks are 0.1% of the available memory in the --node/system. The maximum value is 1000, or 10% of memory. -+node/system. The maximum value is 3000, or 30% of memory. - - A high rate of threads entering direct reclaim (allocstall) or kswapd - going to sleep prematurely (kswapd_low_wmark_hit_quickly) can indicate -diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst -index 328e0c454fbd4..749ae970c3195 100644 ---- a/Documentation/arm64/cpu-feature-registers.rst -+++ b/Documentation/arm64/cpu-feature-registers.rst -@@ -235,7 +235,15 @@ infrastructure: - | DPB | [3-0] | y | - +------------------------------+---------+---------+ - -- 6) ID_AA64MMFR2_EL1 - Memory model feature register 2 -+ 6) ID_AA64MMFR0_EL1 - Memory model feature register 0 -+ -+ +------------------------------+---------+---------+ -+ | Name | bits | visible | -+ +------------------------------+---------+---------+ -+ | ECV | [63-60] | y | -+ +------------------------------+---------+---------+ -+ -+ 7) ID_AA64MMFR2_EL1 - Memory model feature register 2 - - +------------------------------+---------+---------+ - | Name | bits | visible | -@@ -243,7 +251,7 @@ infrastructure: - | AT | [35-32] | y | - +------------------------------+---------+---------+ - -- 7) ID_AA64ZFR0_EL1 - SVE feature ID register 0 -+ 8) ID_AA64ZFR0_EL1 - SVE feature ID register 0 - - +------------------------------+---------+---------+ - | Name | bits | visible | -@@ -267,6 +275,23 @@ infrastructure: - | SVEVer | [3-0] | y | - +------------------------------+---------+---------+ - -+ 8) ID_AA64MMFR1_EL1 - Memory model feature register 1 -+ -+ +------------------------------+---------+---------+ -+ | Name | bits | visible | -+ +------------------------------+---------+---------+ -+ | AFP | [47-44] | y | -+ +------------------------------+---------+---------+ -+ -+ 9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2 -+ -+ +------------------------------+---------+---------+ -+ | Name | bits | visible | -+ +------------------------------+---------+---------+ -+ | RPRES | [7-4] | y | -+ +------------------------------+---------+---------+ -+ -+ - Appendix I: Example - ------------------- - -diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst -index ec1a5a63c1d09..b72ff17d600ae 100644 ---- a/Documentation/arm64/elf_hwcaps.rst -+++ b/Documentation/arm64/elf_hwcaps.rst -@@ -247,6 +247,18 @@ HWCAP2_MTE - Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described - by Documentation/arm64/memory-tagging-extension.rst. - -+HWCAP2_ECV -+ -+ Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001. -+ -+HWCAP2_AFP -+ -+ Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001. -+ -+HWCAP2_RPRES -+ -+ Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001. -+ - 4. 
Unused AT_HWCAP bits - ----------------------- - -diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst -index d410a47ffa57a..83a75e16e54de 100644 ---- a/Documentation/arm64/silicon-errata.rst -+++ b/Documentation/arm64/silicon-errata.rst -@@ -68,6 +68,8 @@ stable kernels. - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A55 | #1530923 | ARM64_ERRATUM_1530923 | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A55 | #2441007 | ARM64_ERRATUM_2441007 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A57 | #852523 | N/A | -@@ -76,10 +78,14 @@ stable kernels. - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A72 | #853709 | N/A | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 | -@@ -92,14 +98,34 @@ stable kernels. 
- +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Neoverse-N1 | #1349291 | N/A | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | MMU-500 | #841119,826419 | N/A | - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | MMU-600 | #1076982,1209401| N/A | -++----------------+-----------------+-----------------+-----------------------------+ -+| ARM | MMU-700 | #2268618,2812531| N/A | -++----------------+-----------------+-----------------+-----------------------------+ - +----------------+-----------------+-----------------+-----------------------------+ - | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | - +----------------+-----------------+-----------------+-----------------------------+ -@@ -163,6 +189,9 @@ stable kernels. - +----------------+-----------------+-----------------+-----------------------------+ - | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 | - +----------------+-----------------+-----------------+-----------------------------+ -+| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 | -++----------------+-----------------+-----------------+-----------------------------+ -+ - +----------------+-----------------+-----------------+-----------------------------+ - | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 | - +----------------+-----------------+-----------------+-----------------------------+ -diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt -index 093cdaefdb373..d8b101c97031b 100644 ---- a/Documentation/atomic_bitops.txt -+++ b/Documentation/atomic_bitops.txt -@@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is: - - RMW operations that have a return value are fully ordered. - - - RMW operations that are conditional are unordered on FAILURE, -- otherwise the above rules apply. 
In the case of test_and_{}_bit() operations, -+ otherwise the above rules apply. In the case of test_and_set_bit_lock(), - if the bit in memory is unchanged by the operation then it is deemed to have - failed. - -diff --git a/Documentation/conf.py b/Documentation/conf.py -index 948a97d6387dd..76b31798f94ff 100644 ---- a/Documentation/conf.py -+++ b/Documentation/conf.py -@@ -161,7 +161,7 @@ finally: - # - # This is also used if you do content translation via gettext catalogs. - # Usually you set "language" from the command line for these cases. --language = None -+language = 'en' - - # There are two options for replacing |today|: either, you set today to some - # non-false value, then it is used: -diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst -index 8e0f1fe8d17ad..895285c037c72 100644 ---- a/Documentation/dev-tools/gdb-kernel-debugging.rst -+++ b/Documentation/dev-tools/gdb-kernel-debugging.rst -@@ -39,6 +39,10 @@ Setup - this mode. In this case, you should build the kernel with - CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR. - -+- Build the gdb scripts (required on kernels v5.1 and above):: -+ -+ make scripts_gdb -+ - - Enable the gdb stub of QEMU/KVM, either - - - at VM startup time by appending "-s" to the QEMU command line -diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst -index 0fbe3308bf37f..48244d32780f6 100644 ---- a/Documentation/dev-tools/kfence.rst -+++ b/Documentation/dev-tools/kfence.rst -@@ -231,10 +231,14 @@ Guarded allocations are set up based on the sample interval. After expiration - of the sample interval, the next allocation through the main allocator (SLAB or - SLUB) returns a guarded allocation from the KFENCE object pool (allocation - sizes up to PAGE_SIZE are supported). At this point, the timer is reset, and --the next allocation is set up after the expiration of the interval. To "gate" a --KFENCE allocation through the main allocator's fast-path without overhead, --KFENCE relies on static branches via the static keys infrastructure. The static --branch is toggled to redirect the allocation to KFENCE. -+the next allocation is set up after the expiration of the interval. -+ -+When using ``CONFIG_KFENCE_STATIC_KEYS=y``, KFENCE allocations are "gated" -+through the main allocator's fast-path by relying on static branches via the -+static keys infrastructure. The static branch is toggled to redirect the -+allocation to KFENCE. Depending on sample interval, target workloads, and -+system architecture, this may perform better than the simple dynamic branch. -+Careful benchmarking is recommended. - - KFENCE objects each reside on a dedicated page, at either the left or right - page boundaries selected at random. 
The pages to the left and right of the -diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt -index e77635c5422c6..fa8b31660cadd 100644 ---- a/Documentation/devicetree/bindings/arm/omap/omap.txt -+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt -@@ -119,6 +119,9 @@ Boards (incomplete list of examples): - - OMAP3 BeagleBoard : Low cost community board - compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3" - -+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk -+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3" -+ - - OMAP3 Tobi with Overo : Commercial expansion board with daughter board - compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3" - -diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml -index 880ddafc634e6..a702a18d845e9 100644 ---- a/Documentation/devicetree/bindings/arm/qcom.yaml -+++ b/Documentation/devicetree/bindings/arm/qcom.yaml -@@ -135,28 +135,34 @@ properties: - - const: qcom,msm8974 - - - items: -- - enum: -- - alcatel,idol347 -- - const: qcom,msm8916-mtp/1 - - const: qcom,msm8916-mtp -+ - const: qcom,msm8916-mtp/1 - - const: qcom,msm8916 - - - items: - - enum: -- - longcheer,l8150 -+ - alcatel,idol347 - - samsung,a3u-eur - - samsung,a5u-eur - - const: qcom,msm8916 - -+ - items: -+ - const: longcheer,l8150 -+ - const: qcom,msm8916-v1-qrd/9-v1 -+ - const: qcom,msm8916 -+ - - items: - - enum: - - sony,karin_windy -+ - const: qcom,apq8094 -+ -+ - items: -+ - enum: - - sony,karin-row - - sony,satsuki-row - - sony,sumire-row - - sony,suzuran-row -- - qcom,msm8994 -- - const: qcom,apq8094 -+ - const: qcom,msm8994 - - - items: - - const: qcom,msm8996-mtp -diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt -deleted file mode 100644 -index bfb6da0281ecd..0000000000000 ---- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt -+++ /dev/null -@@ -1,63 +0,0 @@ --Binding for CEVA AHCI SATA Controller -- --Required properties: -- - reg: Physical base address and size of the controller's register area. -- - compatible: Compatibility string. Must be 'ceva,ahci-1v84'. -- - clocks: Input clock specifier. Refer to common clock bindings. -- - interrupts: Interrupt specifier. Refer to interrupt binding. -- - ceva,p0-cominit-params: OOB timing value for COMINIT parameter for port 0. -- - ceva,p1-cominit-params: OOB timing value for COMINIT parameter for port 1. -- The fields for the above parameter must be as shown below: -- ceva,pN-cominit-params = /bits/ 8 ; -- CINMP : COMINIT Negate Minimum Period. -- CIBGN : COMINIT Burst Gap Nominal. -- CIBGMX: COMINIT Burst Gap Maximum. -- CIBGMN: COMINIT Burst Gap Minimum. -- - ceva,p0-comwake-params: OOB timing value for COMWAKE parameter for port 0. -- - ceva,p1-comwake-params: OOB timing value for COMWAKE parameter for port 1. -- The fields for the above parameter must be as shown below: -- ceva,pN-comwake-params = /bits/ 8 ; -- CWBGMN: COMWAKE Burst Gap Minimum. -- CWBGMX: COMWAKE Burst Gap Maximum. -- CWBGN: COMWAKE Burst Gap Nominal. -- CWNMP: COMWAKE Negate Minimum Period. -- - ceva,p0-burst-params: Burst timing value for COM parameter for port 0. -- - ceva,p1-burst-params: Burst timing value for COM parameter for port 1. 
-- The fields for the above parameter must be as shown below: -- ceva,pN-burst-params = /bits/ 8 ; -- BMX: COM Burst Maximum. -- BNM: COM Burst Nominal. -- SFD: Signal Failure Detection value. -- PTST: Partial to Slumber timer value. -- - ceva,p0-retry-params: Retry interval timing value for port 0. -- - ceva,p1-retry-params: Retry interval timing value for port 1. -- The fields for the above parameter must be as shown below: -- ceva,pN-retry-params = /bits/ 16 ; -- RIT: Retry Interval Timer. -- RCT: Rate Change Timer. -- --Optional properties: -- - ceva,broken-gen2: limit to gen1 speed instead of gen2. -- - phys: phandle for the PHY device -- - resets: phandle to the reset controller for the SATA IP -- --Examples: -- ahci@fd0c0000 { -- compatible = "ceva,ahci-1v84"; -- reg = <0xfd0c0000 0x200>; -- interrupt-parent = <&gic>; -- interrupts = <0 133 4>; -- clocks = <&clkc SATA_CLK_ID>; -- ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>; -- ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>; -- ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>; -- ceva,p0-retry-params = /bits/ 16 <0x0216 0x7F06>; -- -- ceva,p1-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>; -- ceva,p1-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>; -- ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>; -- ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>; -- ceva,broken-gen2; -- phys = <&psgtr 1 PHY_TYPE_SATA 1 1>; -- resets = <&zynqmp_reset ZYNQMP_RESET_SATA>; -- }; -diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml -new file mode 100644 -index 0000000000000..71364c6081ff5 ---- /dev/null -+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml -@@ -0,0 +1,189 @@ -+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -+%YAML 1.2 -+--- -+$id: http://devicetree.org/schemas/ata/ceva,ahci-1v84.yaml# -+$schema: http://devicetree.org/meta-schemas/core.yaml# -+ -+title: Ceva AHCI SATA Controller -+ -+maintainers: -+ - Piyush Mehta -+ -+description: | -+ The Ceva SATA controller mostly conforms to the AHCI interface with some -+ special extensions to add functionality, is a high-performance dual-port -+ SATA host controller with an AHCI compliant command layer which supports -+ advanced features such as native command queuing and frame information -+ structure (FIS) based switching for systems employing port multipliers. -+ -+properties: -+ compatible: -+ const: ceva,ahci-1v84 -+ -+ reg: -+ maxItems: 1 -+ -+ clocks: -+ maxItems: 1 -+ -+ dma-coherent: true -+ -+ interrupts: -+ maxItems: 1 -+ -+ iommus: -+ maxItems: 4 -+ -+ power-domains: -+ maxItems: 1 -+ -+ ceva,p0-cominit-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ OOB timing value for COMINIT parameter for port 0. -+ The fields for the above parameter must be as shown below:- -+ ceva,p0-cominit-params = /bits/ 8 ; -+ items: -+ - description: CINMP - COMINIT Negate Minimum Period. -+ - description: CIBGN - COMINIT Burst Gap Nominal. -+ - description: CIBGMX - COMINIT Burst Gap Maximum. -+ - description: CIBGMN - COMINIT Burst Gap Minimum. -+ -+ ceva,p0-comwake-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ OOB timing value for COMWAKE parameter for port 0. -+ The fields for the above parameter must be as shown below:- -+ ceva,p0-comwake-params = /bits/ 8 ; -+ items: -+ - description: CWBGMN - COMWAKE Burst Gap Minimum. -+ - description: CWBGMX - COMWAKE Burst Gap Maximum. 
-+ - description: CWBGN - COMWAKE Burst Gap Nominal. -+ - description: CWNMP - COMWAKE Negate Minimum Period. -+ -+ ceva,p0-burst-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ Burst timing value for COM parameter for port 0. -+ The fields for the above parameter must be as shown below:- -+ ceva,p0-burst-params = /bits/ 8 ; -+ items: -+ - description: BMX - COM Burst Maximum. -+ - description: BNM - COM Burst Nominal. -+ - description: SFD - Signal Failure Detection value. -+ - description: PTST - Partial to Slumber timer value. -+ -+ ceva,p0-retry-params: -+ $ref: /schemas/types.yaml#/definitions/uint16-array -+ description: | -+ Retry interval timing value for port 0. -+ The fields for the above parameter must be as shown below:- -+ ceva,p0-retry-params = /bits/ 16 ; -+ items: -+ - description: RIT - Retry Interval Timer. -+ - description: RCT - Rate Change Timer. -+ -+ ceva,p1-cominit-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ OOB timing value for COMINIT parameter for port 1. -+ The fields for the above parameter must be as shown below:- -+ ceva,p1-cominit-params = /bits/ 8 ; -+ items: -+ - description: CINMP - COMINIT Negate Minimum Period. -+ - description: CIBGN - COMINIT Burst Gap Nominal. -+ - description: CIBGMX - COMINIT Burst Gap Maximum. -+ - description: CIBGMN - COMINIT Burst Gap Minimum. -+ -+ ceva,p1-comwake-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ OOB timing value for COMWAKE parameter for port 1. -+ The fields for the above parameter must be as shown below:- -+ ceva,p1-comwake-params = /bits/ 8 ; -+ items: -+ - description: CWBGMN - COMWAKE Burst Gap Minimum. -+ - description: CWBGMX - COMWAKE Burst Gap Maximum. -+ - description: CWBGN - COMWAKE Burst Gap Nominal. -+ - description: CWNMP - COMWAKE Negate Minimum Period. -+ -+ ceva,p1-burst-params: -+ $ref: /schemas/types.yaml#/definitions/uint8-array -+ description: | -+ Burst timing value for COM parameter for port 1. -+ The fields for the above parameter must be as shown below:- -+ ceva,p1-burst-params = /bits/ 8 ; -+ items: -+ - description: BMX - COM Burst Maximum. -+ - description: BNM - COM Burst Nominal. -+ - description: SFD - Signal Failure Detection value. -+ - description: PTST - Partial to Slumber timer value. -+ -+ ceva,p1-retry-params: -+ $ref: /schemas/types.yaml#/definitions/uint16-array -+ description: | -+ Retry interval timing value for port 1. -+ The fields for the above parameter must be as shown below:- -+ ceva,pN-retry-params = /bits/ 16 ; -+ items: -+ - description: RIT - Retry Interval Timer. -+ - description: RCT - Rate Change Timer. -+ -+ ceva,broken-gen2: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: | -+ limit to gen1 speed instead of gen2. 
-+ -+ phys: -+ maxItems: 1 -+ -+ phy-names: -+ items: -+ - const: sata-phy -+ -+ resets: -+ maxItems: 1 -+ -+required: -+ - compatible -+ - reg -+ - clocks -+ - interrupts -+ - ceva,p0-cominit-params -+ - ceva,p0-comwake-params -+ - ceva,p0-burst-params -+ - ceva,p0-retry-params -+ - ceva,p1-cominit-params -+ - ceva,p1-comwake-params -+ - ceva,p1-burst-params -+ - ceva,p1-retry-params -+ -+additionalProperties: false -+ -+examples: -+ - | -+ #include -+ #include -+ #include -+ #include -+ #include -+ #include -+ -+ sata: ahci@fd0c0000 { -+ compatible = "ceva,ahci-1v84"; -+ reg = <0xfd0c0000 0x200>; -+ interrupt-parent = <&gic>; -+ interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&zynqmp_clk SATA_REF>; -+ ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>; -+ ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>; -+ ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>; -+ ceva,p0-retry-params = /bits/ 16 <0x0216 0x7F06>; -+ ceva,p1-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>; -+ ceva,p1-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>; -+ ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>; -+ ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>; -+ ceva,broken-gen2; -+ phys = <&psgtr 1 PHY_TYPE_SATA 1 1>; -+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>; -+ }; -diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml -index 5a5b2214f0cae..005e0edd4609a 100644 ---- a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml -+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8996.yaml -@@ -22,16 +22,32 @@ properties: - const: qcom,gcc-msm8996 - - clocks: -+ minItems: 3 - items: - - description: XO source - - description: Second XO source - - description: Sleep clock source -+ - description: PCIe 0 PIPE clock (optional) -+ - description: PCIe 1 PIPE clock (optional) -+ - description: PCIe 2 PIPE clock (optional) -+ - description: USB3 PIPE clock (optional) -+ - description: UFS RX symbol 0 clock (optional) -+ - description: UFS RX symbol 1 clock (optional) -+ - description: UFS TX symbol 0 clock (optional) - - clock-names: -+ minItems: 3 - items: - - const: cxo - - const: cxo2 - - const: sleep_clk -+ - const: pcie_0_pipe_clk_src -+ - const: pcie_1_pipe_clk_src -+ - const: pcie_2_pipe_clk_src -+ - const: usb3_phy_pipe_clk_src -+ - const: ufs_rx_symbol_0_clk_src -+ - const: ufs_rx_symbol_1_clk_src -+ - const: ufs_tx_symbol_0_clk_src - - '#clock-cells': - const: 1 -diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml -index 229af98b1d305..7cd88bc3a67d7 100644 ---- a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml -+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml -@@ -16,8 +16,6 @@ description: | - reads required input clock frequencies from the devicetree and acts as clock - provider for all clock consumers of PS clocks. 
- --select: false -- - properties: - compatible: - const: xlnx,versal-clk -diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml -index cf5a208f2f105..343598c9f473b 100644 ---- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml -+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml -@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller - maintainers: - - Neil Armstrong - -+allOf: -+ - $ref: /schemas/sound/name-prefix.yaml# -+ - description: | - The Amlogic Meson Synopsys Designware Integration is composed of - - A Synopsys DesignWare HDMI Controller IP -@@ -99,6 +102,8 @@ properties: - "#sound-dai-cells": - const: 0 - -+ sound-name-prefix: true -+ - required: - - compatible - - reg -diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml -index 851cb07812173..047fd69e03770 100644 ---- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml -+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml -@@ -78,6 +78,10 @@ properties: - interrupts: - maxItems: 1 - -+ amlogic,canvas: -+ description: should point to a canvas provider node -+ $ref: /schemas/types.yaml#/definitions/phandle -+ - power-domains: - maxItems: 1 - description: phandle to the associated power domain -@@ -106,6 +110,7 @@ required: - - port@1 - - "#address-cells" - - "#size-cells" -+ - amlogic,canvas - - additionalProperties: false - -@@ -118,6 +123,7 @@ examples: - interrupts = <3>; - #address-cells = <1>; - #size-cells = <0>; -+ amlogic,canvas = <&canvas>; - - /* CVBS VDAC output port */ - port@0 { -diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml -index 35426fde86106..4b2cd556483c0 100644 ---- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml -+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml -@@ -31,7 +31,7 @@ properties: - - description: Display byte clock - - description: Display byte interface clock - - description: Display pixel clock -- - description: Display escape clock -+ - description: Display core clock - - description: Display AHB clock - - description: Display AXI clock - -@@ -64,6 +64,18 @@ properties: - Indicates if the DSI controller is driving a panel which needs - 2 DSI links. - -+ qcom,master-dsi: -+ type: boolean -+ description: | -+ Indicates if the DSI controller is the master DSI controller when -+ qcom,dual-dsi-mode enabled. -+ -+ qcom,sync-dual-dsi: -+ type: boolean -+ description: | -+ Indicates if the DSI controller needs to sync the other DSI controller -+ with MIPI DCS commands when qcom,dual-dsi-mode enabled. 
-+ - assigned-clocks: - minItems: 2 - maxItems: 2 -@@ -135,8 +147,6 @@ required: - - phy-names - - assigned-clocks - - assigned-clock-parents -- - power-domains -- - operating-points-v2 - - ports - - additionalProperties: false -diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml -index 4399715953e1a..4dd5eed50506a 100644 ---- a/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml -+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml -@@ -39,7 +39,6 @@ required: - - compatible - - reg - - reg-names -- - vdds-supply - - unevaluatedProperties: false - -diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml -index 064df50e21a5c..23355ac67d3d1 100644 ---- a/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml -+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml -@@ -37,7 +37,6 @@ required: - - compatible - - reg - - reg-names -- - vcca-supply - - unevaluatedProperties: false - -diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml -index 69eecaa64b187..ddb0ac4c29d44 100644 ---- a/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml -+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml -@@ -34,6 +34,10 @@ properties: - vddio-supply: - description: Phandle to vdd-io regulator device node. - -+ qcom,dsi-phy-regulator-ldo-mode: -+ type: boolean -+ description: Indicates if the LDO mode PHY regulator is wanted. -+ - required: - - compatible - - reg -diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml -index 0cebaaefda032..419c3b2ac5a6f 100644 ---- a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml -+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml -@@ -72,6 +72,7 @@ examples: - dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>; - rotation = <270>; -+ backlight = <&backlight>; - }; - }; - -diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml -index b6e1ebfaf3666..bb3cbc30d9121 100644 ---- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml -+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml -@@ -64,7 +64,7 @@ if: - then: - properties: - clocks: -- maxItems: 2 -+ minItems: 2 - - required: - - clock-names -diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt -index 8a9f3559335b5..7e14e26676ec9 100644 ---- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt -+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt -@@ -34,8 +34,8 @@ Example: - Use specific request line passing from dma - For example, MMC request line is 5 - -- sdhci: sdhci@98e00000 { -- compatible = "moxa,moxart-sdhci"; -+ mmc: mmc@98e00000 { -+ compatible = "moxa,moxart-mmc"; - reg = <0x98e00000 0x5C>; - interrupts = <5 0>; - clocks = <&clk_apb>; -diff --git a/Documentation/devicetree/bindings/gpio/gpio-altera.txt b/Documentation/devicetree/bindings/gpio/gpio-altera.txt -index 146e554b3c676..2a80e272cd666 100644 ---- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt -+++ 
b/Documentation/devicetree/bindings/gpio/gpio-altera.txt -@@ -9,8 +9,9 @@ Required properties: - - The second cell is reserved and is currently unused. - - gpio-controller : Marks the device node as a GPIO controller. - - interrupt-controller: Mark the device node as an interrupt controller --- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware. -+- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware. - - The first cell is the GPIO offset number within the GPIO controller. -+ - The second cell is the interrupt trigger type and level flags. - - interrupts: Specify the interrupt. - - altr,interrupt-type: Specifies the interrupt trigger type the GPIO - hardware is synthesized. This field is required if the Altera GPIO controller -@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 { - altr,interrupt-type = ; - #gpio-cells = <2>; - gpio-controller; -- #interrupt-cells = <1>; -+ #interrupt-cells = <2>; - interrupt-controller; - }; -diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml -index 378da2649e668..980f92ad9eba2 100644 ---- a/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml -+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.yaml -@@ -11,7 +11,11 @@ maintainers: - - properties: - compatible: -- const: xlnx,zynq-gpio-1.0 -+ enum: -+ - xlnx,zynq-gpio-1.0 -+ - xlnx,zynqmp-gpio-1.0 -+ - xlnx,versal-gpio-1.0 -+ - xlnx,pmc-gpio-1.0 - - reg: - maxItems: 1 -diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml -new file mode 100644 -index 0000000000000..ba54d6998f2ee ---- /dev/null -+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml -@@ -0,0 +1,88 @@ -+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause -+%YAML 1.2 -+--- -+$id: http://devicetree.org/schemas/iio/accel/adi,adxl355.yaml# -+$schema: http://devicetree.org/meta-schemas/core.yaml# -+ -+title: Analog Devices ADXL355 3-Axis, Low noise MEMS Accelerometer -+ -+maintainers: -+ - Puranjay Mohan -+ -+description: | -+ Analog Devices ADXL355 3-Axis, Low noise MEMS Accelerometer that supports -+ both I2C & SPI interfaces -+ https://www.analog.com/en/products/adxl355.html -+ -+properties: -+ compatible: -+ enum: -+ - adi,adxl355 -+ -+ reg: -+ maxItems: 1 -+ -+ interrupts: -+ minItems: 1 -+ maxItems: 3 -+ description: | -+ Type for DRDY should be IRQ_TYPE_EDGE_RISING. -+ Three configurable interrupt lines exist. -+ -+ interrupt-names: -+ description: Specify which interrupt line is in use. 
-+ items: -+ enum: -+ - INT1 -+ - INT2 -+ - DRDY -+ minItems: 1 -+ maxItems: 3 -+ -+ vdd-supply: -+ description: Regulator that provides power to the sensor -+ -+ vddio-supply: -+ description: Regulator that provides power to the bus -+ -+ spi-max-frequency: true -+ -+required: -+ - compatible -+ - reg -+ -+additionalProperties: false -+ -+examples: -+ - | -+ #include -+ #include -+ i2c { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ /* Example for a I2C device node */ -+ accelerometer@1d { -+ compatible = "adi,adxl355"; -+ reg = <0x1d>; -+ interrupt-parent = <&gpio>; -+ interrupts = <25 IRQ_TYPE_EDGE_RISING>; -+ interrupt-names = "DRDY"; -+ }; -+ }; -+ - | -+ #include -+ #include -+ spi { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ accelerometer@0 { -+ compatible = "adi,adxl355"; -+ reg = <0>; -+ spi-max-frequency = <1000000>; -+ interrupt-parent = <&gpio>; -+ interrupts = <25 IRQ_TYPE_EDGE_RISING>; -+ interrupt-names = "DRDY"; -+ }; -+ }; -diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml -index c115e2e99bd9a..4a7b1385fdc7e 100644 ---- a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml -+++ b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml -@@ -86,7 +86,7 @@ patternProperties: - of the MAX chips to the GyroADC, while MISO line of each Maxim - ADC connects to a shared input pin of the GyroADC. - enum: -- - adi,7476 -+ - adi,ad7476 - - fujitsu,mb88101a - - maxim,max1162 - - maxim,max11100 -diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml -index d5c54813ce872..a8f7720d1e3e2 100644 ---- a/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml -+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5766.yaml -@@ -54,7 +54,7 @@ examples: - - ad5766@0 { - compatible = "adi,ad5766"; -- output-range-microvolts = <(-5000) 5000>; -+ output-range-microvolts = <(-5000000) 5000000>; - reg = <0>; - spi-cpol; - spi-max-frequency = <1000000>; -diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -index b6bbc312a7cf7..1414ba9977c16 100644 ---- a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -+++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -@@ -24,8 +24,10 @@ properties: - - interrupts: - minItems: 1 -+ maxItems: 2 - description: - Should be configured with type IRQ_TYPE_EDGE_RISING. -+ If two interrupts are provided, expected order is INT1 and INT2. - - required: - - compatible -diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.txt b/Documentation/devicetree/bindings/input/hid-over-i2c.txt -index c76bafaf98d2f..34c43d3bddfd1 100644 ---- a/Documentation/devicetree/bindings/input/hid-over-i2c.txt -+++ b/Documentation/devicetree/bindings/input/hid-over-i2c.txt -@@ -32,6 +32,8 @@ device-specific compatible properties, which should be used in addition to the - - vdd-supply: phandle of the regulator that provides the supply voltage. - - post-power-on-delay-ms: time required by the device after enabling its regulators - or powering it on, before it is ready for communication. 
-+- touchscreen-inverted-x: See touchscreen.txt -+- touchscreen-inverted-y: See touchscreen.txt - - Example: - -diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml -index 877183cf42787..1ef849dc74d7e 100644 ---- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml -+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml -@@ -79,6 +79,8 @@ properties: - - properties: - data-lanes: -+ description: -+ Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines. - items: - minItems: 1 - maxItems: 4 -@@ -91,18 +93,6 @@ properties: - required: - - data-lanes - -- allOf: -- - if: -- properties: -- compatible: -- contains: -- const: fsl,imx7-mipi-csi2 -- then: -- properties: -- data-lanes: -- items: -- maxItems: 2 -- - port@1: - $ref: /schemas/graph.yaml#/properties/port - description: -diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml -index 2353f6cf3c805..750720e0123a0 100644 ---- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml -+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml -@@ -106,7 +106,6 @@ allOf: - - mediatek,mt2701-smi-larb - - mediatek,mt2712-smi-larb - - mediatek,mt6779-smi-larb -- - mediatek,mt8167-smi-larb - - mediatek,mt8192-smi-larb - - then: -diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml -index bd217e6f5018a..5cd144a9ec992 100644 ---- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml -+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml -@@ -55,7 +55,7 @@ patternProperties: - properties: - reg: - description: -- Contains the native Ready/Busy IDs. -+ Contains the chip-select IDs. 
- - nand-ecc-engine: - allOf: -@@ -184,7 +184,7 @@ examples: - nand-use-soft-ecc-engine; - nand-ecc-algo = "bch"; - -- /* controller specific properties */ -+ /* NAND chip specific properties */ - }; - - nand@1 { -diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml b/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml -index fbdc2083bec4f..20ee96584aba2 100644 ---- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml -+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.yaml -@@ -23,6 +23,7 @@ properties: - - brcm,bcm4345c5 - - brcm,bcm43540-bt - - brcm,bcm4335a0 -+ - brcm,bcm4349-bt - - shutdown-gpios: - maxItems: 1 -diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt -index 0968b40aef1e8..e3501bfa22e90 100644 ---- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt -+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt -@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 { - #address-cells = <1>; - #size-cells = <1>; - spi-max-frequency = <10000000>; -- bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>; -+ bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>; - interrupt-parent = <&gpio1>; - interrupts = <14 IRQ_TYPE_LEVEL_LOW>; - device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; -diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml -index 2766fe45bb98b..ee42328a109dc 100644 ---- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml -+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml -@@ -91,6 +91,14 @@ properties: - compensate for the board being designed with the lanes - swapped. - -+ enet-phy-lane-no-swap: -+ $ref: /schemas/types.yaml#/definitions/flag -+ description: -+ If set, indicates that PHY will disable swap of the -+ TX/RX lanes. This property allows the PHY to work correcly after -+ e.g. wrong bootstrap configuration caused by issues in PCB -+ layout design. -+ - eee-broken-100tx: - $ref: /schemas/types.yaml#/definitions/flag - description: -diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml -index b8a0b392b24ea..c52ec1ee7df6e 100644 ---- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml -+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml -@@ -106,6 +106,10 @@ properties: - - const: imem - - const: config - -+ qcom,qmp: -+ $ref: /schemas/types.yaml#/definitions/phandle -+ description: phandle to the AOSS side-channel message RAM -+ - qcom,smem-states: - $ref: /schemas/types.yaml#/definitions/phandle-array - description: State bits used in by the AP to signal the modem. 
-@@ -221,6 +225,8 @@ examples: - "imem", - "config"; - -+ qcom,qmp = <&aoss_qmp>; -+ - qcom,smem-states = <&ipa_smp2p_out 0>, - <&ipa_smp2p_out 1>; - qcom,smem-state-names = "ipa-clock-enabled-valid", -diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml -index c115c95ee584e..5b8db76b6cdd7 100644 ---- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml -+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml -@@ -53,20 +53,18 @@ properties: - - allwinner,sun8i-r40-emac - - allwinner,sun8i-v3s-emac - - allwinner,sun50i-a64-emac -- - loongson,ls2k-dwmac -- - loongson,ls7a-dwmac - - amlogic,meson6-dwmac - - amlogic,meson8b-dwmac - - amlogic,meson8m2-dwmac - - amlogic,meson-gxbb-dwmac - - amlogic,meson-axg-dwmac -- - loongson,ls2k-dwmac -- - loongson,ls7a-dwmac - - ingenic,jz4775-mac - - ingenic,x1000-mac - - ingenic,x1600-mac - - ingenic,x1830-mac - - ingenic,x2000-mac -+ - loongson,ls2k-dwmac -+ - loongson,ls7a-dwmac - - rockchip,px30-gmac - - rockchip,rk3128-gmac - - rockchip,rk3228-gmac -diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml -index acea1cd444fd5..9b0548264a397 100644 ---- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml -+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml -@@ -14,9 +14,6 @@ description: |+ - This PCIe host controller is based on the Synopsys DesignWare PCIe IP - and thus inherits all the common properties defined in snps,dw-pcie.yaml. - --allOf: -- - $ref: /schemas/pci/snps,dw-pcie.yaml# -- - properties: - compatible: - enum: -@@ -59,7 +56,7 @@ properties: - - const: pcie - - const: pcie_bus - - const: pcie_phy -- - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie -+ - enum: [ pcie_inbound_axi, pcie_aux ] - - num-lanes: - const: 1 -@@ -166,6 +163,47 @@ required: - - clocks - - clock-names - -+allOf: -+ - $ref: /schemas/pci/snps,dw-pcie.yaml# -+ - if: -+ properties: -+ compatible: -+ contains: -+ const: fsl,imx6sx-pcie -+ then: -+ properties: -+ clock-names: -+ items: -+ - {} -+ - {} -+ - {} -+ - const: pcie_inbound_axi -+ - if: -+ properties: -+ compatible: -+ contains: -+ const: fsl,imx8mq-pcie -+ then: -+ properties: -+ clock-names: -+ items: -+ - {} -+ - {} -+ - {} -+ - const: pcie_aux -+ - if: -+ properties: -+ compatible: -+ not: -+ contains: -+ enum: -+ - fsl,imx6sx-pcie -+ - fsl,imx8mq-pcie -+ then: -+ properties: -+ clock-names: -+ maxItems: 3 -+ - unevaluatedProperties: false - - examples: -diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml -index 30b6396d83c83..aea0e2bcdd778 100644 ---- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml -+++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml -@@ -36,7 +36,7 @@ properties: - - const: mpu - - interrupts: -- maxItems: 1 -+ maxItems: 2 - - clocks: - items: -@@ -94,8 +94,9 @@ examples: - #interrupt-cells = <1>; - ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>, - <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>; -- interrupts = ; -- interrupt-names = "intr"; -+ interrupts = , -+ ; -+ interrupt-names = "msi", "intr"; - interrupt-map-mask = <0 0 0 7>; - interrupt-map = - <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH -diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml -new file mode 
100644 -index 0000000000000..ff86c87309a41 ---- /dev/null -+++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml -@@ -0,0 +1,78 @@ -+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -+# Copyright 2019 BayLibre, SAS -+%YAML 1.2 -+--- -+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#" -+$schema: "http://devicetree.org/meta-schemas/core.yaml#" -+ -+title: Amlogic G12A USB2 PHY -+ -+maintainers: -+ - Neil Armstrong -+ -+properties: -+ compatible: -+ enum: -+ - amlogic,g12a-usb2-phy -+ - amlogic,a1-usb2-phy -+ -+ reg: -+ maxItems: 1 -+ -+ clocks: -+ maxItems: 1 -+ -+ clock-names: -+ items: -+ - const: xtal -+ -+ resets: -+ maxItems: 1 -+ -+ reset-names: -+ items: -+ - const: phy -+ -+ "#phy-cells": -+ const: 0 -+ -+ phy-supply: -+ description: -+ Phandle to a regulator that provides power to the PHY. This -+ regulator will be managed during the PHY power on/off sequence. -+ -+required: -+ - compatible -+ - reg -+ - clocks -+ - clock-names -+ - resets -+ - reset-names -+ - "#phy-cells" -+ -+if: -+ properties: -+ compatible: -+ enum: -+ - amlogic,meson-a1-usb-ctrl -+ -+then: -+ properties: -+ power-domains: -+ maxItems: 1 -+ required: -+ - power-domains -+ -+additionalProperties: false -+ -+examples: -+ - | -+ phy@36000 { -+ compatible = "amlogic,g12a-usb2-phy"; -+ reg = <0x36000 0x2000>; -+ clocks = <&xtal>; -+ clock-names = "xtal"; -+ resets = <&phy_reset>; -+ reset-names = "phy"; -+ #phy-cells = <0>; -+ }; -diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml -new file mode 100644 -index 0000000000000..84738644e3989 ---- /dev/null -+++ b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml -@@ -0,0 +1,59 @@ -+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -+# Copyright 2019 BayLibre, SAS -+%YAML 1.2 -+--- -+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#" -+$schema: "http://devicetree.org/meta-schemas/core.yaml#" -+ -+title: Amlogic G12A USB3 + PCIE Combo PHY -+ -+maintainers: -+ - Neil Armstrong -+ -+properties: -+ compatible: -+ enum: -+ - amlogic,g12a-usb3-pcie-phy -+ -+ reg: -+ maxItems: 1 -+ -+ clocks: -+ maxItems: 1 -+ -+ clock-names: -+ items: -+ - const: ref_clk -+ -+ resets: -+ maxItems: 1 -+ -+ reset-names: -+ items: -+ - const: phy -+ -+ "#phy-cells": -+ const: 1 -+ -+required: -+ - compatible -+ - reg -+ - clocks -+ - clock-names -+ - resets -+ - reset-names -+ - "#phy-cells" -+ -+additionalProperties: false -+ -+examples: -+ - | -+ phy@46000 { -+ compatible = "amlogic,g12a-usb3-pcie-phy"; -+ reg = <0x46000 0x2000>; -+ clocks = <&ref_clk>; -+ clock-names = "ref_clk"; -+ resets = <&phy_reset>; -+ reset-names = "phy"; -+ #phy-cells = <1>; -+ }; -diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml -deleted file mode 100644 -index 399ebde454095..0000000000000 ---- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml -+++ /dev/null -@@ -1,78 +0,0 @@ --# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) --# Copyright 2019 BayLibre, SAS --%YAML 1.2 ----- --$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#" --$schema: "http://devicetree.org/meta-schemas/core.yaml#" -- --title: Amlogic G12A USB2 PHY -- --maintainers: -- - Neil Armstrong -- --properties: -- compatible: -- enum: -- - amlogic,meson-g12a-usb2-phy -- - amlogic,meson-a1-usb2-phy -- -- reg: -- 
maxItems: 1 -- -- clocks: -- maxItems: 1 -- -- clock-names: -- items: -- - const: xtal -- -- resets: -- maxItems: 1 -- -- reset-names: -- items: -- - const: phy -- -- "#phy-cells": -- const: 0 -- -- phy-supply: -- description: -- Phandle to a regulator that provides power to the PHY. This -- regulator will be managed during the PHY power on/off sequence. -- --required: -- - compatible -- - reg -- - clocks -- - clock-names -- - resets -- - reset-names -- - "#phy-cells" -- --if: -- properties: -- compatible: -- enum: -- - amlogic,meson-a1-usb-ctrl -- --then: -- properties: -- power-domains: -- maxItems: 1 -- required: -- - power-domains -- --additionalProperties: false -- --examples: -- - | -- phy@36000 { -- compatible = "amlogic,meson-g12a-usb2-phy"; -- reg = <0x36000 0x2000>; -- clocks = <&xtal>; -- clock-names = "xtal"; -- resets = <&phy_reset>; -- reset-names = "phy"; -- #phy-cells = <0>; -- }; -diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml -deleted file mode 100644 -index 453c083cf44cb..0000000000000 ---- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml -+++ /dev/null -@@ -1,59 +0,0 @@ --# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) --# Copyright 2019 BayLibre, SAS --%YAML 1.2 ----- --$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#" --$schema: "http://devicetree.org/meta-schemas/core.yaml#" -- --title: Amlogic G12A USB3 + PCIE Combo PHY -- --maintainers: -- - Neil Armstrong -- --properties: -- compatible: -- enum: -- - amlogic,meson-g12a-usb3-pcie-phy -- -- reg: -- maxItems: 1 -- -- clocks: -- maxItems: 1 -- -- clock-names: -- items: -- - const: ref_clk -- -- resets: -- maxItems: 1 -- -- reset-names: -- items: -- - const: phy -- -- "#phy-cells": -- const: 1 -- --required: -- - compatible -- - reg -- - clocks -- - clock-names -- - resets -- - reset-names -- - "#phy-cells" -- --additionalProperties: false -- --examples: -- - | -- phy@46000 { -- compatible = "amlogic,meson-g12a-usb3-pcie-phy"; -- reg = <0x46000 0x2000>; -- clocks = <&ref_clk>; -- clock-names = "ref_clk"; -- resets = <&phy_reset>; -- reset-names = "phy"; -- #phy-cells = <1>; -- }; -diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml -index ad2866c997383..fcd82df3aebbd 100644 ---- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml -+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml -@@ -58,7 +58,7 @@ patternProperties: - $ref: "/schemas/types.yaml#/definitions/string" - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2, - ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4, -- EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, -+ EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, - GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, - GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, - I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, -diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml -index 4fe35e650909c..8dcdd32c2e015 100644 ---- a/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml -+++ b/Documentation/devicetree/bindings/pinctrl/microchip,sparx5-sgpio.yaml 
-@@ -138,7 +138,7 @@ examples: - clocks = <&sys_clk>; - pinctrl-0 = <&sgpio2_pins>; - pinctrl-names = "default"; -- reg = <0x1101059c 0x100>; -+ reg = <0x1101059c 0x118>; - microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>; - bus-frequency = <25000000>; - sgpio_in2: gpio@0 { -diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml -index 61dd5af80db67..37402c370fbbc 100644 ---- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml -+++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml -@@ -31,7 +31,7 @@ properties: - $ref: "regulator.yaml#" - - properties: -- regulator-name: -+ regulator-compatible: - pattern: "^vbuck[1-4]$" - - additionalProperties: false -@@ -55,7 +55,7 @@ examples: - regulator-min-microvolt = <300000>; - regulator-max-microvolt = <1193750>; - regulator-enable-ramp-delay = <256>; -- regulator-allowed-modes = <0 1 2 4>; -+ regulator-allowed-modes = <0 1 2>; - }; - - vbuck3 { -@@ -63,7 +63,7 @@ examples: - regulator-min-microvolt = <300000>; - regulator-max-microvolt = <1193750>; - regulator-enable-ramp-delay = <256>; -- regulator-allowed-modes = <0 1 2 4>; -+ regulator-allowed-modes = <0 1 2>; - }; - }; - }; -diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml -index f70f2e758a002..e66aac0ad735e 100644 ---- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml -+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml -@@ -47,12 +47,6 @@ properties: - description: - Properties for single LDO regulator. - -- properties: -- regulator-name: -- pattern: "^LDO[1-5]$" -- description: -- should be "LDO1", ..., "LDO5" -- - unevaluatedProperties: false - - "^BUCK[1-6]$": -@@ -62,11 +56,6 @@ properties: - Properties for single BUCK regulator. - - properties: -- regulator-name: -- pattern: "^BUCK[1-6]$" -- description: -- should be "BUCK1", ..., "BUCK6" -- - nxp,dvs-run-voltage: - $ref: "/schemas/types.yaml#/definitions/uint32" - minimum: 600000 -diff --git a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.txt b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.txt -index 093edda0c8dfc..6cd83d920155f 100644 ---- a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.txt -+++ b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.txt -@@ -13,6 +13,14 @@ common regulator binding documented in: - - - Required properties of the main device node (the parent!): -+ - s5m8767,pmic-buck-ds-gpios: GPIO specifiers for three host gpio's used -+ for selecting GPIO DVS lines. It is one-to-one mapped to dvs gpio lines. -+ -+ [1] If either of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional -+ property is specified, then all the eight voltage values for the -+ 's5m8767,pmic-buck[2/3/4]-dvs-voltage' should be specified. -+ -+Optional properties of the main device node (the parent!): - - s5m8767,pmic-buck2-dvs-voltage: A set of 8 voltage values in micro-volt (uV) - units for buck2 when changing voltage using gpio dvs. Refer to [1] below - for additional information. -@@ -25,26 +33,13 @@ Required properties of the main device node (the parent!): - units for buck4 when changing voltage using gpio dvs. Refer to [1] below - for additional information. - -- - s5m8767,pmic-buck-ds-gpios: GPIO specifiers for three host gpio's used -- for selecting GPIO DVS lines. 
It is one-to-one mapped to dvs gpio lines. -- -- [1] If none of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional -- property is specified, the 's5m8767,pmic-buck[2/3/4]-dvs-voltage' -- property should specify atleast one voltage level (which would be a -- safe operating voltage). -- -- If either of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional -- property is specified, then all the eight voltage values for the -- 's5m8767,pmic-buck[2/3/4]-dvs-voltage' should be specified. -- --Optional properties of the main device node (the parent!): - - s5m8767,pmic-buck2-uses-gpio-dvs: 'buck2' can be controlled by gpio dvs. - - s5m8767,pmic-buck3-uses-gpio-dvs: 'buck3' can be controlled by gpio dvs. - - s5m8767,pmic-buck4-uses-gpio-dvs: 'buck4' can be controlled by gpio dvs. - - Additional properties required if either of the optional properties are used: - -- - s5m8767,pmic-buck234-default-dvs-idx: Default voltage setting selected from -+ - s5m8767,pmic-buck-default-dvs-idx: Default voltage setting selected from - the possible 8 options selectable by the dvs gpios. The value of this - property should be between 0 and 7. If not specified or if out of range, the - default value of this property is set to 0. -diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml -index 2b1f916038972..b72ec404adcd7 100644 ---- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml -+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml -@@ -47,7 +47,7 @@ properties: - const: 2 - - cache-sets: -- const: 1024 -+ enum: [1024, 2048] - - cache-size: - const: 2097152 -@@ -85,6 +85,8 @@ then: - description: | - Must contain entries for DirError, DataError and DataFail signals. - maxItems: 3 -+ cache-sets: -+ const: 1024 - - else: - properties: -@@ -92,6 +94,8 @@ else: - description: | - Must contain entries for DirError, DataError, DataFail, DirFail signals. 
- minItems: 4 -+ cache-sets: -+ const: 2048 - - additionalProperties: false - -diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml -index 6b8731f7f2fba..1a8d9bf89feb6 100644 ---- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml -+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml -@@ -79,7 +79,7 @@ properties: - - description: Error interrupt - - description: Receive buffer full interrupt - - description: Transmit buffer empty interrupt -- - description: Transmit End interrupt -+ - description: Break interrupt - - items: - - description: Error interrupt - - description: Receive buffer full interrupt -@@ -94,7 +94,7 @@ properties: - - const: eri - - const: rxi - - const: txi -- - const: tei -+ - const: bri - - items: - - const: eri - - const: rxi -diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml -index cc3fe5ed7421e..1b0062e3c1a4b 100644 ---- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml -+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.yaml -@@ -34,6 +34,8 @@ properties: - - qcom,rpm-ipq6018 - - qcom,rpm-msm8226 - - qcom,rpm-msm8916 -+ - qcom,rpm-msm8936 -+ - qcom,rpm-msm8953 - - qcom,rpm-msm8974 - - qcom,rpm-msm8976 - - qcom,rpm-msm8996 -@@ -57,6 +59,7 @@ if: - - qcom,rpm-apq8084 - - qcom,rpm-msm8916 - - qcom,rpm-msm8974 -+ - qcom,rpm-msm8953 - then: - required: - - qcom,smd-channels -diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml -index 2e35aeaa8781d..89e3819c6127a 100644 ---- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml -+++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml -@@ -61,7 +61,7 @@ patternProperties: - description: phandle of the CPU DAI - - patternProperties: -- "^codec-[0-9]+$": -+ "^codec(-[0-9]+)?$": - type: object - description: |- - Codecs: -diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt -index 5d6ea66a863fe..1f75feec3dec6 100644 ---- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt -+++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt -@@ -109,7 +109,7 @@ audio-codec@1{ - reg = <1 0>; - interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "intr2" -- reset-gpios = <&msmgpio 64 0>; -+ reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>; - slim-ifc-dev = <&wc9335_ifd>; - clock-names = "mclk", "native"; - clocks = <&rpmcc RPM_SMD_DIV_CLK1>, -diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml -index acd4bbe697315..4adaf92233c8e 100644 ---- a/Documentation/devicetree/bindings/sound/tas2562.yaml -+++ b/Documentation/devicetree/bindings/sound/tas2562.yaml -@@ -52,7 +52,9 @@ properties: - description: TDM TX current sense time slot. - - '#sound-dai-cells': -- const: 1 -+ # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward -+ # compatibility but is deprecated. 
-+ enum: [0, 1] - - required: - - compatible -@@ -69,7 +71,7 @@ examples: - codec: codec@4c { - compatible = "ti,tas2562"; - reg = <0x4c>; -- #sound-dai-cells = <1>; -+ #sound-dai-cells = <0>; - interrupt-parent = <&gpio1>; - interrupts = <14>; - shutdown-gpios = <&gpio1 15 0>; -diff --git a/Documentation/devicetree/bindings/sound/tas2764.yaml b/Documentation/devicetree/bindings/sound/tas2764.yaml -index 5bf8c76ecda11..1ffe1a01668fe 100644 ---- a/Documentation/devicetree/bindings/sound/tas2764.yaml -+++ b/Documentation/devicetree/bindings/sound/tas2764.yaml -@@ -46,7 +46,9 @@ properties: - description: TDM TX voltage sense time slot. - - '#sound-dai-cells': -- const: 1 -+ # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward -+ # compatibility but is deprecated. -+ enum: [0, 1] - - required: - - compatible -@@ -63,7 +65,7 @@ examples: - codec: codec@38 { - compatible = "ti,tas2764"; - reg = <0x38>; -- #sound-dai-cells = <1>; -+ #sound-dai-cells = <0>; - interrupt-parent = <&gpio1>; - interrupts = <14>; - reset-gpios = <&gpio1 15 0>; -diff --git a/Documentation/devicetree/bindings/sound/tas2770.yaml b/Documentation/devicetree/bindings/sound/tas2770.yaml -index 027bebf4e8cf5..aceba9ed813ef 100644 ---- a/Documentation/devicetree/bindings/sound/tas2770.yaml -+++ b/Documentation/devicetree/bindings/sound/tas2770.yaml -@@ -54,7 +54,9 @@ properties: - - 1 # Falling edge - - '#sound-dai-cells': -- const: 1 -+ # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward -+ # compatibility but is deprecated. -+ enum: [0, 1] - - required: - - compatible -@@ -71,7 +73,7 @@ examples: - codec: codec@41 { - compatible = "ti,tas2770"; - reg = <0x41>; -- #sound-dai-cells = <1>; -+ #sound-dai-cells = <0>; - interrupt-parent = <&gpio1>; - interrupts = <14>; - reset-gpio = <&gpio1 15 0>; -diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml -index 35a8045b2c70d..53627c6e2ae32 100644 ---- a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml -+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml -@@ -106,7 +106,7 @@ examples: - dma-names = "rx", "tx"; - - flash@0 { -- compatible = "spi-nor"; -+ compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <104000000>; - spi-tx-bus-width = <2>; -diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml -index ef5698f426b2c..392204a08e96c 100644 ---- a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml -+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml -@@ -45,6 +45,7 @@ properties: - maxItems: 2 - - interconnect-names: -+ minItems: 1 - items: - - const: qspi-config - - const: qspi-memory -diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml -index 9787be21318e6..82d0ca5c00f3b 100644 ---- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml -+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml -@@ -49,6 +49,13 @@ properties: - enum: [ 0, 1 ] - default: 0 - -+required: -+ - compatible -+ - reg -+ - interrupts -+ - clock-names -+ - clocks -+ - unevaluatedProperties: false - - examples: -diff --git a/Documentation/devicetree/bindings/spi/spi-mxic.txt b/Documentation/devicetree/bindings/spi/spi-mxic.txt -index 529f2dab2648a..7bcbb229b78bb 100644 ---- a/Documentation/devicetree/bindings/spi/spi-mxic.txt 
-+++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt -@@ -8,11 +8,13 @@ Required properties: - - reg: should contain 2 entries, one for the registers and one for the direct - mapping area - - reg-names: should contain "regs" and "dirmap" --- interrupts: interrupt line connected to the SPI controller - - clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk" - - clocks: should contain 3 entries for the "ps_clk", "send_clk" and - "send_dly_clk" clocks - -+Optional properties: -+- interrupts: interrupt line connected to the SPI controller -+ - Example: - - spi@43c30000 { -diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml -index ea72c8001256f..fafde1c06be67 100644 ---- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml -+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml -@@ -30,6 +30,13 @@ properties: - clocks: - maxItems: 2 - -+required: -+ - compatible -+ - reg -+ - interrupts -+ - clock-names -+ - clocks -+ - unevaluatedProperties: false - - examples: -diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml -index a07de5ed0ca6a..2d34f3ccb2572 100644 ---- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml -+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml -@@ -199,12 +199,11 @@ patternProperties: - - contribution: - $ref: /schemas/types.yaml#/definitions/uint32 -- minimum: 0 -- maximum: 100 - description: -- The percentage contribution of the cooling devices at the -- specific trip temperature referenced in this map -- to this thermal zone -+ The cooling contribution to the thermal zone of the referred -+ cooling device at the referred trip point. The contribution is -+ a ratio of the sum of all cooling contributions within a -+ thermal zone. - - required: - - trip -diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml -index dc9d6ed0781d2..5d0bfea2c087e 100644 ---- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml -+++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml -@@ -64,7 +64,7 @@ properties: - description: - size of memory intended as internal memory for endpoints - buffers expressed in KB -- $ref: /schemas/types.yaml#/definitions/uint32 -+ $ref: /schemas/types.yaml#/definitions/uint16 - - cdns,phyrst-a-enable: - description: Enable resetting of PHY if Rx fail is detected -diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml -index 8913497624de2..cb5da1df8d405 100644 ---- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml -+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml -@@ -135,7 +135,8 @@ properties: - Phandle of a companion. 
- - phys: -- maxItems: 1 -+ minItems: 1 -+ maxItems: 3 - - phy-names: - const: usb -diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml -index acbf94fa5f74a..d5fd3aa53ed29 100644 ---- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml -+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml -@@ -102,7 +102,8 @@ properties: - Overrides the detected port count - - phys: -- maxItems: 1 -+ minItems: 1 -+ maxItems: 3 - - phy-names: - const: usb -diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml -index 11f7bacd4e2b0..620cbf00bedb5 100644 ---- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml -+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml -@@ -56,6 +56,7 @@ properties: - - description: optional, wakeup interrupt used to support runtime PM - - interrupt-names: -+ minItems: 1 - items: - - const: host - - const: wakeup -diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml -index 078fb78895937..5d1e49d823c96 100644 ---- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml -+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml -@@ -240,7 +240,7 @@ properties: - description: - High-Speed PHY interface selection between UTMI+ and ULPI when the - DWC_USB3_HSPHY_INTERFACE has value 3. -- $ref: /schemas/types.yaml#/definitions/uint8 -+ $ref: /schemas/types.yaml#/definitions/string - enum: [utmi, ulpi] - - snps,quirk-frame-length-adjustment: -diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.yaml b/Documentation/devicetree/bindings/usb/usb-hcd.yaml -index 56853c17af667..1dc3d5d7b44fe 100644 ---- a/Documentation/devicetree/bindings/usb/usb-hcd.yaml -+++ b/Documentation/devicetree/bindings/usb/usb-hcd.yaml -@@ -33,7 +33,7 @@ patternProperties: - "^.*@[0-9a-f]{1,2}$": - description: The hard wired USB devices - type: object -- $ref: /usb/usb-device.yaml -+ $ref: /schemas/usb/usb-device.yaml - - additionalProperties: true - -diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml -index 76cb9586ee00c..93cd77a6e92c0 100644 ---- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml -+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml -@@ -39,8 +39,8 @@ properties: - samsung,syscon-phandle: - $ref: /schemas/types.yaml#/definitions/phandle - description: -- Phandle to the PMU system controller node (in case of Exynos5250 -- and Exynos5420). -+ Phandle to the PMU system controller node (in case of Exynos5250, -+ Exynos5420 and Exynos7). - - required: - - compatible -@@ -58,6 +58,7 @@ allOf: - enum: - - samsung,exynos5250-wdt - - samsung,exynos5420-wdt -+ - samsung,exynos7-wdt - then: - required: - - samsung,syscon-phandle -diff --git a/Documentation/devicetree/overlay-notes.rst b/Documentation/devicetree/overlay-notes.rst -index b2b8db765b8c6..e139f22b363e9 100644 ---- a/Documentation/devicetree/overlay-notes.rst -+++ b/Documentation/devicetree/overlay-notes.rst -@@ -119,10 +119,32 @@ Finally, if you need to remove all overlays in one-go, just call - of_overlay_remove_all() which will remove every single one in the correct - order. - --In addition, there is the option to register notifiers that get called on -+There is the option to register notifiers that get called on - overlay operations. 
See of_overlay_notifier_register/unregister and - enum of_overlay_notify_action for details. - --Note that a notifier callback is not supposed to store pointers to a device --tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the --respective node it received. -+A notifier callback for OF_OVERLAY_PRE_APPLY, OF_OVERLAY_POST_APPLY, or -+OF_OVERLAY_PRE_REMOVE may store pointers to a device tree node in the overlay -+or its content but these pointers must not persist past the notifier callback -+for OF_OVERLAY_POST_REMOVE. The memory containing the overlay will be -+kfree()ed after OF_OVERLAY_POST_REMOVE notifiers are called. Note that the -+memory will be kfree()ed even if the notifier for OF_OVERLAY_POST_REMOVE -+returns an error. -+ -+The changeset notifiers in drivers/of/dynamic.c are a second type of notifier -+that could be triggered by applying or removing an overlay. These notifiers -+are not allowed to store pointers to a device tree node in the overlay -+or its content. The overlay code does not protect against such pointers -+remaining active when the memory containing the overlay is freed as a result -+of removing the overlay. -+ -+Any other code that retains a pointer to the overlay nodes or data is -+considered to be a bug because after removing the overlay the pointer -+will refer to freed memory. -+ -+Users of overlays must be especially aware of the overall operations that -+occur on the system to ensure that other kernel code does not retain any -+pointers to the overlay nodes or data. Any example of an inadvertent use -+of such pointers is if a driver or subsystem module is loaded after an -+overlay has been applied, and the driver or subsystem scans the entire -+devicetree or a large portion of it, including the overlay nodes. -diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst -index ee268d445d38b..d2e1d8b58e7dc 100644 ---- a/Documentation/driver-api/dmaengine/dmatest.rst -+++ b/Documentation/driver-api/dmaengine/dmatest.rst -@@ -143,13 +143,14 @@ Part 5 - Handling channel allocation - Allocating Channels - ------------------- - --Channels are required to be configured prior to starting the test run. --Attempting to run the test without configuring the channels will fail. -+Channels do not need to be configured prior to starting a test run. Attempting -+to run the test without configuring the channels will result in testing any -+channels that are available. - - Example:: - - % echo 1 > /sys/module/dmatest/parameters/run -- dmatest: Could not start test, no channels configured -+ dmatest: No channels configured, continue with any - - Channels are registered using the "channel" parameter. Channels can be requested by their - name, once requested, the channel is registered and a pending thread is added to the test list. -diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst -index 94a2d7f01d999..d3cfa73cbb2b4 100644 ---- a/Documentation/driver-api/firewire.rst -+++ b/Documentation/driver-api/firewire.rst -@@ -19,7 +19,7 @@ of kernel interfaces is available via exported symbols in `firewire-core` module - Firewire char device data structures - ==================================== - --.. include:: /ABI/stable/firewire-cdev -+.. include:: ../ABI/stable/firewire-cdev - :literal: - - .. 
kernel-doc:: include/uapi/linux/firewire-cdev.h -@@ -28,7 +28,7 @@ Firewire char device data structures - Firewire device probing and sysfs interfaces - ============================================ - --.. include:: /ABI/stable/sysfs-bus-firewire -+.. include:: ../ABI/stable/sysfs-bus-firewire - :literal: - - .. kernel-doc:: drivers/firewire/core-device.c -diff --git a/Documentation/driver-api/firmware/other_interfaces.rst b/Documentation/driver-api/firmware/other_interfaces.rst -index b81794e0cfbb9..06ac89adaafba 100644 ---- a/Documentation/driver-api/firmware/other_interfaces.rst -+++ b/Documentation/driver-api/firmware/other_interfaces.rst -@@ -13,6 +13,12 @@ EDD Interfaces - .. kernel-doc:: drivers/firmware/edd.c - :internal: - -+Generic System Framebuffers Interface -+------------------------------------- -+ -+.. kernel-doc:: drivers/firmware/sysfb.c -+ :export: -+ - Intel Stratix10 SoC Service Layer - --------------------------------- - Some features of the Intel Stratix10 SoC require a level of privilege -diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst -index 64fe7db080e52..252aeb639bc40 100644 ---- a/Documentation/driver-api/generic-counter.rst -+++ b/Documentation/driver-api/generic-counter.rst -@@ -247,7 +247,7 @@ for defining a counter device. - .. kernel-doc:: include/linux/counter.h - :internal: - --.. kernel-doc:: drivers/counter/counter.c -+.. kernel-doc:: drivers/counter/counter-core.c - :export: - - Implementation -diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst -index f64cb666498aa..f28887045049d 100644 ---- a/Documentation/driver-api/spi.rst -+++ b/Documentation/driver-api/spi.rst -@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as - a pair of FIFOs connected to dual DMA engines on the other side of the - SPI shift register (maximizing throughput). Such drivers bridge between - whatever bus they sit on (often the platform bus) and SPI, and expose --the SPI side of their device as a :c:type:`struct spi_master --`. SPI devices are children of that master, -+the SPI side of their device as a :c:type:`struct spi_controller -+`. SPI devices are children of that master, - represented as a :c:type:`struct spi_device ` and - manufactured from :c:type:`struct spi_board_info - ` descriptors which are usually provided by -diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst -index 4a25c5eb6f072..8c47847755a68 100644 ---- a/Documentation/fault-injection/fault-injection.rst -+++ b/Documentation/fault-injection/fault-injection.rst -@@ -83,9 +83,7 @@ configuration of fault-injection capabilities. - - /sys/kernel/debug/fail*/times: - - specifies how many times failures may happen at most. A value of -1 -- means "no limit". Note, though, that this file only accepts unsigned -- values. So, if you want to specify -1, you better use 'printf' instead -- of 'echo', e.g.: $ printf %#x -1 > times -+ means "no limit". 
- - - /sys/kernel/debug/fail*/space: - -@@ -277,7 +275,7 @@ Application Examples - echo Y > /sys/kernel/debug/$FAILTYPE/task-filter - echo 10 > /sys/kernel/debug/$FAILTYPE/probability - echo 100 > /sys/kernel/debug/$FAILTYPE/interval -- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times -+ echo -1 > /sys/kernel/debug/$FAILTYPE/times - echo 0 > /sys/kernel/debug/$FAILTYPE/space - echo 2 > /sys/kernel/debug/$FAILTYPE/verbose - echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait -@@ -331,7 +329,7 @@ Application Examples - echo N > /sys/kernel/debug/$FAILTYPE/task-filter - echo 10 > /sys/kernel/debug/$FAILTYPE/probability - echo 100 > /sys/kernel/debug/$FAILTYPE/interval -- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times -+ echo -1 > /sys/kernel/debug/$FAILTYPE/times - echo 0 > /sys/kernel/debug/$FAILTYPE/space - echo 2 > /sys/kernel/debug/$FAILTYPE/verbose - echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait -@@ -362,7 +360,7 @@ Application Examples - echo N > /sys/kernel/debug/$FAILTYPE/task-filter - echo 100 > /sys/kernel/debug/$FAILTYPE/probability - echo 0 > /sys/kernel/debug/$FAILTYPE/interval -- printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times -+ echo -1 > /sys/kernel/debug/$FAILTYPE/times - echo 0 > /sys/kernel/debug/$FAILTYPE/space - echo 1 > /sys/kernel/debug/$FAILTYPE/verbose - -diff --git a/Documentation/filesystems/autofs-mount-control.rst b/Documentation/filesystems/autofs-mount-control.rst -index bf4b511cdbe85..b5a379d25c40b 100644 ---- a/Documentation/filesystems/autofs-mount-control.rst -+++ b/Documentation/filesystems/autofs-mount-control.rst -@@ -196,7 +196,7 @@ information and return operation results:: - struct args_ismountpoint ismountpoint; - }; - -- char path[0]; -+ char path[]; - }; - - The ioctlfd field is a mount point file descriptor of an autofs mount -diff --git a/Documentation/filesystems/autofs.rst b/Documentation/filesystems/autofs.rst -index 681c6a492bc0c..1b495768e7aaf 100644 ---- a/Documentation/filesystems/autofs.rst -+++ b/Documentation/filesystems/autofs.rst -@@ -467,7 +467,7 @@ Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure:: - struct args_ismountpoint ismountpoint; - }; - -- char path[0]; -+ char path[]; - }; - - For the **OPEN_MOUNT** and **IS_MOUNTPOINT** commands, the target -diff --git a/Documentation/filesystems/directory-locking.rst b/Documentation/filesystems/directory-locking.rst -index 504ba940c36c1..dccd61c7c5c3b 100644 ---- a/Documentation/filesystems/directory-locking.rst -+++ b/Documentation/filesystems/directory-locking.rst -@@ -22,12 +22,11 @@ exclusive. - 3) object removal. Locking rules: caller locks parent, finds victim, - locks victim and calls the method. Locks are exclusive. - --4) rename() that is _not_ cross-directory. Locking rules: caller locks --the parent and finds source and target. In case of exchange (with --RENAME_EXCHANGE in flags argument) lock both. In any case, --if the target already exists, lock it. If the source is a non-directory, --lock it. If we need to lock both, lock them in inode pointer order. --Then call the method. All locks are exclusive. -+4) rename() that is _not_ cross-directory. Locking rules: caller locks the -+parent and finds source and target. We lock both (provided they exist). If we -+need to lock two inodes of different type (dir vs non-dir), we lock directory -+first. If we need to lock two inodes of the same type, lock them in inode -+pointer order. Then call the method. All locks are exclusive. 
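A minimal C sketch of the two-inode ordering rule described above (the
helper name is hypothetical and the lockdep nesting annotation for the
second lock is omitted for brevity)::

    #include <linux/fs.h>

    /* Lock two distinct inodes: a directory is locked before a
     * non-directory; two inodes of the same type are locked in
     * inode pointer order. */
    static void example_lock_two_inodes(struct inode *a, struct inode *b)
    {
            if (S_ISDIR(a->i_mode) != S_ISDIR(b->i_mode)) {
                    if (!S_ISDIR(a->i_mode))
                            swap(a, b);     /* directory first */
            } else if (a > b) {
                    swap(a, b);             /* same type: pointer order */
            }
            inode_lock(a);
            inode_lock(b);
    }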
- NB: we might get away with locking the source (and target in exchange - case) shared. - -@@ -44,15 +43,17 @@ All locks are exclusive. - rules: - - * lock the filesystem -- * lock parents in "ancestors first" order. -+ * lock parents in "ancestors first" order. If one is not ancestor of -+ the other, lock them in inode pointer order. - * find source and target. - * if old parent is equal to or is a descendent of target - fail with -ENOTEMPTY - * if new parent is equal to or is a descendent of source - fail with -ELOOP -- * If it's an exchange, lock both the source and the target. -- * If the target exists, lock it. If the source is a non-directory, -- lock it. If we need to lock both, do so in inode pointer order. -+ * Lock both the source and the target provided they exist. If we -+ need to lock two inodes of different type (dir vs non-dir), we lock -+ the directory first. If we need to lock two inodes of the same type, -+ lock them in inode pointer order. - * call the method. - - All ->i_rwsem are taken exclusive. Again, we might get away with locking -@@ -66,8 +67,9 @@ If no directory is its own ancestor, the scheme above is deadlock-free. - - Proof: - -- First of all, at any moment we have a partial ordering of the -- objects - A < B iff A is an ancestor of B. -+ First of all, at any moment we have a linear ordering of the -+ objects - A < B iff (A is an ancestor of B) or (B is not an ancestor -+ of A and ptr(A) < ptr(B)). - - That ordering can change. However, the following is true: - -diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst -index 54386a010a8d7..871d2da7a0a91 100644 ---- a/Documentation/filesystems/ext4/attributes.rst -+++ b/Documentation/filesystems/ext4/attributes.rst -@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in - - Checksum of the extended attribute block. - * - 0x14 - - \_\_u32 -- - h\_reserved[2] -+ - h\_reserved[3] - - Zero. - - The checksum is calculated against the FS UUID, the 64-bit block number -diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst -index 09de6ebbbdfa2..7fe50b0bccde9 100644 ---- a/Documentation/filesystems/f2fs.rst -+++ b/Documentation/filesystems/f2fs.rst -@@ -197,6 +197,7 @@ fault_type=%d Support configuring fault injection type, should be - FAULT_DISCARD 0x000002000 - FAULT_WRITE_IO 0x000004000 - FAULT_SLAB_ALLOC 0x000008000 -+ FAULT_DQUOT_INIT 0x000010000 - =================== =========== - mode=%s Control block allocation mode which supports "adaptive" - and "lfs". In "lfs" mode, there should be no random -diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst -index 0eb799d9d05a2..7940a45d39522 100644 ---- a/Documentation/filesystems/fscrypt.rst -+++ b/Documentation/filesystems/fscrypt.rst -@@ -176,11 +176,11 @@ Master Keys - - Each encrypted directory tree is protected by a *master key*. Master - keys can be up to 64 bytes long, and must be at least as long as the --greater of the key length needed by the contents and filenames --encryption modes being used. For example, if AES-256-XTS is used for --contents encryption, the master key must be 64 bytes (512 bits). Note --that the XTS mode is defined to require a key twice as long as that --required by the underlying block cipher. -+greater of the security strength of the contents and filenames -+encryption modes being used. For example, if any AES-256 mode is -+used, the master key must be at least 256 bits, i.e. 32 bytes. 
A -+stricter requirement applies if the key is used by a v1 encryption -+policy and AES-256-XTS is used; such keys must be 64 bytes. - - To "unlock" an encrypted directory tree, userspace must provide the - appropriate master key. There can be any number of master keys, each -diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst -index 1229a75ec75dd..7a879ec3b6bf0 100644 ---- a/Documentation/filesystems/idmappings.rst -+++ b/Documentation/filesystems/idmappings.rst -@@ -952,75 +952,3 @@ The raw userspace id that is put on disk is ``u1000`` so when the user takes - their home directory back to their home computer where they are assigned - ``u1000`` using the initial idmapping and mount the filesystem with the initial - idmapping they will see all those files owned by ``u1000``. -- --Shortcircuting ---------------- -- --Currently, the implementation of idmapped mounts enforces that the filesystem --is mounted with the initial idmapping. The reason is simply that none of the --filesystems that we targeted were mountable with a non-initial idmapping. But --that might change soon enough. As we've seen above, thanks to the properties of --idmappings the translation works for both filesystems mounted with the initial --idmapping and filesystem with non-initial idmappings. -- --Based on this current restriction to filesystem mounted with the initial --idmapping two noticeable shortcuts have been taken: -- --1. We always stash a reference to the initial user namespace in ``struct -- vfsmount``. Idmapped mounts are thus mounts that have a non-initial user -- namespace attached to them. -- -- In order to support idmapped mounts this needs to be changed. Instead of -- stashing the initial user namespace the user namespace the filesystem was -- mounted with must be stashed. An idmapped mount is then any mount that has -- a different user namespace attached then the filesystem was mounted with. -- This has no user-visible consequences. -- --2. The translation algorithms in ``mapped_fs*id()`` and ``i_*id_into_mnt()`` -- are simplified. -- -- Let's consider ``mapped_fs*id()`` first. This function translates the -- caller's kernel id into a kernel id in the filesystem's idmapping via -- a mount's idmapping. The full algorithm is:: -- -- mapped_fsuid(kid): -- /* Map the kernel id up into a userspace id in the mount's idmapping. */ -- from_kuid(mount-idmapping, kid) = uid -- -- /* Map the userspace id down into a kernel id in the filesystem's idmapping. */ -- make_kuid(filesystem-idmapping, uid) = kuid -- -- We know that the filesystem is always mounted with the initial idmapping as -- we enforce this in ``mount_setattr()``. So this can be shortened to:: -- -- mapped_fsuid(kid): -- /* Map the kernel id up into a userspace id in the mount's idmapping. */ -- from_kuid(mount-idmapping, kid) = uid -- -- /* Map the userspace id down into a kernel id in the filesystem's idmapping. */ -- KUIDT_INIT(uid) = kuid -- -- Similarly, for ``i_*id_into_mnt()`` which translated the filesystem's kernel -- id into a mount's kernel id:: -- -- i_uid_into_mnt(kid): -- /* Map the kernel id up into a userspace id in the filesystem's idmapping. */ -- from_kuid(filesystem-idmapping, kid) = uid -- -- /* Map the userspace id down into a kernel id in the mounts's idmapping. */ -- make_kuid(mount-idmapping, uid) = kuid -- -- Again, we know that the filesystem is always mounted with the initial -- idmapping as we enforce this in ``mount_setattr()``. 
So this can be -- shortened to:: -- -- i_uid_into_mnt(kid): -- /* Map the kernel id up into a userspace id in the filesystem's idmapping. */ -- __kuid_val(kid) = uid -- -- /* Map the userspace id down into a kernel id in the mounts's idmapping. */ -- make_kuid(mount-idmapping, uid) = kuid -- --Handling filesystems mounted with non-initial idmappings requires that the --translation functions be converted to their full form. They can still be --shortcircuited on non-idmapped mounts. This has no user-visible consequences. -diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst -index bf5c48066fac5..a15527940b461 100644 ---- a/Documentation/filesystems/vfs.rst -+++ b/Documentation/filesystems/vfs.rst -@@ -274,6 +274,9 @@ or bottom half). - This is specifically for the inode itself being marked dirty, - not its data. If the update needs to be persisted by fdatasync(), - then I_DIRTY_DATASYNC will be set in the flags argument. -+ I_DIRTY_TIME will be set in the flags in case lazytime is enabled -+ and struct inode has times updated since the last ->dirty_inode -+ call. - - ``write_inode`` - this method is called when the VFS needs to write an inode to -@@ -1207,7 +1210,7 @@ defined: - return - -ECHILD and it will be called again in ref-walk mode. - --``_weak_revalidate`` -+``d_weak_revalidate`` - called when the VFS needs to revalidate a "jumped" dentry. This - is called when a path-walk ends at dentry that was not acquired - by doing a lookup in the parent directory. This includes "/", -diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst -index c042176e17078..50ac87fa22958 100644 ---- a/Documentation/firmware-guide/acpi/apei/einj.rst -+++ b/Documentation/firmware-guide/acpi/apei/einj.rst -@@ -168,7 +168,7 @@ An error injection example:: - 0x00000008 Memory Correctable - 0x00000010 Memory Uncorrectable non-fatal - # echo 0x12345000 > param1 # Set memory address for injection -- # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page -+ # echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page - # echo 0x8 > error_type # Choose correctable memory error - # echo 1 > error_inject # Inject now - -diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst -index b7ad47df49de0..8b65b32e6e40e 100644 ---- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst -+++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst -@@ -5,7 +5,7 @@ - Referencing hierarchical data nodes - =================================== - --:Copyright: |copy| 2018 Intel Corporation -+:Copyright: |copy| 2018, 2021 Intel Corporation - :Author: Sakari Ailus - - ACPI in general allows referring to device objects in the tree only. -@@ -52,12 +52,14 @@ the ANOD object which is also the final target node of the reference. - Name (NOD0, Package() { - ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), - Package () { -+ Package () { "reg", 0 }, - Package () { "random-property", 3 }, - } - }) - Name (NOD1, Package() { - ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), - Package () { -+ Package () { "reg", 1 }, - Package () { "anothernode", "ANOD" }, - } - }) -@@ -74,7 +76,11 @@ the ANOD object which is also the final target node of the reference. 
- Name (_DSD, Package () { - ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), - Package () { -- Package () { "reference", ^DEV0, "node@1", "anothernode" }, -+ Package () { -+ "reference", Package () { -+ ^DEV0, "node@1", "anothernode" -+ } -+ }, - } - }) - } -diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst -index 204ebdaadb45a..03021dfa0dd81 100644 ---- a/Documentation/gpu/i915.rst -+++ b/Documentation/gpu/i915.rst -@@ -183,25 +183,25 @@ Frame Buffer Compression (FBC) - Display Refresh Rate Switching (DRRS) - ------------------------------------- - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :doc: Display Refresh Rate Switching (DRRS) - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_dp_set_drrs_state - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_edp_drrs_enable - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_edp_drrs_disable - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_edp_drrs_invalidate - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_edp_drrs_flush - --.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c -+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c - :functions: intel_dp_drrs_init - - DPIO -diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst -index 12e61869939e8..67de1e94fdf76 100644 ---- a/Documentation/gpu/todo.rst -+++ b/Documentation/gpu/todo.rst -@@ -311,27 +311,6 @@ Contact: Daniel Vetter, Noralf Tronnes - - Level: Advanced - --Garbage collect fbdev scrolling acceleration ---------------------------------------------- -- --Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = --SCROLL_REDRAW. There's a ton of code this will allow us to remove: -- --- lots of code in fbcon.c -- --- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called -- directly instead of the function table (with a switch on p->rotate) -- --- fb_copyarea is unused after this, and can be deleted from all drivers -- --Note that not all acceleration code can be deleted, since clearing and cursor --support is still accelerated, which might be good candidates for further --deletion projects. -- --Contact: Daniel Vetter -- --Level: Intermediate -- - idr_init_base() - --------------- - -diff --git a/Documentation/hwmon/ftsteutates.rst b/Documentation/hwmon/ftsteutates.rst -index 58a2483d8d0da..198fa8e2819da 100644 ---- a/Documentation/hwmon/ftsteutates.rst -+++ b/Documentation/hwmon/ftsteutates.rst -@@ -22,6 +22,10 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and - 8 fans. It also contains an integrated watchdog which is currently - implemented in this driver. - -+The 4 voltages require a board-specific multiplier, since the BMC can -+only measure voltages up to 3.3V and thus relies on voltage dividers. -+Consult your motherboard manual for details. 
-+ - To clear a temperature or fan alarm, execute the following command with the - correct path to the alarm file:: - -diff --git a/Documentation/hwmon/lm90.rst b/Documentation/hwmon/lm90.rst -index 3da8c6e06a365..05391fb4042d9 100644 ---- a/Documentation/hwmon/lm90.rst -+++ b/Documentation/hwmon/lm90.rst -@@ -265,6 +265,16 @@ Supported chips: - - https://www.ti.com/litv/pdf/sbos686 - -+ * Texas Instruments TMP461 -+ -+ Prefix: 'tmp461' -+ -+ Addresses scanned: I2C 0x48 through 0x4F -+ -+ Datasheet: Publicly available at TI website -+ -+ https://www.ti.com/lit/gpn/tmp461 -+ - Author: Jean Delvare - - -diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst -index f615906a0821b..6d721396717a2 100644 ---- a/Documentation/input/joydev/joystick.rst -+++ b/Documentation/input/joydev/joystick.rst -@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes: - * AVB Mag Turbo Force - * AVB Top Shot Pegasus - * AVB Top Shot Force Feedback Racing Wheel -+* Boeder Force Feedback Wheel - * Logitech WingMan Force - * Logitech WingMan Force Wheel - * Guillemot Race Leader Force Feedback -diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst -index 90bc3f51eda97..d431718921b79 100644 ---- a/Documentation/kernel-hacking/locking.rst -+++ b/Documentation/kernel-hacking/locking.rst -@@ -1352,7 +1352,7 @@ Mutex API reference - Futex API reference - =================== - --.. kernel-doc:: kernel/futex.c -+.. kernel-doc:: kernel/futex/core.c - :internal: - - Further reading -diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst -index ddada4a537493..4fd7b70fcde19 100644 ---- a/Documentation/locking/locktypes.rst -+++ b/Documentation/locking/locktypes.rst -@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels:: - spin_lock(&p->lock); - p->count += this_cpu_read(var2); - --On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable() --which makes the above code fully equivalent. On a PREEMPT_RT kernel - migrate_disable() ensures that the task is pinned on the current CPU which - in turn guarantees that the per-CPU access to var1 and var2 are staying on --the same CPU. -+the same CPU while the task remains preemptible. - - The migrate_disable() substitution is not valid for the following - scenario:: -@@ -456,9 +454,8 @@ scenario:: - p = this_cpu_ptr(&var1); - p->val = func2(); - --While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because --here migrate_disable() does not protect against reentrancy from a --preempting task. A correct substitution for this case is:: -+This breaks because migrate_disable() does not protect against reentrancy from -+a preempting task. A correct substitution for this case is:: - - func() - { -diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst -index 60b217b436be6..5b77b9e5ac7e6 100644 ---- a/Documentation/networking/af_xdp.rst -+++ b/Documentation/networking/af_xdp.rst -@@ -433,6 +433,15 @@ start N bytes into the buffer leaving the first N bytes for the - application to use. The final option is the flags field, but it will - be dealt with in separate sections for each UMEM flag. - -+SO_BINDTODEVICE setsockopt -+-------------------------- -+ -+This is a generic SOL_SOCKET option that can be used to tie AF_XDP -+socket to a particular network interface. It is useful when a socket -+is created by a privileged process and passed to a non-privileged one. 
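For example, a minimal userspace sketch of pinning an AF_XDP socket to one
interface before handing it to an unprivileged process (error handling is
kept to a bare minimum; AF_XDP is assumed to be provided by the system
socket headers)::

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>

    static int make_pinned_xsk(const char *ifname)
    {
            int fd = socket(AF_XDP, SOCK_RAW, 0);

            if (fd < 0)
                    return -1;
            /* Tie the socket to one interface. */
            if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                           ifname, strlen(ifname)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }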
-+Once the option is set, kernel will refuse attempts to bind that socket -+to a different interface. Updating the value requires CAP_NET_RAW. -+ - XDP_STATISTICS getsockopt - ------------------------- - -diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst -index 31cfd7d674a6c..ab98373535ea6 100644 ---- a/Documentation/networking/bonding.rst -+++ b/Documentation/networking/bonding.rst -@@ -196,11 +196,12 @@ ad_actor_sys_prio - ad_actor_system - - In an AD system, this specifies the mac-address for the actor in -- protocol packet exchanges (LACPDUs). The value cannot be NULL or -- multicast. It is preferred to have the local-admin bit set for this -- mac but driver does not enforce it. If the value is not given then -- system defaults to using the masters' mac address as actors' system -- address. -+ protocol packet exchanges (LACPDUs). The value cannot be a multicast -+ address. If the all-zeroes MAC is specified, bonding will internally -+ use the MAC of the bond itself. It is preferred to have the -+ local-admin bit set for this mac but driver does not enforce it. If -+ the value is not given then system defaults to using the masters' -+ mac address as actors' system address. - - This parameter has effect only in 802.3ad mode and is available through - SysFs interface. -@@ -421,6 +422,17 @@ arp_all_targets - consider the slave up only when all of the arp_ip_targets - are reachable - -+arp_missed_max -+ -+ Specifies the number of arp_interval monitor checks that must -+ fail in order for an interface to be marked down by the ARP monitor. -+ -+ In order to provide orderly failover semantics, backup interfaces -+ are permitted an extra monitor check (i.e., they must fail -+ arp_missed_max + 1 times before being marked down). -+ -+ The default value is 2, and the allowable range is 1 - 255. -+ - downdelay - - Specifies the time, in milliseconds, to wait before disabling -diff --git a/Documentation/networking/decnet.rst b/Documentation/networking/decnet.rst -deleted file mode 100644 -index b8bc11ff8370d..0000000000000 ---- a/Documentation/networking/decnet.rst -+++ /dev/null -@@ -1,243 +0,0 @@ --.. SPDX-License-Identifier: GPL-2.0 -- --========================================= --Linux DECnet Networking Layer Information --========================================= -- --1. Other documentation.... --========================== -- -- - Project Home Pages -- - http://www.chygwyn.com/ - Kernel info -- - http://linux-decnet.sourceforge.net/ - Userland tools -- - http://www.sourceforge.net/projects/linux-decnet/ - Status page -- --2. Configuring the kernel --========================= -- --Be sure to turn on the following options: -- -- - CONFIG_DECNET (obviously) -- - CONFIG_PROC_FS (to see what's going on) -- - CONFIG_SYSCTL (for easy configuration) -- --if you want to try out router support (not properly debugged yet) --you'll need the following options as well... -- -- - CONFIG_DECNET_ROUTER (to be able to add/delete routes) -- - CONFIG_NETFILTER (will be required for the DECnet routing daemon) -- --Don't turn on SIOCGIFCONF support for DECnet unless you are really sure --that you need it, in general you won't and it can cause ifconfig to --malfunction. -- --Run time configuration has changed slightly from the 2.4 system. If you --want to configure an endnode, then the simplified procedure is as follows: -- -- - Set the MAC address on your ethernet card before starting _any_ other -- network protocols. 
-- --As soon as your network card is brought into the UP state, DECnet should --start working. If you need something more complicated or are unsure how --to set the MAC address, see the next section. Also all configurations which --worked with 2.4 will work under 2.5 with no change. -- --3. Command line options --======================= -- --You can set a DECnet address on the kernel command line for compatibility --with the 2.4 configuration procedure, but in general it's not needed any more. --If you do st a DECnet address on the command line, it has only one purpose --which is that its added to the addresses on the loopback device. -- --With 2.4 kernels, DECnet would only recognise addresses as local if they --were added to the loopback device. In 2.5, any local interface address --can be used to loop back to the local machine. Of course this does not --prevent you adding further addresses to the loopback device if you --want to. -- --N.B. Since the address list of an interface determines the addresses for --which "hello" messages are sent, if you don't set an address on the loopback --interface then you won't see any entries in /proc/net/neigh for the local --host until such time as you start a connection. This doesn't affect the --operation of the local communications in any other way though. -- --The kernel command line takes options looking like the following:: -- -- decnet.addr=1,2 -- --the two numbers are the node address 1,2 = 1.2 For 2.2.xx kernels --and early 2.3.xx kernels, you must use a comma when specifying the --DECnet address like this. For more recent 2.3.xx kernels, you may --use almost any character except space, although a `.` would be the most --obvious choice :-) -- --There used to be a third number specifying the node type. This option --has gone away in favour of a per interface node type. This is now set --using /proc/sys/net/decnet/conf//forwarding. This file can be --set with a single digit, 0=EndNode, 1=L1 Router and 2=L2 Router. -- --There are also equivalent options for modules. The node address can --also be set through the /proc/sys/net/decnet/ files, as can other system --parameters. -- --Currently the only supported devices are ethernet and ip_gre. The --ethernet address of your ethernet card has to be set according to the DECnet --address of the node in order for it to be autoconfigured (and then appear in --/proc/net/decnet_dev). There is a utility available at the above --FTP sites called dn2ethaddr which can compute the correct ethernet --address to use. The address can be set by ifconfig either before or --at the time the device is brought up. If you are using RedHat you can --add the line:: -- -- MACADDR=AA:00:04:00:03:04 -- --or something similar, to /etc/sysconfig/network-scripts/ifcfg-eth0 or --wherever your network card's configuration lives. Setting the MAC address --of your ethernet card to an address starting with "hi-ord" will cause a --DECnet address which matches to be added to the interface (which you can --verify with iproute2). -- --The default device for routing can be set through the /proc filesystem --by setting /proc/sys/net/decnet/default_device to the --device you want DECnet to route packets out of when no specific route --is available. Usually this will be eth0, for example:: -- -- echo -n "eth0" >/proc/sys/net/decnet/default_device -- --If you don't set the default device, then it will default to the first --ethernet card which has been autoconfigured as described above. 
You can --confirm that by looking in the default_device file of course. -- --There is a list of what the other files under /proc/sys/net/decnet/ do --on the kernel patch web site (shown above). -- --4. Run time kernel configuration --================================ -- -- --This is either done through the sysctl/proc interface (see the kernel web --pages for details on what the various options do) or through the iproute2 --package in the same way as IPv4/6 configuration is performed. -- --Documentation for iproute2 is included with the package, although there is --as yet no specific section on DECnet, most of the features apply to both --IP and DECnet, albeit with DECnet addresses instead of IP addresses and --a reduced functionality. -- --If you want to configure a DECnet router you'll need the iproute2 package --since its the _only_ way to add and delete routes currently. Eventually --there will be a routing daemon to send and receive routing messages for --each interface and update the kernel routing tables accordingly. The --routing daemon will use netfilter to listen to routing packets, and --rtnetlink to update the kernels routing tables. -- --The DECnet raw socket layer has been removed since it was there purely --for use by the routing daemon which will now use netfilter (a much cleaner --and more generic solution) instead. -- --5. How can I tell if its working? --================================= -- --Here is a quick guide of what to look for in order to know if your DECnet --kernel subsystem is working. -- -- - Is the node address set (see /proc/sys/net/decnet/node_address) -- - Is the node of the correct type -- (see /proc/sys/net/decnet/conf//forwarding) -- - Is the Ethernet MAC address of each Ethernet card set to match -- the DECnet address. If in doubt use the dn2ethaddr utility available -- at the ftp archive. -- - If the previous two steps are satisfied, and the Ethernet card is up, -- you should find that it is listed in /proc/net/decnet_dev and also -- that it appears as a directory in /proc/sys/net/decnet/conf/. The -- loopback device (lo) should also appear and is required to communicate -- within a node. -- - If you have any DECnet routers on your network, they should appear -- in /proc/net/decnet_neigh, otherwise this file will only contain the -- entry for the node itself (if it doesn't check to see if lo is up). -- - If you want to send to any node which is not listed in the -- /proc/net/decnet_neigh file, you'll need to set the default device -- to point to an Ethernet card with connection to a router. This is -- again done with the /proc/sys/net/decnet/default_device file. -- - Try starting a simple server and client, like the dnping/dnmirror -- over the loopback interface. With luck they should communicate. -- For this step and those after, you'll need the DECnet library -- which can be obtained from the above ftp sites as well as the -- actual utilities themselves. -- - If this seems to work, then try talking to a node on your local -- network, and see if you can obtain the same results. -- - At this point you are on your own... :-) -- --6. How to send a bug report --=========================== -- --If you've found a bug and want to report it, then there are several things --you can do to help me work out exactly what it is that is wrong. Useful --information (_most_ of which _is_ _essential_) includes: -- -- - What kernel version are you running ? -- - What version of the patch are you running ? -- - How far though the above set of tests can you get ? 
-- - What is in the /proc/decnet* files and /proc/sys/net/decnet/* files ? -- - Which services are you running ? -- - Which client caused the problem ? -- - How much data was being transferred ? -- - Was the network congested ? -- - How can the problem be reproduced ? -- - Can you use tcpdump to get a trace ? (N.B. Most (all?) versions of -- tcpdump don't understand how to dump DECnet properly, so including -- the hex listing of the packet contents is _essential_, usually the -x flag. -- You may also need to increase the length grabbed with the -s flag. The -- -e flag also provides very useful information (ethernet MAC addresses)) -- --7. MAC FAQ --========== -- --A quick FAQ on ethernet MAC addresses to explain how Linux and DECnet --interact and how to get the best performance from your hardware. -- --Ethernet cards are designed to normally only pass received network frames --to a host computer when they are addressed to it, or to the broadcast address. -- --Linux has an interface which allows the setting of extra addresses for --an ethernet card to listen to. If the ethernet card supports it, the --filtering operation will be done in hardware, if not the extra unwanted packets --received will be discarded by the host computer. In the latter case, --significant processor time and bus bandwidth can be used up on a busy --network (see the NAPI documentation for a longer explanation of these --effects). -- --DECnet makes use of this interface to allow running DECnet on an ethernet --card which has already been configured using TCP/IP (presumably using the --built in MAC address of the card, as usual) and/or to allow multiple DECnet --addresses on each physical interface. If you do this, be aware that if your --ethernet card doesn't support perfect hashing in its MAC address filter --then your computer will be doing more work than required. Some cards --will simply set themselves into promiscuous mode in order to receive --packets from the DECnet specified addresses. So if you have one of these --cards its better to set the MAC address of the card as described above --to gain the best efficiency. Better still is to use a card which supports --NAPI as well. -- -- --8. Mailing list --=============== -- --If you are keen to get involved in development, or want to ask questions --about configuration, or even just report bugs, then there is a mailing --list that you can join, details are at: -- --http://sourceforge.net/mail/?group_id=4993 -- --9. Legal Info --============= -- --The Linux DECnet project team have placed their code under the GPL. The --software is provided "as is" and without warranty express or implied. --DECnet is a trademark of Compaq. This software is not a product of --Compaq. We acknowledge the help of people at Compaq in providing extra --documentation above and beyond what was previously publicly available. -- --Steve Whitehouse -- -diff --git a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst -index f1d5233e5e510..0a233b17c664e 100644 ---- a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst -+++ b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst -@@ -440,6 +440,22 @@ NOTE: For 82599-based network connections, if you are enabling jumbo frames in - a virtual function (VF), jumbo frames must first be enabled in the physical - function (PF). The VF MTU setting cannot be larger than the PF MTU. 
- -+NBASE-T Support -+--------------- -+The ixgbe driver supports NBASE-T on some devices. However, the advertisement -+of NBASE-T speeds is suppressed by default, to accommodate broken network -+switches which cannot cope with advertised NBASE-T speeds. Use the ethtool -+command to enable advertising NBASE-T speeds on devices which support it:: -+ -+ ethtool -s eth? advertise 0x1800000001028 -+ -+On Linux systems with INTERFACES(5), this can be specified as a pre-up command -+in /etc/network/interfaces so that the interface is always brought up with -+NBASE-T support, e.g.:: -+ -+ iface eth? inet dhcp -+ pre-up ethtool -s eth? advertise 0x1800000001028 || true -+ - Generic Receive Offload, aka GRO - -------------------------------- - The driver supports the in-kernel software implementation of GRO. GRO has -diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst -index 58bc8cd367c67..fdfc73d4c90a8 100644 ---- a/Documentation/networking/index.rst -+++ b/Documentation/networking/index.rst -@@ -46,7 +46,6 @@ Contents: - cdc_mbim - dccp - dctcp -- decnet - dns_resolver - driver - eql -diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst -index d91ab28718d49..7890b395e629b 100644 ---- a/Documentation/networking/ip-sysctl.rst -+++ b/Documentation/networking/ip-sysctl.rst -@@ -322,6 +322,8 @@ tcp_app_win - INTEGER - Reserve max(window/2^tcp_app_win, mss) of window for application - buffer. Value 0 is special, it means that nothing is reserved. - -+ Possible values are [0, 31], inclusive. -+ - Default: 31 - - tcp_autocorking - BOOLEAN -@@ -1063,7 +1065,7 @@ cipso_cache_enable - BOOLEAN - cipso_cache_bucket_size - INTEGER - The CIPSO label cache consists of a fixed size hash table with each - hash bucket containing a number of cache entries. This variable limits -- the number of entries in each hash bucket; the larger the value the -+ the number of entries in each hash bucket; the larger the value is, the - more CIPSO label mappings that can be cached. When the number of - entries in a given hash bucket reaches this limit adding new entries - causes the oldest entry in the bucket to be removed to make room. -@@ -1157,7 +1159,7 @@ ip_autobind_reuse - BOOLEAN - option should only be set by experts. - Default: 0 - --ip_dynaddr - BOOLEAN -+ip_dynaddr - INTEGER - If set non-zero, enables support for dynamic addresses. - If set to a non-zero value larger than 1, a kernel log - message will be printed when dynamic address rewriting -@@ -2808,7 +2810,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max - Default: 4K - - sctp_wmem - vector of 3 INTEGERs: min, default, max -- Currently this tunable has no effect. -+ Only the first value ("min") is used, "default" and "max" are -+ ignored. -+ -+ min: Minimum size of send buffer that can be used by SCTP sockets. -+ It is guaranteed to each SCTP socket (but not association) even -+ under moderate memory pressure. -+ -+ Default: 4K - - addr_scope_policy - INTEGER - Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00 -diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst -index 2afccc63856ee..1cfbf1add2fc9 100644 ---- a/Documentation/networking/ipvs-sysctl.rst -+++ b/Documentation/networking/ipvs-sysctl.rst -@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER - - 0: disable any special handling on port reuse. The new - connection will be delivered to the same real server that was -- servicing the previous connection. 
This will effectively -- disable expire_nodest_conn. -+ servicing the previous connection. - - bit 1: enable rescheduling of new connections when it is safe. - That is, whenever expire_nodest_conn and for TCP sockets, when -diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst -index e899f14a4ba24..43da2cc2e3b9b 100644 ---- a/Documentation/process/code-of-conduct-interpretation.rst -+++ b/Documentation/process/code-of-conduct-interpretation.rst -@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're - uncertain how to handle situations that come up. It will not be - considered a violation report unless you want it to be. If you are - uncertain about approaching the TAB or any other maintainers, please --reach out to our conflict mediator, Mishi Choudhary . -+reach out to our conflict mediator, Joanna Lee . - - In the end, "be kind to each other" is really what the end goal is for - everybody. We know everyone is human and we all fail at times, but the -diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst -index 8ced754a5a0f6..f3484f60eae59 100644 ---- a/Documentation/process/deprecated.rst -+++ b/Documentation/process/deprecated.rst -@@ -70,6 +70,9 @@ Instead, the 2-factor form of the allocator should be used:: - - foo = kmalloc_array(count, size, GFP_KERNEL); - -+Specifically, kmalloc() can be replaced with kmalloc_array(), and -+kzalloc() can be replaced with kcalloc(). -+ - If no 2-factor form is available, the saturate-on-overflow helpers should - be used:: - -@@ -90,9 +93,20 @@ Instead, use the helper:: - array usage and switch to a `flexible array member - <#zero-length-and-one-element-arrays>`_ instead. - --See array_size(), array3_size(), and struct_size(), --for more details as well as the related check_add_overflow() and --check_mul_overflow() family of functions. -+For other calculations, please compose the use of the size_mul(), -+size_add(), and size_sub() helpers. For example, in the case of:: -+ -+ foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL); -+ -+Instead, use the helpers:: -+ -+ foo = krealloc(size_add(current_size, -+ size_mul(chunk_size, -+ size_sub(count, 3))), GFP_KERNEL); -+ -+For more details, also see array3_size() and flex_array_size(), -+as well as the related check_mul_overflow(), check_add_overflow(), -+check_sub_overflow(), and check_shl_overflow() family of functions. - - simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull() - ---------------------------------------------------------------------- -diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst -index 003c865e9c212..fbcb48bc2a903 100644 ---- a/Documentation/process/stable-kernel-rules.rst -+++ b/Documentation/process/stable-kernel-rules.rst -@@ -168,7 +168,16 @@ Trees - - The finalized and tagged releases of all stable kernels can be found - in separate branches per version at: - -- https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git -+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git -+ -+ - The release candidate of all stable kernel versions can be found at: -+ -+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/ -+ -+ .. warning:: -+ The -stable-rc tree is a snapshot in time of the stable-queue tree and -+ will change frequently, hence will be rebased often. It should only be -+ used for testing purposes (e.g. 
to be consumed by CI systems). - - - Review committee -diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst -index 8ad6b93f91e6d..025272139539c 100644 ---- a/Documentation/process/submitting-patches.rst -+++ b/Documentation/process/submitting-patches.rst -@@ -72,7 +72,7 @@ as you intend it to. - - The maintainer will thank you if you write your patch description in a - form which can be easily pulled into Linux's source code management --system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`. -+system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`. - - Solve only one problem per patch. If your description starts to get - long, that's a sign that you probably need to split up your patch. -diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst -index b7f98930d38d3..a2ec11da38b04 100644 ---- a/Documentation/riscv/vm-layout.rst -+++ b/Documentation/riscv/vm-layout.rst -@@ -48,7 +48,7 @@ RISC-V Linux Kernel SV39 - ____________________________________________________________|___________________________________________________________ - | | | | - ffffffc000000000 | -256 GB | ffffffc7ffffffff | 32 GB | kasan -- ffffffcefee00000 | -196 GB | ffffffcefeffffff | 2 MB | fixmap -+ ffffffcefea00000 | -196 GB | ffffffcefeffffff | 6 MB | fixmap - ffffffceff000000 | -196 GB | ffffffceffffffff | 16 MB | PCI io - ffffffcf00000000 | -196 GB | ffffffcfffffffff | 4 GB | vmemmap - ffffffd000000000 | -192 GB | ffffffdfffffffff | 64 GB | vmalloc/ioremap space -diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst -index 63ddea2b96408..7c06e7fb9a316 100644 ---- a/Documentation/scsi/scsi_mid_low_api.rst -+++ b/Documentation/scsi/scsi_mid_low_api.rst -@@ -1190,11 +1190,11 @@ Members of interest: - - pointer to scsi_device object that this command is - associated with. - resid -- - an LLD should set this signed integer to the requested -+ - an LLD should set this unsigned integer to the requested - transfer length (i.e. 'request_bufflen') less the number - of bytes that are actually transferred. 'resid' is - preset to 0 so an LLD can ignore it if it cannot detect -- underruns (overruns should be rare). If possible an LLD -+ underruns (overruns should not be reported). An LLD - should set 'resid' prior to invoking 'done'. The most - interesting case is data transfers from a SCSI target - device (e.g. READs) that underrun. -diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst -index 65f61695f5618..5d093fb4896b5 100644 ---- a/Documentation/sound/alsa-configuration.rst -+++ b/Documentation/sound/alsa-configuration.rst -@@ -2237,7 +2237,7 @@ implicit_fb - Apply the generic implicit feedback sync mode. When this is set - and the playback stream sync mode is ASYNC, the driver tries to - tie an adjacent ASYNC capture stream as the implicit feedback -- source. -+ source. This is equivalent with quirk_flags bit 17. - use_vmalloc - Use vmalloc() for allocations of the PCM buffers (default: yes). - For architectures with non-coherent memory like ARM or MIPS, the -@@ -2279,6 +2279,8 @@ quirk_flags - * bit 14: Ignore errors for mixer access - * bit 15: Support generic DSD raw U32_BE format - * bit 16: Set up the interface at first like UAC1 -+ * bit 17: Apply the generic implicit feedback sync mode -+ * bit 18: Don't apply implicit feedback sync mode - - This module supports multiple devices, autoprobe and hotplugging. 
- -diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst -index 0ea967d345838..1204304500147 100644 ---- a/Documentation/sound/hd-audio/models.rst -+++ b/Documentation/sound/hd-audio/models.rst -@@ -261,6 +261,10 @@ alc-sense-combo - huawei-mbx-stereo - Enable initialization verbs for Huawei MBX stereo speakers; - might be risky, try this at your own risk -+alc298-samsung-headphone -+ Samsung laptops with ALC298 -+alc256-samsung-headphone -+ Samsung laptops with ALC256 - - ALC66x/67x/892 - ============== -@@ -326,6 +330,8 @@ usi-headset - Headset support on USI machines - dual-codecs - Lenovo laptops with dual codecs -+alc285-hp-amp-init -+ HP laptops which require speaker amplifier initialization (ALC285) - - ALC680 - ====== -@@ -698,7 +704,7 @@ ref - no-jd - BIOS setup but without jack-detection - intel -- Intel DG45* mobos -+ Intel D*45* mobos - dell-m6-amic - Dell desktops/laptops with analog mics - dell-m6-dmic -diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py -index eeb394b39e2cc..8b416bfd75ac1 100644 ---- a/Documentation/sphinx/load_config.py -+++ b/Documentation/sphinx/load_config.py -@@ -3,7 +3,7 @@ - - import os - import sys --from sphinx.util.pycompat import execfile_ -+from sphinx.util.osutil import fs_encoding - - # ------------------------------------------------------------------------------ - def loadConfig(namespace): -@@ -48,7 +48,9 @@ def loadConfig(namespace): - sys.stdout.write("load additional sphinx-config: %s\n" % config_file) - config = namespace.copy() - config['__file__'] = config_file -- execfile_(config_file, config) -+ with open(config_file, 'rb') as f: -+ code = compile(f.read(), fs_encoding, 'exec') -+ exec(code, config) - del config['__file__'] - namespace.update(config) - else: -diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt -index 9a35f50798a65..2c573541ab712 100644 ---- a/Documentation/sphinx/requirements.txt -+++ b/Documentation/sphinx/requirements.txt -@@ -1,2 +1,4 @@ -+# jinja2>=3.1 is not compatible with Sphinx<4.0 -+jinja2<3.1 - sphinx_rtd_theme - Sphinx==2.4.4 -diff --git a/Documentation/trace/coresight/coresight-config.rst b/Documentation/trace/coresight/coresight-config.rst -index a4e3ef2952401..6ed13398ca2ce 100644 ---- a/Documentation/trace/coresight/coresight-config.rst -+++ b/Documentation/trace/coresight/coresight-config.rst -@@ -211,19 +211,13 @@ also declared in the perf 'cs_etm' event infrastructure so that they can - be selected when running trace under perf:: - - $ ls /sys/devices/cs_etm -- configurations format perf_event_mux_interval_ms sinks type -- events nr_addr_filters power -+ cpu0 cpu2 events nr_addr_filters power subsystem uevent -+ cpu1 cpu3 format perf_event_mux_interval_ms sinks type - --Key directories here are 'configurations' - which lists the loaded --configurations, and 'events' - a generic perf directory which allows --selection on the perf command line.:: -+The key directory here is 'events' - a generic perf directory which allows -+selection on the perf command line. As with the sinks entries, this provides -+a hash of the configuration name. - -- $ ls configurations/ -- autofdo -- $ cat configurations/autofdo -- 0xa7c3dddd -- --As with the sinks entries, this provides a hash of the configuration name. 
- The entry in the 'events' directory uses perfs built in syntax generator - to substitute the syntax for the name when evaluating the command:: - -diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst -index 8ddb9b09451c8..c47f381d0c002 100644 ---- a/Documentation/trace/events.rst -+++ b/Documentation/trace/events.rst -@@ -198,6 +198,15 @@ The glob (~) accepts a wild card character (\*,?) and character classes - prev_comm ~ "*sh*" - prev_comm ~ "ba*sh" - -+If the field is a pointer that points into user space (for example -+"filename" from sys_enter_openat), then you have to append ".ustring" to the -+field name:: -+ -+ filename.ustring ~ "password" -+ -+As the kernel will have to know how to retrieve the memory that the pointer -+is at from user space. -+ - 5.2 Setting filters - ------------------- - -@@ -230,6 +239,16 @@ Currently the caret ('^') for an error always appears at the beginning of - the filter string; the error message should still be useful though - even without more accurate position info. - -+5.2.1 Filter limitations -+------------------------ -+ -+If a filter is placed on a string pointer ``(char *)`` that does not point -+to a string on the ring buffer, but instead points to kernel or user space -+memory, then, for safety reasons, at most 1024 bytes of the content is -+copied onto a temporary buffer to do the compare. If the copy of the memory -+faults (the pointer points to memory that should not be accessed), then the -+string compare will be treated as not matching. -+ - 5.3 Clearing filters - -------------------- - -diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst -index 4e5b26f03d5b1..d036946bce7ab 100644 ---- a/Documentation/trace/ftrace.rst -+++ b/Documentation/trace/ftrace.rst -@@ -2929,7 +2929,7 @@ Produces:: - bash-1994 [000] .... 4342.324898: ima_get_action <-process_measurement - bash-1994 [000] .... 4342.324898: ima_match_policy <-ima_get_action - bash-1994 [000] .... 4342.324899: do_truncate <-do_last -- bash-1994 [000] .... 4342.324899: should_remove_suid <-do_truncate -+ bash-1994 [000] .... 4342.324899: setattr_should_drop_suidgid <-do_truncate - bash-1994 [000] .... 4342.324899: notify_change <-do_truncate - bash-1994 [000] .... 4342.324900: current_fs_time <-notify_change - bash-1994 [000] .... 4342.324900: current_kernel_time <-current_fs_time -diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst -index 533415644c54d..a78350a8fed43 100644 ---- a/Documentation/trace/histogram.rst -+++ b/Documentation/trace/histogram.rst -@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi - will use the event's kernel stacktrace as the key. The keywords - 'keys' or 'key' can be used to specify keys, and the keywords - 'values', 'vals', or 'val' can be used to specify values. Compound -- keys consisting of up to two fields can be specified by the 'keys' -+ keys consisting of up to three fields can be specified by the 'keys' - keyword. Hashing a compound key produces a unique entry in the - table for each unique combination of component keys, and can be - useful for providing more fine-grained summaries of event data. -diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst -index b175d88f31ebb..15e4bfa2bd83c 100644 ---- a/Documentation/trace/kprobetrace.rst -+++ b/Documentation/trace/kprobetrace.rst -@@ -58,8 +58,8 @@ Synopsis of kprobe_events - NAME=FETCHARG : Set NAME as the argument name of FETCHARG. - FETCHARG:TYPE : Set TYPE as the type of FETCHARG. 
Currently, basic types - (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types -- (x8/x16/x32/x64), "string", "ustring" and bitfield -- are supported. -+ (x8/x16/x32/x64), "string", "ustring", "symbol", "symstr" -+ and bitfield are supported. - - (\*1) only for the probe on function entry (offs == 0). - (\*2) only for return probe. -@@ -96,6 +96,10 @@ offset, and container-size (usually 32). The syntax is:: - - Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG) - which shows given pointer in "symbol+offset" style. -+On the other hand, symbol-string type ('symstr') converts the given address to -+"symbol+offset/symbolsize" style and stores it as a null-terminated string. -+With 'symstr' type, you can filter the event with wildcard pattern of the -+symbols, and you don't need to solve symbol name by yourself. - For $comm, the default type is "string"; any other type is invalid. - - .. _user_mem_access: -diff --git a/Documentation/translations/it_IT/kernel-hacking/locking.rst b/Documentation/translations/it_IT/kernel-hacking/locking.rst -index 1efb8293bf1f0..9d6387e7b083b 100644 ---- a/Documentation/translations/it_IT/kernel-hacking/locking.rst -+++ b/Documentation/translations/it_IT/kernel-hacking/locking.rst -@@ -1396,7 +1396,7 @@ Riferimento per l'API dei Mutex - Riferimento per l'API dei Futex - =============================== - --.. kernel-doc:: kernel/futex.c -+.. kernel-doc:: kernel/futex/core.c - :internal: - - Approfondimenti -diff --git a/Documentation/tty/device_drivers/oxsemi-tornado.rst b/Documentation/tty/device_drivers/oxsemi-tornado.rst -new file mode 100644 -index 0000000000000..0180d8bb08818 ---- /dev/null -+++ b/Documentation/tty/device_drivers/oxsemi-tornado.rst -@@ -0,0 +1,129 @@ -+.. SPDX-License-Identifier: GPL-2.0 -+ -+==================================================================== -+Notes on Oxford Semiconductor PCIe (Tornado) 950 serial port devices -+==================================================================== -+ -+Oxford Semiconductor PCIe (Tornado) 950 serial port devices are driven -+by a fixed 62.5MHz clock input derived from the 100MHz PCI Express clock. -+ -+The baud rate produced by the baud generator is obtained from this input -+frequency by dividing it by the clock prescaler, which can be set to any -+value from 1 to 63.875 in increments of 0.125, and then the usual 16-bit -+divisor is used as with the original 8250, to divide the frequency by a -+value from 1 to 65535. Finally a programmable oversampling rate is used -+that can take any value from 4 to 16 to divide the frequency further and -+determine the actual baud rate used. Baud rates from 15625000bps down -+to 0.933bps can be obtained this way. -+ -+By default the oversampling rate is set to 16 and the clock prescaler is -+set to 33.875, meaning that the frequency to be used as the reference -+for the usual 16-bit divisor is 115313.653, which is close enough to the -+frequency of 115200 used by the original 8250 for the same values to be -+used for the divisor to obtain the requested baud rates by software that -+is unaware of the extra clock controls available. -+ -+The oversampling rate is programmed with the TCR register and the clock -+prescaler is programmed with the CPR/CPR2 register pair[1][2][3][4]. -+To switch away from the default value of 33.875 for the prescaler the -+the enhanced mode has to be explicitly enabled though, by setting bit 4 -+of the EFR. 
In that mode setting bit 7 in the MCR enables the prescaler -+or otherwise it is bypassed as if the value of 1 was used. Additionally -+writing any value to CPR clears CPR2 for compatibility with old software -+written for older conventional PCI Oxford Semiconductor devices that do -+not have the extra prescaler's 9th bit in CPR2, so the CPR/CPR2 register -+pair has to be programmed in the right order. -+ -+By using these parameters rates from 15625000bps down to 1bps can be -+obtained, with either exact or highly-accurate actual bit rates for -+standard and many non-standard rates. -+ -+Here are the figures for the standard and some non-standard baud rates -+(including those quoted in Oxford Semiconductor documentation), giving -+the requested rate (r), the actual rate yielded (a) and its deviation -+from the requested rate (d), and the values of the oversampling rate -+(tcr), the clock prescaler (cpr) and the divisor (div) produced by the -+new `get_divisor' handler: -+ -+r: 15625000, a: 15625000.00, d: 0.0000%, tcr: 4, cpr: 1.000, div: 1 -+r: 12500000, a: 12500000.00, d: 0.0000%, tcr: 5, cpr: 1.000, div: 1 -+r: 10416666, a: 10416666.67, d: 0.0000%, tcr: 6, cpr: 1.000, div: 1 -+r: 8928571, a: 8928571.43, d: 0.0000%, tcr: 7, cpr: 1.000, div: 1 -+r: 7812500, a: 7812500.00, d: 0.0000%, tcr: 8, cpr: 1.000, div: 1 -+r: 4000000, a: 4000000.00, d: 0.0000%, tcr: 5, cpr: 3.125, div: 1 -+r: 3686400, a: 3676470.59, d: -0.2694%, tcr: 8, cpr: 2.125, div: 1 -+r: 3500000, a: 3496503.50, d: -0.0999%, tcr: 13, cpr: 1.375, div: 1 -+r: 3000000, a: 2976190.48, d: -0.7937%, tcr: 14, cpr: 1.500, div: 1 -+r: 2500000, a: 2500000.00, d: 0.0000%, tcr: 10, cpr: 2.500, div: 1 -+r: 2000000, a: 2000000.00, d: 0.0000%, tcr: 10, cpr: 3.125, div: 1 -+r: 1843200, a: 1838235.29, d: -0.2694%, tcr: 16, cpr: 2.125, div: 1 -+r: 1500000, a: 1492537.31, d: -0.4975%, tcr: 5, cpr: 8.375, div: 1 -+r: 1152000, a: 1152073.73, d: 0.0064%, tcr: 14, cpr: 3.875, div: 1 -+r: 921600, a: 919117.65, d: -0.2694%, tcr: 16, cpr: 2.125, div: 2 -+r: 576000, a: 576036.87, d: 0.0064%, tcr: 14, cpr: 3.875, div: 2 -+r: 460800, a: 460829.49, d: 0.0064%, tcr: 7, cpr: 3.875, div: 5 -+r: 230400, a: 230414.75, d: 0.0064%, tcr: 14, cpr: 3.875, div: 5 -+r: 115200, a: 115207.37, d: 0.0064%, tcr: 14, cpr: 1.250, div: 31 -+r: 57600, a: 57603.69, d: 0.0064%, tcr: 8, cpr: 3.875, div: 35 -+r: 38400, a: 38402.46, d: 0.0064%, tcr: 14, cpr: 3.875, div: 30 -+r: 19200, a: 19201.23, d: 0.0064%, tcr: 8, cpr: 3.875, div: 105 -+r: 9600, a: 9600.06, d: 0.0006%, tcr: 9, cpr: 1.125, div: 643 -+r: 4800, a: 4799.98, d: -0.0004%, tcr: 7, cpr: 2.875, div: 647 -+r: 2400, a: 2400.02, d: 0.0008%, tcr: 9, cpr: 2.250, div: 1286 -+r: 1200, a: 1200.00, d: 0.0000%, tcr: 14, cpr: 2.875, div: 1294 -+r: 300, a: 300.00, d: 0.0000%, tcr: 11, cpr: 2.625, div: 7215 -+r: 200, a: 200.00, d: 0.0000%, tcr: 16, cpr: 1.250, div: 15625 -+r: 150, a: 150.00, d: 0.0000%, tcr: 13, cpr: 2.250, div: 14245 -+r: 134, a: 134.00, d: 0.0000%, tcr: 11, cpr: 2.625, div: 16153 -+r: 110, a: 110.00, d: 0.0000%, tcr: 12, cpr: 1.000, div: 47348 -+r: 75, a: 75.00, d: 0.0000%, tcr: 4, cpr: 5.875, div: 35461 -+r: 50, a: 50.00, d: 0.0000%, tcr: 16, cpr: 1.250, div: 62500 -+r: 25, a: 25.00, d: 0.0000%, tcr: 16, cpr: 2.500, div: 62500 -+r: 4, a: 4.00, d: 0.0000%, tcr: 16, cpr: 20.000, div: 48828 -+r: 2, a: 2.00, d: 0.0000%, tcr: 16, cpr: 40.000, div: 48828 -+r: 1, a: 1.00, d: 0.0000%, tcr: 16, cpr: 63.875, div: 61154 -+ -+With the baud base set to 15625000 and the unsigned 16-bit UART_DIV_MAX -+limitation imposed by 
`serial8250_get_baud_rate' standard baud rates -+below 300bps become unavailable in the regular way, e.g. the rate of -+200bps requires the baud base to be divided by 78125 and that is beyond -+the unsigned 16-bit range. The historic spd_cust feature can still be -+used by encoding the values for, the prescaler, the oversampling rate -+and the clock divisor (DLM/DLL) as follows to obtain such rates if so -+required: -+ -+ 31 29 28 20 19 16 15 0 -++-----+-----------------+-------+-------------------------------+ -+|0 0 0| CPR2:CPR | TCR | DLM:DLL | -++-----+-----------------+-------+-------------------------------+ -+ -+Use a value such encoded for the `custom_divisor' field along with the -+ASYNC_SPD_CUST flag set in the `flags' field in `struct serial_struct' -+passed with the TIOCSSERIAL ioctl(2), such as with the setserial(8) -+utility and its `divisor' and `spd_cust' parameters, and the select -+the baud rate of 38400bps. Note that the value of 0 in TCR sets the -+oversampling rate to 16 and prescaler values below 1 in CPR2/CPR are -+clamped by the driver to 1. -+ -+For example the value of 0x1f4004e2 will set CPR2/CPR, TCR and DLM/DLL -+respectively to 0x1f4, 0x0 and 0x04e2, choosing the prescaler value, -+the oversampling rate and the clock divisor of 62.500, 16 and 1250 -+respectively. These parameters will set the baud rate for the serial -+port to 62500000 / 62.500 / 1250 / 16 = 50bps. -+ -+References: -+ -+[1] "OXPCIe200 PCI Express Multi-Port Bridge", Oxford Semiconductor, -+ Inc., DS-0045, 10 Nov 2008, Section "950 Mode", pp. 64-65 -+ -+[2] "OXPCIe952 PCI Express Bridge to Dual Serial & Parallel Port", -+ Oxford Semiconductor, Inc., DS-0046, Mar 06 08, Section "950 Mode", -+ p. 20 -+ -+[3] "OXPCIe954 PCI Express Bridge to Quad Serial Port", Oxford -+ Semiconductor, Inc., DS-0047, Feb 08, Section "950 Mode", p. 20 -+ -+[4] "OXPCIe958 PCI Express Bridge to Octal Serial Port", Oxford -+ Semiconductor, Inc., DS-0048, Feb 08, Section "950 Mode", p. 20 -+ -+Maciej W. Rozycki -diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst -index 6655d929a3518..404ecb6d0f87f 100644 ---- a/Documentation/userspace-api/ioctl/ioctl-number.rst -+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst -@@ -304,7 +304,6 @@ Code Seq# Include File Comments - 0x89 00-06 arch/x86/include/asm/sockios.h - 0x89 0B-DF linux/sockios.h - 0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range --0x89 E0-EF linux/dn.h PROTOPRIVATE range - 0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range - 0x8B all linux/wireless.h - 0x8C 00-3F WiNRADiO driver -diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst -index f35552ff19ba8..b68e7a51009f8 100644 ---- a/Documentation/userspace-api/landlock.rst -+++ b/Documentation/userspace-api/landlock.rst -@@ -267,8 +267,8 @@ restrict such paths with dedicated ruleset flags. - Ruleset layers - -------------- - --There is a limit of 64 layers of stacked rulesets. This can be an issue for a --task willing to enforce a new ruleset in complement to its 64 inherited -+There is a limit of 16 layers of stacked rulesets. This can be an issue for a -+task willing to enforce a new ruleset in complement to its 16 inherited - rulesets. Once this limit is reached, sys_landlock_restrict_self() returns - E2BIG. 
It is then strongly suggested to carefully build rulesets once in the - life of a thread, especially for applications able to launch other applications -diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst -index 976d34445a246..f1421cf1a1b31 100644 ---- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst -+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec.rst -@@ -3326,15 +3326,15 @@ enum v4l2_mpeg_video_hevc_size_of_length_field - - * - __u8 - - ``poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]`` - - PocStCurrBefore as described in section 8.3.2 "Decoding process for reference -- picture set. -+ picture set": provides the index of the short term before references in DPB array. - * - __u8 - - ``poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]`` - - PocStCurrAfter as described in section 8.3.2 "Decoding process for reference -- picture set. -+ picture set": provides the index of the short term after references in DPB array. - * - __u8 - - ``poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]`` - - PocLtCurr as described in section 8.3.2 "Decoding process for reference -- picture set. -+ picture set": provides the index of the long term references in DPB array. - * - __u64 - - ``flags`` - - See :ref:`Decode Parameters Flags ` -diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst -index a6729c8cf0634..ec38299f9428a 100644 ---- a/Documentation/virt/kvm/api.rst -+++ b/Documentation/virt/kvm/api.rst -@@ -4117,6 +4117,18 @@ not holding a previously reported uncorrected error). - :Parameters: struct kvm_s390_cmma_log (in, out) - :Returns: 0 on success, a negative value on error - -+Errors: -+ -+ ====== ============================================================= -+ ENOMEM not enough memory can be allocated to complete the task -+ ENXIO if CMMA is not enabled -+ EINVAL if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled -+ EINVAL if KVM_S390_CMMA_PEEK is not set but dirty tracking has been -+ disabled (and thus migration mode was automatically disabled) -+ EFAULT if the userspace address is invalid or if no page table is -+ present for the addresses (e.g. when using hugepages). -+ ====== ============================================================= -+ - This ioctl is used to get the values of the CMMA bits on the s390 - architecture. It is meant to be used in two scenarios: - -@@ -4197,12 +4209,6 @@ mask is unused. - - values points to the userspace buffer where the result will be stored. - --This ioctl can fail with -ENOMEM if not enough memory can be allocated to --complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if --KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with ---EFAULT if the userspace address is invalid or if no page table is --present for the addresses (e.g. when using hugepages). -- - 4.108 KVM_S390_SET_CMMA_BITS - ---------------------------- - -@@ -7265,3 +7271,63 @@ The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset - of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace - the hypercalls whose corresponding bit is in the argument, and return - ENOSYS for the others. -+ -+9. Known KVM API problems -+========================= -+ -+In some cases, KVM's API has some inconsistencies or common pitfalls -+that userspace need to be aware of. This section details some of -+these issues. -+ -+Most of them are architecture specific, so the section is split by -+architecture. -+ -+9.1. 
x86 -+-------- -+ -+``KVM_GET_SUPPORTED_CPUID`` issues -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible -+to take its result and pass it directly to ``KVM_SET_CPUID2``. This section -+documents some cases in which that requires some care. -+ -+Local APIC features -+~~~~~~~~~~~~~~~~~~~ -+ -+CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``, -+but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or -+``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` are used to enable in-kernel emulation of -+the local APIC. -+ -+The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature. -+ -+CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``. -+It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel -+has enabled in-kernel emulation of the local APIC. -+ -+CPU topology -+~~~~~~~~~~~~ -+ -+Several CPUID values include topology information for the host CPU: -+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems. Different -+versions of KVM return different values for this information and userspace -+should not rely on it. Currently they return all zeroes. -+ -+If userspace wishes to set up a guest topology, it should be careful that -+the values of these three leaves differ for each CPU. In particular, -+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX -+for 0x8000001e; the latter also encodes the core id and node id in bits -+7:0 of EBX and ECX respectively. -+ -+Obsolete ioctls and capabilities -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually -+available. Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if -+available. -+ -+Ordering of KVM_GET_*/KVM_SET_* ioctls -+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -+ -+TBD -diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst -index 0aa5b1cfd700c..147efec626e52 100644 ---- a/Documentation/virt/kvm/devices/vm.rst -+++ b/Documentation/virt/kvm/devices/vm.rst -@@ -215,6 +215,7 @@ KVM_S390_VM_TOD_EXT). - :Parameters: address of a buffer in user space to store the data (u8) to - :Returns: -EFAULT if the given address is not accessible from kernel space; - -EINVAL if setting the TOD clock extension to != 0 is not supported -+ -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) - - 3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW - ----------------------------------- -@@ -224,6 +225,7 @@ the POP (u64). - - :Parameters: address of a buffer in user space to store the data (u64) to - :Returns: -EFAULT if the given address is not accessible from kernel space -+ -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) - - 3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT - ----------------------------------- -@@ -237,6 +239,7 @@ it, it is stored as 0 and not allowed to be set to a value != 0. - (kvm_s390_vm_tod_clock) to - :Returns: -EFAULT if the given address is not accessible from kernel space; - -EINVAL if setting the TOD clock extension to != 0 is not supported -+ -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) - - 4. GROUP: KVM_S390_VM_CRYPTO - ============================ -@@ -299,6 +302,10 @@ Allows userspace to start migration mode, needed for PGSTE migration. - Setting this attribute when migration mode is already active will have - no effects. - -+Dirty tracking must be enabled on all memslots, else -EINVAL is returned. 
When -+dirty tracking is disabled on any memslot, migration mode is automatically -+stopped. -+ - :Parameters: none - :Returns: -ENOMEM if there is not enough free memory to start migration mode; - -EINVAL if the state of the VM is invalid (e.g. no memory defined); -diff --git a/MAINTAINERS b/MAINTAINERS -index 3b79fd441dde8..9216b9c85ce92 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -434,6 +434,7 @@ ACPI VIOT DRIVER - M: Jean-Philippe Brucker - L: linux-acpi@vger.kernel.org - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Maintained - F: drivers/acpi/viot.c - F: include/linux/acpi_viot.h -@@ -941,6 +942,7 @@ AMD IOMMU (AMD-VI) - M: Joerg Roedel - R: Suravee Suthikulpanit - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Maintained - T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git - F: drivers/iommu/amd/ -@@ -1248,7 +1250,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER - M: William Breathitt Gray - L: linux-iio@vger.kernel.org - S: Maintained --F: drivers/iio/adc/stx104.c -+F: drivers/iio/addac/stx104.c - - APM DRIVER - M: Jiri Kosina -@@ -3112,7 +3114,7 @@ F: drivers/net/ieee802154/atusb.h - AUDIT SUBSYSTEM - M: Paul Moore - M: Eric Paris --L: linux-audit@redhat.com (moderated for non-subscribers) -+L: audit@vger.kernel.org - S: Supported - W: https://github.com/linux-audit - T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git -@@ -3405,6 +3407,7 @@ F: net/sched/act_bpf.c - F: net/sched/cls_bpf.c - F: samples/bpf/ - F: scripts/bpf_doc.py -+F: scripts/pahole-version.sh - F: tools/bpf/ - F: tools/lib/bpf/ - F: tools/testing/selftests/bpf/ -@@ -4663,6 +4666,7 @@ T: git git://git.samba.org/sfrench/cifs-2.6.git - F: Documentation/admin-guide/cifs/ - F: fs/cifs/ - F: fs/smbfs_common/ -+F: include/uapi/linux/cifs - - COMPACTPCI HOTPLUG CORE - M: Scott Murray -@@ -4810,7 +4814,6 @@ F: Documentation/ABI/testing/sysfs-bus-counter - F: Documentation/driver-api/generic-counter.rst - F: drivers/counter/ - F: include/linux/counter.h --F: include/linux/counter_enum.h - - CP2615 I2C DRIVER - M: Bence Csókás -@@ -5203,13 +5206,6 @@ F: include/linux/tfrc.h - F: include/uapi/linux/dccp.h - F: net/dccp/ - --DECnet NETWORK LAYER --L: linux-decnet-user@lists.sourceforge.net --S: Orphan --W: http://linux-decnet.sourceforge.net --F: Documentation/networking/decnet.rst --F: net/decnet/ -- - DECSTATION PLATFORM SUPPORT - M: "Maciej W. 
Rozycki" - L: linux-mips@vger.kernel.org -@@ -5602,6 +5598,7 @@ M: Christoph Hellwig - M: Marek Szyprowski - R: Robin Murphy - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Supported - W: http://git.infradead.org/users/hch/dma-mapping.git - T: git git://git.infradead.org/users/hch/dma-mapping.git -@@ -5614,6 +5611,7 @@ F: kernel/dma/ - DMA MAPPING BENCHMARK - M: Barry Song - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - F: kernel/dma/map_benchmark.c - F: tools/testing/selftests/dma/ - -@@ -7024,7 +7022,6 @@ F: drivers/net/mdio/fwnode_mdio.c - F: drivers/net/mdio/of_mdio.c - F: drivers/net/pcs/ - F: drivers/net/phy/ --F: drivers/of/of_net.c - F: include/dt-bindings/net/qca-ar803x.h - F: include/linux/*mdio*.h - F: include/linux/mdio/*.h -@@ -7036,6 +7033,7 @@ F: include/linux/platform_data/mdio-gpio.h - F: include/trace/events/mdio.h - F: include/uapi/linux/mdio.h - F: include/uapi/linux/mii.h -+F: net/core/of_net.c - - EXFAT FILE SYSTEM - M: Namjae Jeon -@@ -7115,6 +7113,7 @@ F: drivers/gpu/drm/exynos/exynos_dp* - EXYNOS SYSMMU (IOMMU) driver - M: Marek Szyprowski - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Maintained - F: drivers/iommu/exynos-iommu.c - -@@ -7239,9 +7238,6 @@ F: include/linux/fs.h - F: include/linux/fs_types.h - F: include/uapi/linux/fs.h - F: include/uapi/linux/openat2.h --X: fs/io-wq.c --X: fs/io-wq.h --X: fs/io_uring.c - - FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER - M: Riku Voipio -@@ -7744,7 +7740,7 @@ F: Documentation/locking/*futex* - F: include/asm-generic/futex.h - F: include/linux/futex.h - F: include/uapi/linux/futex.h --F: kernel/futex.c -+F: kernel/futex/* - F: tools/perf/bench/futex* - F: tools/testing/selftests/futex/ - -@@ -7947,9 +7943,10 @@ F: drivers/media/usb/go7007/ - - GOODIX TOUCHSCREEN - M: Bastien Nocera -+M: Hans de Goede - L: linux-input@vger.kernel.org - S: Maintained --F: drivers/input/touchscreen/goodix.c -+F: drivers/input/touchscreen/goodix* - - GOOGLE ETHERNET DRIVERS - M: Jeroen de Borst -@@ -8565,7 +8562,7 @@ F: drivers/net/wireless/intersil/hostap/ - HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER - L: platform-driver-x86@vger.kernel.org - S: Orphan --F: drivers/platform/x86/tc1100-wmi.c -+F: drivers/platform/x86/hp/tc1100-wmi.c - - HPET: High Precision Event Timers driver - M: Clemens Ladisch -@@ -9457,6 +9454,7 @@ INTEL IOMMU (VT-d) - M: David Woodhouse - M: Lu Baolu - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Supported - T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git - F: drivers/iommu/intel/ -@@ -9793,6 +9791,7 @@ IOMMU DRIVERS - M: Joerg Roedel - M: Will Deacon - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Maintained - T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git - F: Documentation/devicetree/bindings/iommu/ -@@ -9810,9 +9809,7 @@ L: io-uring@vger.kernel.org - S: Maintained - T: git git://git.kernel.dk/linux-block - T: git git://git.kernel.dk/liburing --F: fs/io-wq.c --F: fs/io-wq.h --F: fs/io_uring.c -+F: io_uring/ - F: include/linux/io_uring.h - F: include/uapi/linux/io_uring.h - F: tools/io_uring/ -@@ -10835,7 +10832,7 @@ M: Eric Piel - S: Maintained - F: Documentation/misc-devices/lis3lv02d.rst - F: drivers/misc/lis3lv02d/ --F: drivers/platform/x86/hp_accel.c -+F: drivers/platform/x86/hp/hp_accel.c - - LIST KUNIT TEST - M: David Gow -@@ -11795,6 +11792,7 @@ F: drivers/i2c/busses/i2c-mt65xx.c - MEDIATEK IOMMU DRIVER - M: Yong Wu - L: 
iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) - S: Supported - F: Documentation/devicetree/bindings/iommu/mediatek* -@@ -15554,6 +15552,7 @@ F: drivers/i2c/busses/i2c-qcom-cci.c - QUALCOMM IOMMU - M: Rob Clark - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - L: linux-arm-msm@vger.kernel.org - S: Maintained - F: drivers/iommu/arm/arm-smmu/qcom_iommu.c -@@ -15720,6 +15719,8 @@ F: arch/mips/generic/board-ranchu.c - - RANDOM NUMBER DRIVER - M: "Theodore Ts'o" -+M: Jason A. Donenfeld -+T: git https://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git - S: Maintained - F: drivers/char/random.c - -@@ -17980,6 +17981,7 @@ F: arch/x86/boot/video* - SWIOTLB SUBSYSTEM - M: Christoph Hellwig - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Supported - W: http://git.infradead.org/users/hch/dma-mapping.git - T: git git://git.infradead.org/users/hch/dma-mapping.git -@@ -20560,12 +20562,14 @@ M: Juergen Gross - M: Stefano Stabellini - L: xen-devel@lists.xenproject.org (moderated for non-subscribers) - L: iommu@lists.linux-foundation.org -+L: iommu@lists.linux.dev - S: Supported - F: arch/x86/xen/*swiotlb* - F: drivers/xen/*swiotlb* - - XFS FILESYSTEM - C: irc://irc.oftc.net/xfs -+M: Leah Rumancik - M: Darrick J. Wong - M: linux-xfs@vger.kernel.org - L: linux-xfs@vger.kernel.org -diff --git a/Makefile b/Makefile -index ed6e7ec60eff6..b2ff07a0176be 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 15 --SUBLEVEL = 0 -+SUBLEVEL = 132 - EXTRAVERSION = - NAME = Trick or Treat - -@@ -93,10 +93,17 @@ endif - - # If the user is running make -s (silent mode), suppress echoing of - # commands -+# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS. - --ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),) -- quiet=silent_ -- KBUILD_VERBOSE = 0 -+ifeq ($(filter 3.%,$(MAKE_VERSION)),) -+silence:=$(findstring s,$(firstword -$(MAKEFLAGS))) -+else -+silence:=$(findstring s,$(filter-out --%,$(MAKEFLAGS))) -+endif -+ -+ifeq ($(silence),s) -+quiet=silent_ -+KBUILD_VERBOSE = 0 - endif - - export quiet Q KBUILD_VERBOSE -@@ -430,6 +437,7 @@ else - HOSTCC = gcc - HOSTCXX = g++ - endif -+HOSTPKG_CONFIG = pkg-config - - export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \ - -O2 -fomit-frame-pointer -std=gnu89 -@@ -480,6 +488,8 @@ LZ4 = lz4c - XZ = xz - ZSTD = zstd - -+PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh) -+ - CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ - -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) - NOSTDINC_FLAGS := -@@ -523,7 +533,7 @@ KBUILD_LDFLAGS_MODULE := - KBUILD_LDFLAGS := - CLANG_FLAGS := - --export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC -+export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG - export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL - export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX - export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD -@@ -534,6 +544,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE - export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE - export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE - export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL -+export PAHOLE_FLAGS - - # Files to ignore in find ... 
statements - -@@ -687,12 +698,19 @@ endif - - ifdef CONFIG_CC_IS_GCC - RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) -+RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) - RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register) - endif - ifdef CONFIG_CC_IS_CLANG - RETPOLINE_CFLAGS := -mretpoline-external-thunk - RETPOLINE_VDSO_CFLAGS := -mretpoline - endif -+ -+ifdef CONFIG_RETHUNK -+RETHUNK_CFLAGS := -mfunction-return=thunk-extern -+RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS) -+endif -+ - export RETPOLINE_CFLAGS - export RETPOLINE_VDSO_CFLAGS - -@@ -811,6 +829,9 @@ endif - KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) - KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) - -+# These result in bogus false positives -+KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer) -+ - ifdef CONFIG_FRAME_POINTER - KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls - else -@@ -831,12 +852,12 @@ endif - - # Initialize all stack variables with a zero value. - ifdef CONFIG_INIT_STACK_ALL_ZERO --# Future support for zero initialization is still being debated, see --# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being --# renamed or dropped. - KBUILD_CFLAGS += -ftrivial-auto-var-init=zero -+ifdef CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_ENABLER -+# https://github.com/llvm/llvm-project/issues/44842 - KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang - endif -+endif - - # While VLAs have been removed, GCC produces unreachable stack probes - # for the randomize_kstack_offset feature. Disable it for all compilers. -@@ -857,7 +878,9 @@ else - DEBUG_CFLAGS += -g - endif - --ifndef CONFIG_AS_IS_LLVM -+ifdef CONFIG_AS_IS_LLVM -+KBUILD_AFLAGS += -g -+else - KBUILD_AFLAGS += -Wa,-gdwarf-2 - endif - -@@ -865,6 +888,7 @@ ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT - dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4 - dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5 - DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y) -+KBUILD_AFLAGS += -gdwarf-$(dwarf-version-y) - endif - - ifdef CONFIG_DEBUG_INFO_REDUCED -@@ -1008,6 +1032,21 @@ ifdef CONFIG_CC_IS_GCC - KBUILD_CFLAGS += -Wno-maybe-uninitialized - endif - -+ifdef CONFIG_CC_IS_GCC -+# The allocators already balk at large sizes, so silence the compiler -+# warnings for bounds checks involving those possible values. While -+# -Wno-alloc-size-larger-than would normally be used here, earlier versions -+# of gcc (<9.1) weirdly don't handle the option correctly when _other_ -+# warnings are produced (?!). Using -Walloc-size-larger-than=SIZE_MAX -+# doesn't work (as it is documented to), silently resolving to "0" prior to -+# version 9.1 (and producing an error more recently). Numeric values larger -+# than PTRDIFF_MAX also don't work prior to version 9.1, which are silently -+# ignored, continuing to default to PTRDIFF_MAX. So, left with no other -+# choice, we must perform a versioned check to disable this warning. 
-+# https://lore.kernel.org/lkml/20210824115859.187f272f@canb.auug.org.au -+KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0901, -Wno-alloc-size-larger-than) -+endif -+ - # disable invalid "can't wrap" optimizations for signed / pointers - KBUILD_CFLAGS += -fno-strict-overflow - -@@ -1053,6 +1092,11 @@ KBUILD_CFLAGS += $(KCFLAGS) - KBUILD_LDFLAGS_MODULE += --build-id=sha1 - LDFLAGS_vmlinux += --build-id=sha1 - -+KBUILD_LDFLAGS += -z noexecstack -+ifeq ($(CONFIG_LD_IS_BFD),y) -+KBUILD_LDFLAGS += $(call ld-option,--no-warn-rwx-segments) -+endif -+ - ifeq ($(CONFIG_STRIP_ASM_SYMS),y) - LDFLAGS_vmlinux += $(call ld-option, -X,) - endif -@@ -1115,7 +1159,9 @@ export MODORDER := $(extmod_prefix)modules.order - export MODULES_NSDEPS := $(extmod_prefix)modules.nsdeps - - ifeq ($(KBUILD_EXTMOD),) --core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ -+core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ -+core-$(CONFIG_BLOCK) += block/ -+core-$(CONFIG_IO_URING) += io_uring/ - - vmlinux-dirs := $(patsubst %/,%,$(filter %/, \ - $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ -@@ -1125,13 +1171,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \ - $(patsubst %/,%,$(filter %/, $(core-) \ - $(drivers-) $(libs-)))) - --subdir-modorder := $(addsuffix modules.order,$(filter %/, \ -- $(core-y) $(core-m) $(libs-y) $(libs-m) \ -- $(drivers-y) $(drivers-m))) -- - build-dirs := $(vmlinux-dirs) - clean-dirs := $(vmlinux-alldirs) - -+subdir-modorder := $(addsuffix /modules.order, $(build-dirs)) -+ - # Externally visible symbols (used by link-vmlinux.sh) - KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y)) - KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y))) -@@ -1160,7 +1204,7 @@ KBUILD_MODULES := 1 - - autoksyms_recursive: descend modules.order - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ -- "$(MAKE) -f $(srctree)/Makefile vmlinux" -+ "$(MAKE) -f $(srctree)/Makefile autoksyms_recursive" - endif - - autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h) -@@ -1301,8 +1345,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj - - PHONY += headers - headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts -- $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \ -- $(error Headers not exportable for the $(SRCARCH) architecture)) -+ $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML)) - $(Q)$(MAKE) $(hdr-inst)=include/uapi - $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi - -@@ -1792,7 +1835,9 @@ quiet_cmd_depmod = DEPMOD $(MODLIB) - - modules_install: - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst -+ifndef modules_sign_only - $(call cmd,depmod) -+endif - - else # CONFIG_MODULES - -@@ -1807,6 +1852,8 @@ modules modules_install: - @echo >&2 '***' - @exit 1 - -+KBUILD_MODULES := -+ - endif # CONFIG_MODULES - - # Single targets -@@ -1832,18 +1879,12 @@ $(single-ko): single_modpost - $(single-no-ko): descend - @: - --ifeq ($(KBUILD_EXTMOD),) --# For the single build of in-tree modules, use a temporary file to avoid --# the situation of modules_install installing an invalid modules.order. --MODORDER := .modules.tmp --endif -- -+# Remove MODORDER when done because it is not the real one. 
- PHONY += single_modpost - single_modpost: $(single-no-ko) modules_prepare - $(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$m;) } > $(MODORDER) - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost -- --KBUILD_MODULES := 1 -+ $(Q)rm -f $(MODORDER) - - export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod_prefix), $(single-no-ko)) - -@@ -1851,10 +1892,8 @@ export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod_prefix), $(single-no-ko)) - build-dirs := $(foreach d, $(build-dirs), \ - $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d))) - --endif -+KBUILD_MODULES := 1 - --ifndef CONFIG_MODULES --KBUILD_MODULES := - endif - - # Handle descending into subdirectories listed in $(build-dirs) -diff --git a/arch/Kconfig b/arch/Kconfig -index 8df1c71026435..b45c699c2bac3 100644 ---- a/arch/Kconfig -+++ b/arch/Kconfig -@@ -200,6 +200,9 @@ config HAVE_NMI - config TRACE_IRQFLAGS_SUPPORT - bool - -+config TRACE_IRQFLAGS_NMI_SUPPORT -+ bool -+ - # - # An arch should select this if it provides all these things: - # -@@ -261,6 +264,9 @@ config ARCH_HAS_DMA_SET_UNCACHED - config ARCH_HAS_DMA_CLEAR_UNCACHED - bool - -+config ARCH_HAS_CPU_FINALIZE_INIT -+ bool -+ - # Select if arch init_task must go in the __init_task_data section - config ARCH_TASK_STRUCT_ON_STACK - bool -@@ -1141,6 +1147,7 @@ config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET - config RANDOMIZE_KSTACK_OFFSET_DEFAULT - bool "Randomize kernel stack offset on syscall entry" - depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET -+ depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000 - help - The kernel stack offset can be randomized (after pt_regs) by - roughly 5 bits of entropy, frustrating memory corruption -@@ -1234,6 +1241,9 @@ config RELR - config ARCH_HAS_MEM_ENCRYPT - bool - -+config ARCH_HAS_CC_PLATFORM -+ bool -+ - config HAVE_SPARSE_SYSCALL_NR - bool - help -diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c -index 08b430d25a315..7cf92d172dce9 100644 ---- a/arch/alpha/boot/tools/objstrip.c -+++ b/arch/alpha/boot/tools/objstrip.c -@@ -148,7 +148,7 @@ main (int argc, char *argv[]) - #ifdef __ELF__ - elf = (struct elfhdr *) buf; - -- if (elf->e_ident[0] == 0x7f && str_has_prefix((char *)elf->e_ident + 1, "ELF")) { -+ if (memcmp(&elf->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0) { - if (elf->e_type != ET_EXEC) { - fprintf(stderr, "%s: %s is not an ELF executable\n", - prog_name, inname); -diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h -deleted file mode 100644 -index 78030d1c7e7e0..0000000000000 ---- a/arch/alpha/include/asm/bugs.h -+++ /dev/null -@@ -1,20 +0,0 @@ --/* -- * include/asm-alpha/bugs.h -- * -- * Copyright (C) 1994 Linus Torvalds -- */ -- --/* -- * This is included by init/main.c to check for architecture-dependent bugs. -- * -- * Needs: -- * void check_bugs(void); -- */ -- --/* -- * I don't know of any alpha bugs yet.. 
Nice chip -- */ -- --static void check_bugs(void) --{ --} -diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h -index 18f48a6f2ff6d..8f3f5eecba28b 100644 ---- a/arch/alpha/include/asm/page.h -+++ b/arch/alpha/include/asm/page.h -@@ -18,7 +18,7 @@ extern void clear_page(void *page); - #define clear_user_page(page, vaddr, pg) clear_page(page) - - #define alloc_zeroed_user_highpage_movable(vma, vaddr) \ -- alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr) -+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr) - #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE - - extern void copy_page(void * _to, void * _from); -diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h -index 2592356e32154..0ce1eee0924b1 100644 ---- a/arch/alpha/include/asm/thread_info.h -+++ b/arch/alpha/include/asm/thread_info.h -@@ -77,7 +77,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); - - /* Work to do on interrupt/exception return. */ - #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ -- _TIF_NOTIFY_RESUME) -+ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL) - - /* Work to do on any return to userspace. */ - #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ -diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h -index b565cc6f408e9..f89798da8a147 100644 ---- a/arch/alpha/include/asm/timex.h -+++ b/arch/alpha/include/asm/timex.h -@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void) - __asm__ __volatile__ ("rpcc %0" : "=r"(ret)); - return ret; - } -+#define get_cycles get_cycles - - #endif -diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S -index e227f3a29a43c..c41a5a9c3b9f2 100644 ---- a/arch/alpha/kernel/entry.S -+++ b/arch/alpha/kernel/entry.S -@@ -469,8 +469,10 @@ entSys: - #ifdef CONFIG_AUDITSYSCALL - lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT - and $3, $6, $3 --#endif - bne $3, strace -+#else -+ blbs $3, strace /* check for SYSCALL_TRACE in disguise */ -+#endif - beq $4, 1f - ldq $27, 0($5) - 1: jsr $26, ($27), sys_ni_syscall -diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c -index f6d2946edbd24..15f2effd6baf8 100644 ---- a/arch/alpha/kernel/irq.c -+++ b/arch/alpha/kernel/irq.c -@@ -60,7 +60,7 @@ int irq_select_affinity(unsigned int irq) - cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); - last_cpu = cpu; - -- cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu)); -+ irq_data_update_affinity(data, cpumask_of(cpu)); - chip->irq_set_affinity(data, cpumask_of(cpu), false); - return 0; - } -diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c -index 5b60c248de9ea..cbefa5a773846 100644 ---- a/arch/alpha/kernel/module.c -+++ b/arch/alpha/kernel/module.c -@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, - base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; - symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; - -- /* The small sections were sorted to the end of the segment. -- The following should definitely cover them. 
*/ -- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; - got = sechdrs[me->arch.gotsecindex].sh_addr; -+ gp = got + 0x8000; - - for (i = 0; i < n; i++) { - unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); -diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c -index ce3077946e1d9..fb3025396ac96 100644 ---- a/arch/alpha/kernel/rtc.c -+++ b/arch/alpha/kernel/rtc.c -@@ -80,7 +80,12 @@ init_rtc_epoch(void) - static int - alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) - { -- mc146818_get_time(tm); -+ int ret = mc146818_get_time(tm); -+ -+ if (ret < 0) { -+ dev_err_ratelimited(dev, "unable to read current time\n"); -+ return ret; -+ } - - /* Adjust for non-default epochs. It's easier to depend on the - generic __get_rtc_time and adjust the epoch here than create -diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c -index b4fbbba30aa2b..8c4c14a171e23 100644 ---- a/arch/alpha/kernel/setup.c -+++ b/arch/alpha/kernel/setup.c -@@ -385,8 +385,7 @@ setup_memory(void *kernel_end) - #endif /* CONFIG_BLK_DEV_INITRD */ - } - --int __init --page_is_ram(unsigned long pfn) -+int page_is_ram(unsigned long pfn) - { - struct memclust_struct * cluster; - struct memdesc_struct * memdesc; -diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c -index 90635ef5dafac..6dc952b0df4a9 100644 ---- a/arch/alpha/kernel/srmcons.c -+++ b/arch/alpha/kernel/srmcons.c -@@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port) - } while((result.bits.status & 1) && (++loops < 10)); - - if (count) -- tty_schedule_flip(port); -+ tty_flip_buffer_push(port); - - return count; - } -diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c -index e805106409f76..afaf4f6ad0f49 100644 ---- a/arch/alpha/kernel/traps.c -+++ b/arch/alpha/kernel/traps.c -@@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) - local_irq_enable(); - while (1); - } -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - #ifndef CONFIG_MATHEMU -@@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs) - { - int signo, code; - -- if ((regs->ps & ~IPL_MAX) == 0) { -+ if (type == 3) { /* FEN fault */ -+ /* Irritating users can call PAL_clrfen to disable the -+ FPU for the process. The kernel will then trap in -+ do_switch_stack and undo_switch_stack when we try -+ to save and restore the FP registers. -+ -+ Given that GCC by default generates code that uses the -+ FP registers, PAL_clrfen is not useful except for DoS -+ attacks. So turn the bleeding FPU back on and be done -+ with it. */ -+ current_thread_info()->pcb.flags |= 1; -+ __reload_thread(¤t_thread_info()->pcb); -+ return; -+ } -+ if (!user_mode(regs)) { - if (type == 1) { - const unsigned int *data - = (const unsigned int *) regs->pc; -@@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs) - } - break; - -- case 3: /* FEN fault */ -- /* Irritating users can call PAL_clrfen to disable the -- FPU for the process. The kernel will then trap in -- do_switch_stack and undo_switch_stack when we try -- to save and restore the FP registers. -- -- Given that GCC by default generates code that uses the -- FP registers, PAL_clrfen is not useful except for DoS -- attacks. So turn the bleeding FPU back on and be done -- with it. 
*/ -- current_thread_info()->pcb.flags |= 1; -- __reload_thread(¤t_thread_info()->pcb); -- return; -- - case 5: /* illoc */ - default: /* unexpected instruction-fault type */ - ; -@@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, - - printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", - pc, va, opcode, reg); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - - got_exception: - /* Ok, we caught the exception, but we don't want it. Is there -@@ -632,7 +632,7 @@ got_exception: - local_irq_enable(); - while (1); - } -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - /* -diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c -index eee5102c3d889..e9193d52222ea 100644 ---- a/arch/alpha/mm/fault.c -+++ b/arch/alpha/mm/fault.c -@@ -204,7 +204,7 @@ retry: - printk(KERN_ALERT "Unable to handle kernel paging request at " - "virtual address %016lx\n", address); - die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - - /* We ran out of memory, or some other thing happened to us that - made us unable to handle the page fault gracefully. */ -diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h -index 088d348781c1c..0b7c902c72ba8 100644 ---- a/arch/arc/include/asm/atomic-llsc.h -+++ b/arch/arc/include/asm/atomic-llsc.h -@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \ - : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ - : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ - [i] "ir" (i) \ -- : "cc"); \ -+ : "cc", "memory"); \ - } \ - - #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ - : [val] "=&r" (val) \ - : [ctr] "r" (&v->counter), \ - [i] "ir" (i) \ -- : "cc"); \ -+ : "cc", "memory"); \ - \ - return val; \ - } -@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ - [orig] "=&r" (orig) \ - : [ctr] "r" (&v->counter), \ - [i] "ir" (i) \ -- : "cc"); \ -+ : "cc", "memory"); \ - \ - return orig; \ - } -diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h -index c5a8010fdc97d..9089f34baac3b 100644 ---- a/arch/arc/include/asm/atomic64-arcv2.h -+++ b/arch/arc/include/asm/atomic64-arcv2.h -@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \ - " bnz 1b \n" \ - : "=&r"(val) \ - : "r"(&v->counter), "ir"(a) \ -- : "cc"); \ -+ : "cc", "memory"); \ - } \ - - #define ATOMIC64_OP_RETURN(op, op1, op2) \ -@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \ - " bnz 1b \n" \ - : [val] "=&r"(val) \ - : "r"(&v->counter), "ir"(a) \ -- : "cc"); /* memory clobber comes from smp_mb() */ \ -+ : "cc", "memory"); \ - \ - return val; \ - } -@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \ - " bnz 1b \n" \ - : "=&r"(orig), "=&r"(val) \ - : "r"(&v->counter), "ir"(a) \ -- : "cc"); /* memory clobber comes from smp_mb() */ \ -+ : "cc", "memory"); \ - \ - return orig; \ - } -diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h -index 8f777d6441a5d..80347382a3800 100644 ---- a/arch/arc/include/asm/io.h -+++ b/arch/arc/include/asm/io.h -@@ -32,7 +32,7 @@ static inline void ioport_unmap(void __iomem *addr) - { - } - --extern void iounmap(const void __iomem *addr); -+extern void iounmap(const volatile void __iomem *addr); - - /* - * 
io{read,write}{16,32}be() macros -diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h -index c9434ff3aa4ce..8a3fb71e9cfad 100644 ---- a/arch/arc/include/asm/linkage.h -+++ b/arch/arc/include/asm/linkage.h -@@ -8,6 +8,10 @@ - - #include - -+#define ASM_NL ` /* use '`' to mark new line in macro */ -+#define __ALIGN .align 4 -+#define __ALIGN_STR __stringify(__ALIGN) -+ - #ifdef __ASSEMBLY__ - - .macro ST2 e, o, off -@@ -28,10 +32,6 @@ - #endif - .endm - --#define ASM_NL ` /* use '`' to mark new line in macro */ --#define __ALIGN .align 4 --#define __ALIGN_STR __stringify(__ALIGN) -- - /* annotation for data we want in DCCM - if enabled in .config */ - .macro ARCFP_DATA nm - #ifdef CONFIG_ARC_HAS_DCCM -diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h -index 8084ef2f64910..4e7a19cb8e528 100644 ---- a/arch/arc/include/asm/pgtable-levels.h -+++ b/arch/arc/include/asm/pgtable-levels.h -@@ -163,7 +163,7 @@ - #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) - #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) - #define set_pmd(pmdp, pmd) (*(pmdp) = pmd) --#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd)) -+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page(pmd)) - - /* - * 4th level paging: pte -diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S -index dd77a0c8f740b..66ba549b520fc 100644 ---- a/arch/arc/kernel/entry.S -+++ b/arch/arc/kernel/entry.S -@@ -196,6 +196,7 @@ tracesys_exit: - st r0, [sp, PT_r0] ; sys call return value in pt_regs - - ;POST Sys Call Ptrace Hook -+ mov r0, sp ; pt_regs needed - bl @syscall_trace_exit - b ret_from_exception ; NOT ret_from_system_call at is saves r0 which - ; we'd done before calling post hook above -diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c -index 3793876f42d9b..5f7f5aab361f1 100644 ---- a/arch/arc/kernel/process.c -+++ b/arch/arc/kernel/process.c -@@ -43,7 +43,7 @@ SYSCALL_DEFINE0(arc_gettls) - return task_thread_info(current)->thr_ptr; - } - --SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) -+SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new) - { - struct pt_regs *regs = current_pt_regs(); - u32 uval; -@@ -294,7 +294,7 @@ int elf_check_arch(const struct elf32_hdr *x) - eflags = x->e_flags; - if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) { - pr_err("ABI mismatch - you need newer toolchain\n"); -- force_sigsegv(SIGSEGV); -+ force_fatal_sig(SIGSEGV); - return 0; - } - -diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c -index 0ee75aca6e109..712c2311daefb 100644 ---- a/arch/arc/mm/ioremap.c -+++ b/arch/arc/mm/ioremap.c -@@ -94,7 +94,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, - EXPORT_SYMBOL(ioremap_prot); - - --void iounmap(const void __iomem *addr) -+void iounmap(const volatile void __iomem *addr) - { - /* weird double cast to handle phys_addr_t > 32 bits */ - if (arc_uncached_addr_space((phys_addr_t)(u32)addr)) -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index dcf2df6da98f0..f2fbb170d813c 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -4,6 +4,7 @@ config ARM - default y - select ARCH_32BIT_OFF_T - select ARCH_HAS_BINFMT_FLAT -+ select ARCH_HAS_CPU_FINALIZE_INIT if MMU - select ARCH_HAS_DEBUG_VIRTUAL if MMU - select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE - select ARCH_HAS_ELF_RANDOMIZE -@@ -1455,6 +1456,7 @@ config HIGHMEM - bool "High Memory Support" - depends on MMU - select KMAP_LOCAL -+ select 
KMAP_LOCAL_NON_LINEAR_PTE_ARRAY - help - The address space of ARM processors is only 4 Gigabytes large - and it has to accommodate user address space, kernel address -@@ -1740,7 +1742,6 @@ config CMDLINE - choice - prompt "Kernel command line type" if CMDLINE != "" - default CMDLINE_FROM_BOOTLOADER -- depends on ATAGS - - config CMDLINE_FROM_BOOTLOADER - bool "Use bootloader kernel arguments if available" -diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug -index 98436702e0c7e..644875d73ba15 100644 ---- a/arch/arm/Kconfig.debug -+++ b/arch/arm/Kconfig.debug -@@ -410,12 +410,12 @@ choice - Say Y here if you want kernel low-level debugging support - on i.MX25. - -- config DEBUG_IMX21_IMX27_UART -- bool "i.MX21 and i.MX27 Debug UART" -- depends on SOC_IMX21 || SOC_IMX27 -+ config DEBUG_IMX27_UART -+ bool "i.MX27 Debug UART" -+ depends on SOC_IMX27 - help - Say Y here if you want kernel low-level debugging support -- on i.MX21 or i.MX27. -+ on i.MX27. - - config DEBUG_IMX28_UART - bool "i.MX28 Debug UART" -@@ -1481,7 +1481,7 @@ config DEBUG_IMX_UART_PORT - int "i.MX Debug UART Port Selection" - depends on DEBUG_IMX1_UART || \ - DEBUG_IMX25_UART || \ -- DEBUG_IMX21_IMX27_UART || \ -+ DEBUG_IMX27_UART || \ - DEBUG_IMX31_UART || \ - DEBUG_IMX35_UART || \ - DEBUG_IMX50_UART || \ -@@ -1540,12 +1540,12 @@ config DEBUG_LL_INCLUDE - default "debug/icedcc.S" if DEBUG_ICEDCC - default "debug/imx.S" if DEBUG_IMX1_UART || \ - DEBUG_IMX25_UART || \ -- DEBUG_IMX21_IMX27_UART || \ -+ DEBUG_IMX27_UART || \ - DEBUG_IMX31_UART || \ - DEBUG_IMX35_UART || \ - DEBUG_IMX50_UART || \ - DEBUG_IMX51_UART || \ -- DEBUG_IMX53_UART ||\ -+ DEBUG_IMX53_UART || \ - DEBUG_IMX6Q_UART || \ - DEBUG_IMX6SL_UART || \ - DEBUG_IMX6SX_UART || \ -diff --git a/arch/arm/Makefile b/arch/arm/Makefile -index 847c31e7c3687..fa45837b8065c 100644 ---- a/arch/arm/Makefile -+++ b/arch/arm/Makefile -@@ -60,15 +60,15 @@ KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra) - # Note that GCC does not numerically define an architecture version - # macro, but instead defines a whole series of macros which makes - # testing for a specific architecture or later rather impossible. --arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m --arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) --arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) -+arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -+arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 -march=armv7-a -+arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 -march=armv6 - # Only override the compiler option if ARMv6. 
The ARMv6K extensions are - # always available in ARMv7 - ifeq ($(CONFIG_CPU_32v6),y) --arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) -+arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 -march=armv6k - endif --arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) -+arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 -march=armv5te - arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t - arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 - arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3m -@@ -82,7 +82,7 @@ tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi - tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi - tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi - tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi --tune-$(CONFIG_CPU_ARM946E) =$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) -+tune-$(CONFIG_CPU_ARM946E) =-mtune=arm9e - tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi - tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi - tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi -@@ -90,11 +90,11 @@ tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi - tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi - tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110 - tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100 --tune-$(CONFIG_CPU_XSCALE) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale --tune-$(CONFIG_CPU_XSC3) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale --tune-$(CONFIG_CPU_FEROCEON) =$(call cc-option,-mtune=marvell-f,-mtune=xscale) --tune-$(CONFIG_CPU_V6) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) --tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) -+tune-$(CONFIG_CPU_XSCALE) =-mtune=xscale -+tune-$(CONFIG_CPU_XSC3) =-mtune=xscale -+tune-$(CONFIG_CPU_FEROCEON) =-mtune=xscale -+tune-$(CONFIG_CPU_V6) =-mtune=arm1136j-s -+tune-$(CONFIG_CPU_V6K) =-mtune=arm1136j-s - - # Evaluate tune cc-option calls now - tune-y := $(tune-y) -diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S -index c0e7a745103e2..230030c130853 100644 ---- a/arch/arm/boot/compressed/efi-header.S -+++ b/arch/arm/boot/compressed/efi-header.S -@@ -9,16 +9,22 @@ - #include - - .macro __nop --#ifdef CONFIG_EFI_STUB -- @ This is almost but not quite a NOP, since it does clobber the -- @ condition flags. But it is the best we can do for EFI, since -- @ PE/COFF expects the magic string "MZ" at offset 0, while the -- @ ARM/Linux boot protocol expects an executable instruction -- @ there. -- .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000 --#else - AR_CLASS( mov r0, r0 ) - M_CLASS( nop.w ) -+ .endm -+ -+ .macro __initial_nops -+#ifdef CONFIG_EFI_STUB -+ @ This is a two-instruction NOP, which happens to bear the -+ @ PE/COFF signature "MZ" in the first two bytes, so the kernel -+ @ is accepted as an EFI binary. Booting via the UEFI stub -+ @ will not execute those instructions, but the ARM/Linux -+ @ boot protocol does, so we need some NOPs here. -+ .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000 -+ eor r5, r5, 0x4d000 @ undo previous insn -+#else -+ __nop -+ __nop - #endif - .endm - -diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S -index b1cb1972361b8..bf79f2f78d232 100644 ---- a/arch/arm/boot/compressed/head.S -+++ b/arch/arm/boot/compressed/head.S -@@ -203,7 +203,8 @@ start: - * were patching the initial instructions of the kernel, i.e - * had started to exploit this "patch area". 
- */ -- .rept 7 -+ __initial_nops -+ .rept 5 - __nop - .endr - #ifndef CONFIG_THUMB2_KERNEL -diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S -index 1bcb68ac4b011..3fcb3e62dc569 100644 ---- a/arch/arm/boot/compressed/vmlinux.lds.S -+++ b/arch/arm/boot/compressed/vmlinux.lds.S -@@ -23,6 +23,7 @@ SECTIONS - *(.ARM.extab*) - *(.note.*) - *(.rel.*) -+ *(.printk_index) - /* - * Discard any r/w data - this produces a link error if we have any, - * which is required for PIC decompression. Local data generates -@@ -57,6 +58,7 @@ SECTIONS - *(.rodata) - *(.rodata.*) - *(.data.rel.ro) -+ *(.data.rel.ro.*) - } - .piggydata : { - *(.piggydata) -diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile -index 7e0934180724d..7a72fc636a7a7 100644 ---- a/arch/arm/boot/dts/Makefile -+++ b/arch/arm/boot/dts/Makefile -@@ -127,6 +127,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \ - bcm47094-luxul-xwr-3150-v1.dtb \ - bcm47094-netgear-r8500.dtb \ - bcm47094-phicomm-k3.dtb \ -+ bcm53015-meraki-mr26.dtb \ - bcm53016-meraki-mr32.dtb \ - bcm94708.dtb \ - bcm94709.dtb \ -@@ -779,6 +780,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ - logicpd-som-lv-37xx-devkit.dtb \ - omap3430-sdp.dtb \ - omap3-beagle.dtb \ -+ omap3-beagle-ab4.dtb \ - omap3-beagle-xm.dtb \ - omap3-beagle-xm-ab.dtb \ - omap3-cm-t3517.dtb \ -diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi -index 124026fa0d095..f207499461b34 100644 ---- a/arch/arm/boot/dts/am335x-pcm-953.dtsi -+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi -@@ -12,22 +12,20 @@ - compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx"; - - /* Power */ -- regulators { -- vcc3v3: fixedregulator@1 { -- compatible = "regulator-fixed"; -- regulator-name = "vcc3v3"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- regulator-boot-on; -- }; -+ vcc3v3: fixedregulator1 { -+ compatible = "regulator-fixed"; -+ regulator-name = "vcc3v3"; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ regulator-boot-on; -+ }; - -- vcc1v8: fixedregulator@2 { -- compatible = "regulator-fixed"; -- regulator-name = "vcc1v8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- regulator-boot-on; -- }; -+ vcc1v8: fixedregulator2 { -+ compatible = "regulator-fixed"; -+ regulator-name = "vcc1v8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-boot-on; - }; - - /* User IO */ -diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi -index c9629cb5ccd1e..9a750883b987b 100644 ---- a/arch/arm/boot/dts/am33xx-l4.dtsi -+++ b/arch/arm/boot/dts/am33xx-l4.dtsi -@@ -1500,8 +1500,7 @@ - mmc1: mmc@0 { - compatible = "ti,am335-sdhci"; - ti,needs-special-reset; -- dmas = <&edma_xbar 24 0 0 -- &edma_xbar 25 0 0>; -+ dmas = <&edma 24 0>, <&edma 25 0>; - dma-names = "tx", "rx"; - interrupts = <64>; - reg = <0x0 0x1000>; -diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts -index 0d2fac98ce7d2..c8b80f156ec98 100644 ---- a/arch/arm/boot/dts/am3517-evm.dts -+++ b/arch/arm/boot/dts/am3517-evm.dts -@@ -161,6 +161,8 @@ - - /* HS USB Host PHY on PORT 1 */ - hsusb1_phy: hsusb1_phy { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&hsusb1_rst_pins>; - compatible = "usb-nop-xceiv"; - reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; /* gpio_57 */ - #phy-cells = <0>; -@@ -168,7 +170,9 @@ - }; - - &davinci_emac { -- status = "okay"; -+ pinctrl-names = "default"; -+ 
pinctrl-0 = <ðernet_pins>; -+ status = "okay"; - }; - - &davinci_mdio { -@@ -193,6 +197,8 @@ - }; - - &i2c2 { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&i2c2_pins>; - clock-frequency = <400000>; - /* User DIP swithes [1:8] / User LEDS [1:2] */ - tca6416: gpio@21 { -@@ -205,6 +211,8 @@ - }; - - &i2c3 { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&i2c3_pins>; - clock-frequency = <400000>; - }; - -@@ -223,6 +231,8 @@ - }; - - &usbhshost { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&hsusb1_pins>; - port1-mode = "ehci-phy"; - }; - -@@ -231,8 +241,35 @@ - }; - - &omap3_pmx_core { -- pinctrl-names = "default"; -- pinctrl-0 = <&hsusb1_rst_pins>; -+ -+ ethernet_pins: pinmux_ethernet_pins { -+ pinctrl-single,pins = < -+ OMAP3_CORE1_IOPAD(0x21fe, PIN_INPUT | MUX_MODE0) /* rmii_mdio_data */ -+ OMAP3_CORE1_IOPAD(0x2200, MUX_MODE0) /* rmii_mdio_clk */ -+ OMAP3_CORE1_IOPAD(0x2202, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd0 */ -+ OMAP3_CORE1_IOPAD(0x2204, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd1 */ -+ OMAP3_CORE1_IOPAD(0x2206, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_crs_dv */ -+ OMAP3_CORE1_IOPAD(0x2208, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_rxer */ -+ OMAP3_CORE1_IOPAD(0x220a, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd0 */ -+ OMAP3_CORE1_IOPAD(0x220c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd1 */ -+ OMAP3_CORE1_IOPAD(0x220e, PIN_OUTPUT_PULLDOWN |MUX_MODE0) /* rmii_txen */ -+ OMAP3_CORE1_IOPAD(0x2210, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_50mhz_clk */ -+ >; -+ }; -+ -+ i2c2_pins: pinmux_i2c2_pins { -+ pinctrl-single,pins = < -+ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */ -+ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */ -+ >; -+ }; -+ -+ i2c3_pins: pinmux_i2c3_pins { -+ pinctrl-single,pins = < -+ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */ -+ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */ -+ >; -+ }; - - leds_pins: pinmux_leds_pins { - pinctrl-single,pins = < -@@ -300,8 +337,6 @@ - }; - - &omap3_pmx_core2 { -- pinctrl-names = "default"; -- pinctrl-0 = <&hsusb1_pins>; - - hsusb1_pins: pinmux_hsusb1_pins { - pinctrl-single,pins = < -diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi -index 8b669e2eafec4..f7b680f6c48ad 100644 ---- a/arch/arm/boot/dts/am3517-som.dtsi -+++ b/arch/arm/boot/dts/am3517-som.dtsi -@@ -69,6 +69,8 @@ - }; - - &i2c1 { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&i2c1_pins>; - clock-frequency = <400000>; - - s35390a: s35390a@30 { -@@ -179,6 +181,13 @@ - - &omap3_pmx_core { - -+ i2c1_pins: pinmux_i2c1_pins { -+ pinctrl-single,pins = < -+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */ -+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */ -+ >; -+ }; -+ - wl12xx_buffer_pins: pinmux_wl12xx_buffer_pins { - pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x2156, PIN_OUTPUT | MUX_MODE4) /* mmc1_dat7.gpio_129 */ -diff --git a/arch/arm/boot/dts/am5748.dtsi b/arch/arm/boot/dts/am5748.dtsi -index c260aa1a85bdb..a1f029e9d1f3d 100644 ---- a/arch/arm/boot/dts/am5748.dtsi -+++ b/arch/arm/boot/dts/am5748.dtsi -@@ -25,6 +25,10 @@ - status = "disabled"; - }; - -+&usb4_tm { -+ status = "disabled"; -+}; -+ - &atl_tm { - status = "disabled"; - }; -diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts -index 2e94f32d9dfca..5de82729eb7ed 100644 ---- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts -+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts -@@ -527,7 +527,7 @@ - - 
interrupt-parent = <&gpio1>; - interrupts = <31 0>; -- pendown-gpio = <&gpio1 31 0>; -+ pendown-gpio = <&gpio1 31 GPIO_ACTIVE_LOW>; - - - ti,x-min = /bits/ 16 <0x0>; -diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi -index 46e6d3ed8f35a..c042c416a94a3 100644 ---- a/arch/arm/boot/dts/armada-370.dtsi -+++ b/arch/arm/boot/dts/armada-370.dtsi -@@ -74,7 +74,7 @@ - - pcie2: pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi -index 7f2f24a29e6c1..352a2f7ba3114 100644 ---- a/arch/arm/boot/dts/armada-375.dtsi -+++ b/arch/arm/boot/dts/armada-375.dtsi -@@ -582,7 +582,7 @@ - - pcie1: pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi -index cff1269f3fbfd..7146cc8f082af 100644 ---- a/arch/arm/boot/dts/armada-380.dtsi -+++ b/arch/arm/boot/dts/armada-380.dtsi -@@ -79,7 +79,7 @@ - /* x1 port */ - pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -98,7 +98,7 @@ - /* x1 port */ - pcie@3,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; - reg = <0x1800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts -index 5bd6a66d2c2b4..e7649c795699c 100644 ---- a/arch/arm/boot/dts/armada-385-turris-omnia.dts -+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts -@@ -23,6 +23,12 @@ - stdout-path = &uart0; - }; - -+ aliases { -+ ethernet0 = ð0; -+ ethernet1 = ð1; -+ ethernet2 = ð2; -+ }; -+ - memory { - device_type = "memory"; - reg = <0x00000000 0x40000000>; /* 1024 MB */ -@@ -450,7 +456,17 @@ - }; - }; - -- /* port 6 is connected to eth0 */ -+ ports@6 { -+ reg = <6>; -+ label = "cpu"; -+ ethernet = <ð0>; -+ phy-mode = "rgmii-id"; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; - }; - }; - }; -@@ -471,7 +487,7 @@ - marvell,function = "spi0"; - }; - -- spi0cs1_pins: spi0cs1-pins { -+ spi0cs2_pins: spi0cs2-pins { - marvell,pins = "mpp26"; - marvell,function = "spi0"; - }; -@@ -506,7 +522,7 @@ - }; - }; - -- /* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */ -+ /* MISO, MOSI, SCLK and CS2 are routed to pin header CN11 */ - }; - - &uart0 { -diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi -index f0022d10c7159..f081f7cb66e5f 100644 ---- a/arch/arm/boot/dts/armada-385.dtsi -+++ b/arch/arm/boot/dts/armada-385.dtsi -@@ -84,7 +84,7 @@ - /* x1 port */ - pcie2: pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -103,7 +103,7 @@ - /* x1 port */ - pcie3: pcie@3,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; - reg = 
<0x1800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -125,7 +125,7 @@ - */ - pcie4: pcie@4,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; -+ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; - reg = <0x2000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi -index 9b1a24cc5e91f..df3c8d1d8f641 100644 ---- a/arch/arm/boot/dts/armada-38x.dtsi -+++ b/arch/arm/boot/dts/armada-38x.dtsi -@@ -168,7 +168,7 @@ - }; - - uart0: serial@12000 { -- compatible = "marvell,armada-38x-uart"; -+ compatible = "marvell,armada-38x-uart", "ns16550a"; - reg = <0x12000 0x100>; - reg-shift = <2>; - interrupts = ; -@@ -178,7 +178,7 @@ - }; - - uart1: serial@12100 { -- compatible = "marvell,armada-38x-uart"; -+ compatible = "marvell,armada-38x-uart", "ns16550a"; - reg = <0x12100 0x100>; - reg-shift = <2>; - interrupts = ; -diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi -index e0b7c20998312..9525e7b7f4360 100644 ---- a/arch/arm/boot/dts/armada-39x.dtsi -+++ b/arch/arm/boot/dts/armada-39x.dtsi -@@ -453,7 +453,7 @@ - /* x1 port */ - pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -472,7 +472,7 @@ - /* x1 port */ - pcie@3,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; - reg = <0x1800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -494,7 +494,7 @@ - */ - pcie@4,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; -+ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; - reg = <0x2000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi -index 8558bf6bb54c6..d55fe162fc7f0 100644 ---- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi -+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi -@@ -97,7 +97,7 @@ - - pcie2: pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -115,7 +115,7 @@ - - pcie3: pcie@3,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; -+ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; - reg = <0x1800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -133,7 +133,7 @@ - - pcie4: pcie@4,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; -+ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; - reg = <0x2000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -151,7 +151,7 @@ - - pcie5: pcie@5,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; -+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; - reg = <0x2800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi -index 2d85fe8ac3272..fdcc818199401 100644 ---- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi -+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi -@@ -112,7 +112,7 @@ - - pcie2: pcie@2,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; -+ assigned-addresses = <0x82001000 
0 0x44000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -130,7 +130,7 @@ - - pcie3: pcie@3,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; -+ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; - reg = <0x1800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -148,7 +148,7 @@ - - pcie4: pcie@4,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; -+ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; - reg = <0x2000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -166,7 +166,7 @@ - - pcie5: pcie@5,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; -+ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; - reg = <0x2800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -184,7 +184,7 @@ - - pcie6: pcie@6,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; -+ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>; - reg = <0x3000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -202,7 +202,7 @@ - - pcie7: pcie@7,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; -+ assigned-addresses = <0x82003800 0 0x88000 0 0x2000>; - reg = <0x3800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -220,7 +220,7 @@ - - pcie8: pcie@8,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; -+ assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>; - reg = <0x4000 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -@@ -238,7 +238,7 @@ - - pcie9: pcie@9,0 { - device_type = "pci"; -- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; -+ assigned-addresses = <0x82004800 0 0x42000 0 0x2000>; - reg = <0x4800 0 0 0 0>; - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts -index 1d24b394ea4c3..a497dd135491b 100644 ---- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts -+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts -@@ -5,7 +5,7 @@ - - / { - model = "AST2500 EVB"; -- compatible = "aspeed,ast2500"; -+ compatible = "aspeed,ast2500-evb", "aspeed,ast2500"; - - aliases { - serial4 = &uart5; -diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts b/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts -index dd7148060c4a3..d0a5c2ff0fec4 100644 ---- a/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts -+++ b/arch/arm/boot/dts/aspeed-ast2600-evb-a1.dts -@@ -5,6 +5,7 @@ - - / { - model = "AST2600 A1 EVB"; -+ compatible = "aspeed,ast2600-evb-a1", "aspeed,ast2600"; - - /delete-node/regulator-vcc-sdhci0; - /delete-node/regulator-vcc-sdhci1; -diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts -index b7eb552640cbf..b8e55bf167aa8 100644 ---- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts -+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts -@@ -8,7 +8,7 @@ - - / { - model = "AST2600 EVB"; -- compatible = "aspeed,ast2600"; -+ compatible = "aspeed,ast2600-evb-a1", "aspeed,ast2600"; - - aliases { - serial4 = &uart5; -@@ -103,7 +103,7 @@ - &mac0 { - status = "okay"; - -- phy-mode = "rgmii"; -+ phy-mode = "rgmii-rxid"; - phy-handle = <ðphy0>; - - pinctrl-names = "default"; -@@ -114,7 +114,7 @@ - &mac1 { - status = "okay"; - -- phy-mode = "rgmii"; -+ phy-mode = "rgmii-rxid"; - phy-handle = <ðphy1>; - - pinctrl-names = "default"; -diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts -index 
9b4cf5ebe6d5f..c62aff908ab48 100644 ---- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts -+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts -@@ -63,7 +63,7 @@ - status = "okay"; - m25p,fast-read; - label = "bmc"; -- spi-max-frequency = <100000000>; /* 100 MHz */ -+ spi-max-frequency = <50000000>; /* 50 MHz */ - #include "openbmc-flash-layout.dtsi" - }; - }; -diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts -index 2efd70666738c..af7ea7cab8cfa 100644 ---- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts -+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts -@@ -231,6 +231,21 @@ - gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>; - }; - }; -+ -+ iio-hwmon { -+ compatible = "iio-hwmon"; -+ io-channels = <&adc1 7>; -+ }; -+}; -+ -+&adc1 { -+ status = "okay"; -+ aspeed,int-vref-microvolt = <2500000>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default -+ &pinctrl_adc10_default &pinctrl_adc11_default -+ &pinctrl_adc12_default &pinctrl_adc13_default -+ &pinctrl_adc14_default &pinctrl_adc15_default>; - }; - - &gpio0 { -diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts -index 6419c9762c0b6..6c9f34396a3ae 100644 ---- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts -+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts -@@ -246,6 +246,21 @@ - linux,code = <11>; - }; - }; -+ -+ iio-hwmon { -+ compatible = "iio-hwmon"; -+ io-channels = <&adc1 7>; -+ }; -+}; -+ -+&adc1 { -+ status = "okay"; -+ aspeed,int-vref-microvolt = <2500000>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default -+ &pinctrl_adc10_default &pinctrl_adc11_default -+ &pinctrl_adc12_default &pinctrl_adc13_default -+ &pinctrl_adc14_default &pinctrl_adc15_default>; - }; - - &ehci1 { -diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi -index 6dde51c2aed3f..ac07c240419a2 100644 ---- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi -+++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi -@@ -117,11 +117,6 @@ - groups = "FWSPID"; - }; - -- pinctrl_fwqspid_default: fwqspid_default { -- function = "FWQSPID"; -- groups = "FWQSPID"; -- }; -- - pinctrl_fwspiwp_default: fwspiwp_default { - function = "FWSPIWP"; - groups = "FWSPIWP"; -@@ -653,12 +648,12 @@ - }; - - pinctrl_qspi1_default: qspi1_default { -- function = "QSPI1"; -+ function = "SPI1"; - groups = "QSPI1"; - }; - - pinctrl_qspi2_default: qspi2_default { -- function = "QSPI2"; -+ function = "SPI2"; - groups = "QSPI2"; - }; - -diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi -index 1b47be1704f83..e5724b1a2e20d 100644 ---- a/arch/arm/boot/dts/aspeed-g6.dtsi -+++ b/arch/arm/boot/dts/aspeed-g6.dtsi -@@ -364,6 +364,41 @@ - status = "disabled"; - }; - -+ adc0: adc@1e6e9000 { -+ compatible = "aspeed,ast2600-adc0"; -+ reg = <0x1e6e9000 0x100>; -+ clocks = <&syscon ASPEED_CLK_APB2>; -+ resets = <&syscon ASPEED_RESET_ADC>; -+ interrupts = ; -+ #io-channel-cells = <1>; -+ status = "disabled"; -+ }; -+ -+ adc1: adc@1e6e9100 { -+ compatible = "aspeed,ast2600-adc1"; -+ reg = <0x1e6e9100 0x100>; -+ clocks = <&syscon ASPEED_CLK_APB2>; -+ resets = <&syscon ASPEED_RESET_ADC>; -+ interrupts = ; -+ #io-channel-cells = <1>; -+ status = "disabled"; -+ }; -+ -+ sbc: secure-boot-controller@1e6f2000 { -+ compatible = "aspeed,ast2600-sbc"; -+ reg = <0x1e6f2000 0x1000>; -+ }; -+ -+ video: video@1e700000 { -+ compatible = "aspeed,ast2600-video-engine"; -+ reg = 
<0x1e700000 0x1000>; -+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>, -+ <&syscon ASPEED_CLK_GATE_ECLK>; -+ clock-names = "vclk", "eclk"; -+ interrupts = ; -+ status = "disabled"; -+ }; -+ - gpio0: gpio@1e780000 { - #gpio-cells = <2>; - gpio-controller; -diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts -index b1068cca42287..fd8dc1183b3e8 100644 ---- a/arch/arm/boot/dts/at91-sam9x60ek.dts -+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts -@@ -233,10 +233,9 @@ - status = "okay"; - - eeprom@53 { -- compatible = "atmel,24c32"; -+ compatible = "atmel,24c02"; - reg = <0x53>; - pagesize = <16>; -- size = <128>; - status = "okay"; - }; - }; -diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -index 025a78310e3ab..a818e8ebd638f 100644 ---- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -@@ -68,8 +68,8 @@ - regulators { - vdd_3v3: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -87,8 +87,8 @@ - - vddio_ddr: VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1200000>; -+ regulator-max-microvolt = <1200000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -110,8 +110,8 @@ - - vdd_core: VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1250000>; -+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -152,8 +152,8 @@ - - LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-always-on; - - regulator-state-standby { -@@ -167,9 +167,8 @@ - - LDO2 { - regulator-name = "LDO2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -- regulator-always-on; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <3300000>; - - regulator-state-standby { - regulator-on-in-suspend; -diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts -index e06b58724ca83..4ebbbe65c0cee 100644 ---- a/arch/arm/boot/dts/at91-sama5d2_icp.dts -+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts -@@ -197,8 +197,8 @@ - regulators { - vdd_io_reg: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -216,8 +216,8 @@ - - VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1350000>; -+ regulator-max-microvolt = <1350000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -235,8 +235,8 @@ - - VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1250000>; 
-+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -258,7 +258,6 @@ - regulator-max-microvolt = <1850000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; -- regulator-always-on; - - regulator-state-standby { - regulator-on-in-suspend; -@@ -273,8 +272,8 @@ - - LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <2500000>; -+ regulator-max-microvolt = <2500000>; - regulator-always-on; - - regulator-state-standby { -@@ -288,8 +287,8 @@ - - LDO2 { - regulator-name = "LDO2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-always-on; - - regulator-state-standby { -@@ -323,21 +322,21 @@ - status = "okay"; - - eeprom@50 { -- compatible = "atmel,24c32"; -+ compatible = "atmel,24c02"; - reg = <0x50>; - pagesize = <16>; - status = "okay"; - }; - - eeprom@52 { -- compatible = "atmel,24c32"; -+ compatible = "atmel,24c02"; - reg = <0x52>; - pagesize = <16>; - status = "disabled"; - }; - - eeprom@53 { -- compatible = "atmel,24c32"; -+ compatible = "atmel,24c02"; - reg = <0x53>; - pagesize = <16>; - status = "disabled"; -diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts -index d72c042f28507..a49c2966b41e2 100644 ---- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts -+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts -@@ -57,8 +57,8 @@ - }; - - spi0: spi@f0004000 { -- pinctrl-names = "default"; -- pinctrl-0 = <&pinctrl_spi0_cs>; -+ pinctrl-names = "default", "cs"; -+ pinctrl-1 = <&pinctrl_spi0_cs>; - cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>; - status = "okay"; - }; -@@ -171,8 +171,8 @@ - }; - - spi1: spi@f8008000 { -- pinctrl-names = "default"; -- pinctrl-0 = <&pinctrl_spi1_cs>; -+ pinctrl-names = "default", "cs"; -+ pinctrl-1 = <&pinctrl_spi1_cs>; - cs-gpios = <&pioC 25 0>; - status = "okay"; - }; -diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts -index d241c24f0d836..e519d27479362 100644 ---- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts -+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts -@@ -81,8 +81,8 @@ - }; - - spi1: spi@fc018000 { -- pinctrl-names = "default"; -- pinctrl-0 = <&pinctrl_spi0_cs>; -+ pinctrl-names = "default", "cs"; -+ pinctrl-1 = <&pinctrl_spi1_cs>; - cs-gpios = <&pioB 21 0>; - status = "okay"; - }; -@@ -140,7 +140,7 @@ - atmel,pins = - ; - }; -- pinctrl_spi0_cs: spi0_cs_default { -+ pinctrl_spi1_cs: spi1_cs_default { - atmel,pins = - ; - }; -diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts -index f3d6aaa3a78dc..0ba856066ffb2 100644 ---- a/arch/arm/boot/dts/at91-sama7g5ek.dts -+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts -@@ -169,8 +169,8 @@ - regulators { - vdd_3v3: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -188,8 +188,8 @@ - - vddioddr: VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <1300000>; -- regulator-max-microvolt = <1450000>; -+ regulator-min-microvolt = <1350000>; -+ regulator-max-microvolt = <1350000>; - regulator-initial-mode = <2>; - 
regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -209,8 +209,8 @@ - - vddcore: VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <1100000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1150000>; -+ regulator-max-microvolt = <1150000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -228,8 +228,8 @@ - - vddcpu: VDD_OTHER { - regulator-name = "VDD_OTHER"; -- regulator-min-microvolt = <1125000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1050000>; -+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-ramp-delay = <3125>; -@@ -248,8 +248,8 @@ - - vldo1: LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; - regulator-always-on; - - regulator-state-standby { -@@ -403,7 +403,7 @@ - pinctrl_flx3_default: flx3_default { - pinmux = , - ; -- bias-disable; -+ bias-pull-up; - }; - - pinctrl_flx4_default: flx4_default { -@@ -659,7 +659,7 @@ - }; - - &shdwc { -- atmel,shdwc-debouncer = <976>; -+ debounce-delay-us = <976>; - status = "okay"; - - input@0 { -diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts -index 3ca97b47c69ce..7e5c598e7e68f 100644 ---- a/arch/arm/boot/dts/at91-tse850-3.dts -+++ b/arch/arm/boot/dts/at91-tse850-3.dts -@@ -262,7 +262,7 @@ - &macb1 { - status = "okay"; - -- phy-mode = "rgmii"; -+ phy-mode = "rmii"; - - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi -index d1181ead18e5a..21344fbc89e5e 100644 ---- a/arch/arm/boot/dts/at91rm9200.dtsi -+++ b/arch/arm/boot/dts/at91rm9200.dtsi -@@ -660,7 +660,7 @@ - compatible = "atmel,at91rm9200-udc"; - reg = <0xfffb0000 0x4000>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>; -- clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>; -+ clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 1>; - clock-names = "pclk", "hclk"; - status = "disabled"; - }; -diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts -index beed819609e8d..8f3b483bb64dd 100644 ---- a/arch/arm/boot/dts/at91sam9261ek.dts -+++ b/arch/arm/boot/dts/at91sam9261ek.dts -@@ -156,7 +156,7 @@ - compatible = "ti,ads7843"; - interrupts-extended = <&pioC 2 IRQ_TYPE_EDGE_BOTH>; - spi-max-frequency = <3000000>; -- pendown-gpio = <&pioC 2 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&pioC 2 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <150>; - ti,x-max = /bits/ 16 <3830>; -diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi -index 87bb39060e8be..4783e657b4cb6 100644 ---- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi -+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi -@@ -39,6 +39,13 @@ - - }; - -+ usb1 { -+ pinctrl_usb1_vbus_gpio: usb1_vbus_gpio { -+ atmel,pins = -+ ; /* PC5 GPIO */ -+ }; -+ }; -+ - mmc0_slot1 { - pinctrl_board_mmc0_slot1: mmc0_slot1-board { - atmel,pins = -@@ -84,6 +91,8 @@ - }; - - usb1: gadget@fffa4000 { -+ pinctrl-0 = <&pinctrl_usb1_vbus_gpio>; -+ pinctrl-names = "default"; - atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>; - status = "okay"; - }; -@@ -219,6 +228,12 @@ - wm8731: wm8731@1b { - compatible = "wm8731"; - reg = <0x1b>; -+ -+ /* PCK0 at 12MHz */ -+ clocks = <&pmc PMC_TYPE_SYSTEM 8>; -+ clock-names = "mclk"; -+ 
assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>; -+ assigned-clock-rates = <12000000>; - }; - }; - -diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi -index 748df7955ae67..e96ddb2e26e2c 100644 ---- a/arch/arm/boot/dts/bcm-nsp.dtsi -+++ b/arch/arm/boot/dts/bcm-nsp.dtsi -@@ -77,7 +77,7 @@ - interrupt-affinity = <&cpu0>, <&cpu1>; - }; - -- mpcore@19000000 { -+ mpcore-bus@19000000 { - compatible = "simple-bus"; - ranges = <0x00000000 0x19000000 0x00023000>; - #address-cells = <1>; -@@ -219,7 +219,7 @@ - status = "disabled"; - }; - -- sdio: sdhci@21000 { -+ sdio: mmc@21000 { - compatible = "brcm,sdhci-iproc-cygnus"; - reg = <0x21000 0x100>; - interrupts = ; -diff --git a/arch/arm/boot/dts/bcm2711-rpi-400.dts b/arch/arm/boot/dts/bcm2711-rpi-400.dts -index f4d2fc20397c7..c53d9eb0b8027 100644 ---- a/arch/arm/boot/dts/bcm2711-rpi-400.dts -+++ b/arch/arm/boot/dts/bcm2711-rpi-400.dts -@@ -28,12 +28,12 @@ - &expgpio { - gpio-line-names = "BT_ON", - "WL_ON", -- "", -+ "PWR_LED_OFF", - "GLOBAL_RESET", - "VDD_SD_IO_SEL", -- "CAM_GPIO", -+ "GLOBAL_SHUTDOWN", - "SD_PWR_ON", -- "SD_OC_N"; -+ "SHUTDOWN_REQUEST"; - }; - - &genet_mdio { -diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi -index 3b60297af7f60..89af57482bc8f 100644 ---- a/arch/arm/boot/dts/bcm2711.dtsi -+++ b/arch/arm/boot/dts/bcm2711.dtsi -@@ -290,6 +290,7 @@ - - hvs: hvs@7e400000 { - compatible = "brcm,bcm2711-hvs"; -+ reg = <0x7e400000 0x8000>; - interrupts = ; - }; - -@@ -458,12 +459,26 @@ - #size-cells = <0>; - enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit - -+ /* Source for d/i-cache-line-size and d/i-cache-sets -+ * https://developer.arm.com/documentation/100095/0003 -+ * /Level-1-Memory-System/About-the-L1-memory-system?lang=en -+ * Source for d/i-cache-size -+ * https://www.raspberrypi.com/documentation/computers -+ * /processors.html#bcm2711 -+ */ - cpu0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a72"; - reg = <0>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000d8>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ i-cache-size = <0xc000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set -+ next-level-cache = <&l2>; - }; - - cpu1: cpu@1 { -@@ -472,6 +487,13 @@ - reg = <1>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000e0>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ i-cache-size = <0xc000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set -+ next-level-cache = <&l2>; - }; - - cpu2: cpu@2 { -@@ -480,6 +502,13 @@ - reg = <2>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000e8>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ i-cache-size = <0xc000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set -+ next-level-cache = <&l2>; - }; - - cpu3: cpu@3 { -@@ -488,6 +517,28 @@ - reg = <3>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000f0>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ i-cache-size = <0xc000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set -+ 
next-level-cache = <&l2>; -+ }; -+ -+ /* Source for d/i-cache-line-size and d/i-cache-sets -+ * https://developer.arm.com/documentation/100095/0003 -+ * /Level-2-Memory-System/About-the-L2-memory-system?lang=en -+ * Source for d/i-cache-size -+ * https://www.raspberrypi.com/documentation/computers -+ * /processors.html#bcm2711 -+ */ -+ l2: l2-cache0 { -+ compatible = "cache"; -+ cache-size = <0x100000>; -+ cache-line-size = <64>; -+ cache-sets = <1024>; // 1MiB(size)/64(line-size)=16384ways/16-way set -+ cache-level = <2>; - }; - }; - -@@ -506,11 +557,17 @@ - #address-cells = <3>; - #interrupt-cells = <1>; - #size-cells = <2>; -- interrupts = , -+ interrupts = , - ; - interrupt-names = "pcie", "msi"; - interrupt-map-mask = <0x0 0x0 0x0 0x7>; - interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 -+ IRQ_TYPE_LEVEL_HIGH>, -+ <0 0 0 2 &gicv2 GIC_SPI 144 -+ IRQ_TYPE_LEVEL_HIGH>, -+ <0 0 0 3 &gicv2 GIC_SPI 145 -+ IRQ_TYPE_LEVEL_HIGH>, -+ <0 0 0 4 &gicv2 GIC_SPI 146 - IRQ_TYPE_LEVEL_HIGH>; - msi-controller; - msi-parent = <&pcie0>; -@@ -576,6 +633,8 @@ - , - ; - -+ gpio-ranges = <&gpio 0 0 58>; -+ - gpclk0_gpio49: gpclk0_gpio49 { - pin-gpclk { - pins = "gpio49"; -diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts -index 1b63d6b19750b..25d87212cefd3 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-b.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts -@@ -53,18 +53,17 @@ - "GPIO18", - "NC", /* GPIO19 */ - "NC", /* GPIO20 */ -- "GPIO21", -+ "CAM_GPIO0", - "GPIO22", - "GPIO23", - "GPIO24", - "GPIO25", - "NC", /* GPIO26 */ -- "CAM_GPIO0", -- /* Binary number representing build/revision */ -- "CONFIG0", -- "CONFIG1", -- "CONFIG2", -- "CONFIG3", -+ "GPIO27", -+ "GPIO28", -+ "GPIO29", -+ "GPIO30", -+ "GPIO31", - "NC", /* GPIO32 */ - "NC", /* GPIO33 */ - "NC", /* GPIO34 */ -diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts -index 33b2b77aa47db..00582eb2c12e2 100644 ---- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts -+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts -@@ -74,16 +74,18 @@ - "GPIO27", - "SDA0", - "SCL0", -- "NC", /* GPIO30 */ -- "NC", /* GPIO31 */ -- "NC", /* GPIO32 */ -- "NC", /* GPIO33 */ -- "NC", /* GPIO34 */ -- "NC", /* GPIO35 */ -- "NC", /* GPIO36 */ -- "NC", /* GPIO37 */ -- "NC", /* GPIO38 */ -- "NC", /* GPIO39 */ -+ /* Used by BT module */ -+ "CTS0", -+ "RTS0", -+ "TXD0", -+ "RXD0", -+ /* Used by Wifi */ -+ "SD1_CLK", -+ "SD1_CMD", -+ "SD1_DATA0", -+ "SD1_DATA1", -+ "SD1_DATA2", -+ "SD1_DATA3", - "CAM_GPIO1", /* GPIO40 */ - "WL_ON", /* GPIO41 */ - "NC", /* GPIO42 */ -diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts -index 61010266ca9a3..90472e76a313e 100644 ---- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts -+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts -@@ -45,7 +45,7 @@ - #gpio-cells = <2>; - gpio-line-names = "BT_ON", - "WL_ON", -- "STATUS_LED_R", -+ "PWR_LED_R", - "LAN_RUN", - "", - "CAM_GPIO0", -diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts -index 588d9411ceb61..3dfce4312dfc4 100644 ---- a/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts -+++ b/arch/arm/boot/dts/bcm2837-rpi-cm3-io3.dts -@@ -63,8 +63,8 @@ - "GPIO43", - "GPIO44", - "GPIO45", -- "GPIO46", -- "GPIO47", -+ "SMPS_SCL", -+ "SMPS_SDA", - /* Used by eMMC */ - "SD_CLK_R", - "SD_CMD_R", -diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi -index 0199ec98cd616..5dbdebc462594 100644 ---- a/arch/arm/boot/dts/bcm2837.dtsi -+++ 
b/arch/arm/boot/dts/bcm2837.dtsi -@@ -40,12 +40,26 @@ - #size-cells = <0>; - enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit - -+ /* Source for d/i-cache-line-size and d/i-cache-sets -+ * https://developer.arm.com/documentation/ddi0500/e/level-1-memory-system -+ * /about-the-l1-memory-system?lang=en -+ * -+ * Source for d/i-cache-size -+ * https://magpi.raspberrypi.com/articles/raspberry-pi-3-specs-benchmarks -+ */ - cpu0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a53"; - reg = <0>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000d8>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set -+ i-cache-size = <0x8000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ next-level-cache = <&l2>; - }; - - cpu1: cpu@1 { -@@ -54,6 +68,13 @@ - reg = <1>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000e0>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set -+ i-cache-size = <0x8000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ next-level-cache = <&l2>; - }; - - cpu2: cpu@2 { -@@ -62,6 +83,13 @@ - reg = <2>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000e8>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set -+ i-cache-size = <0x8000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ next-level-cache = <&l2>; - }; - - cpu3: cpu@3 { -@@ -70,6 +98,27 @@ - reg = <3>; - enable-method = "spin-table"; - cpu-release-addr = <0x0 0x000000f0>; -+ d-cache-size = <0x8000>; -+ d-cache-line-size = <64>; -+ d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set -+ i-cache-size = <0x8000>; -+ i-cache-line-size = <64>; -+ i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set -+ next-level-cache = <&l2>; -+ }; -+ -+ /* Source for cache-line-size + cache-sets -+ * https://developer.arm.com/documentation/ddi0500 -+ * /e/level-2-memory-system/about-the-l2-memory-system?lang=en -+ * Source for cache-size -+ * https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf -+ */ -+ l2: l2-cache0 { -+ compatible = "cache"; -+ cache-size = <0x80000>; -+ cache-line-size = <64>; -+ cache-sets = <512>; // 512KiB(size)/64(line-size)=8192ways/16-way set -+ cache-level = <2>; - }; - }; - }; -diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi -index a3e06b6809476..c113661a6668f 100644 ---- a/arch/arm/boot/dts/bcm283x.dtsi -+++ b/arch/arm/boot/dts/bcm283x.dtsi -@@ -126,6 +126,8 @@ - interrupt-controller; - #interrupt-cells = <2>; - -+ gpio-ranges = <&gpio 0 0 54>; -+ - /* Defines common pin muxing groups - * - * While each pin can have its mux selected -diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts -index cd797b4202ad8..01c48faabfade 100644 ---- a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts -+++ b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts -@@ -19,7 +19,8 @@ - - memory@0 { - device_type = "memory"; -- reg = <0x00000000 0x08000000>; -+ reg = <0x00000000 0x08000000>, -+ <0x88000000 0x08000000>; - }; - - gpio-keys { -diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts -index 
61c7b137607e5..7900aac4f35a9 100644 ---- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts -+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts -@@ -20,7 +20,7 @@ - bootargs = "console=ttyS0,115200 earlycon"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x08000000>; -diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts -index 6c6bb7b17d27a..7546c8d07bcd7 100644 ---- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts -+++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts -@@ -19,7 +19,7 @@ - bootargs = "console=ttyS0,115200"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x08000000>; -diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts -index d29e7f80ea6aa..beae9eab9cb8c 100644 ---- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts -+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts -@@ -19,7 +19,7 @@ - bootargs = "console=ttyS0,115200"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x18000000>; -diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts -index 9b6887d477d86..7879f7d7d9c33 100644 ---- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts -+++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts -@@ -16,7 +16,7 @@ - bootargs = "console=ttyS0,115200"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x08000000>; -diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts -index 7989a53597d4f..56d309dbc6b0d 100644 ---- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts -+++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts -@@ -19,7 +19,7 @@ - bootargs = "console=ttyS0,115200"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x08000000>; -diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts -index 87b655be674c5..184e3039aa864 100644 ---- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts -+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts -@@ -30,7 +30,7 @@ - bootargs = "console=ttyS0,115200"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x08000000>; -diff --git a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts -index f806be5da7237..c2a266a439d05 100644 ---- a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts -+++ b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts -@@ -15,7 +15,7 @@ - bootargs = "console=ttyS0,115200 earlycon"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>; - }; -diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts -index 05d4f2931772b..9bef6b9bfa8d9 100644 ---- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts -+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts -@@ -129,7 +129,7 @@ - }; - }; - -- mdio-bus-mux@18003000 { -+ mdio-mux@18003000 { - - /* BIT(9) = 1 => external mdio */ - mdio@200 { -diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts -index 452b8d0ab180e..b0d8a688141d3 100644 ---- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts -+++ 
b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts -@@ -16,7 +16,7 @@ - bootargs = "earlycon"; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>, - <0x88000000 0x18000000>; -diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts -index 57ca1cfaecd8e..00e688b45d981 100644 ---- a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts -+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts -@@ -46,3 +46,16 @@ - }; - }; - }; -+ -+&gmac0 { -+ phy-mode = "rgmii"; -+ phy-handle = <&bcm54210e>; -+ -+ mdio { -+ /delete-node/ switch@1e; -+ -+ bcm54210e: ethernet-phy@0 { -+ reg = <0>; -+ }; -+ }; -+}; -diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts -index 2e1a7e382cb7a..78c80a5d3f4fa 100644 ---- a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts -+++ b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts -@@ -83,3 +83,16 @@ - }; - }; - }; -+ -+&gmac0 { -+ phy-mode = "rgmii"; -+ phy-handle = <&bcm54210e>; -+ -+ mdio { -+ /delete-node/ switch@1e; -+ -+ bcm54210e: ethernet-phy@0 { -+ reg = <0>; -+ }; -+ }; -+}; -diff --git a/arch/arm/boot/dts/bcm53015-meraki-mr26.dts b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts -new file mode 100644 -index 0000000000000..ca2266b936ee2 ---- /dev/null -+++ b/arch/arm/boot/dts/bcm53015-meraki-mr26.dts -@@ -0,0 +1,166 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+/* -+ * Broadcom BCM470X / BCM5301X ARM platform code. -+ * DTS for Meraki MR26 / Codename: Venom -+ * -+ * Copyright (C) 2022 Christian Lamparter -+ */ -+ -+/dts-v1/; -+ -+#include "bcm4708.dtsi" -+#include "bcm5301x-nand-cs0-bch8.dtsi" -+#include -+ -+/ { -+ compatible = "meraki,mr26", "brcm,bcm53015", "brcm,bcm4708"; -+ model = "Meraki MR26"; -+ -+ memory@0 { -+ reg = <0x00000000 0x08000000>; -+ device_type = "memory"; -+ }; -+ -+ leds { -+ compatible = "gpio-leds"; -+ -+ led-0 { -+ function = LED_FUNCTION_FAULT; -+ color = ; -+ gpios = <&chipcommon 13 GPIO_ACTIVE_HIGH>; -+ panic-indicator; -+ }; -+ led-1 { -+ function = LED_FUNCTION_INDICATOR; -+ color = ; -+ gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>; -+ }; -+ }; -+ -+ keys { -+ compatible = "gpio-keys"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ key-restart { -+ label = "Reset"; -+ linux,code = ; -+ gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+}; -+ -+&uart0 { -+ clock-frequency = <50000000>; -+ /delete-property/ clocks; -+}; -+ -+&uart1 { -+ status = "disabled"; -+}; -+ -+&gmac0 { -+ status = "okay"; -+}; -+ -+&gmac1 { -+ status = "disabled"; -+}; -+&gmac2 { -+ status = "disabled"; -+}; -+&gmac3 { -+ status = "disabled"; -+}; -+ -+&nandcs { -+ nand-ecc-algo = "hw"; -+ -+ partitions { -+ compatible = "fixed-partitions"; -+ #address-cells = <0x1>; -+ #size-cells = <0x1>; -+ -+ partition@0 { -+ label = "u-boot"; -+ reg = <0x0 0x200000>; -+ read-only; -+ }; -+ -+ partition@200000 { -+ label = "u-boot-env"; -+ reg = <0x200000 0x200000>; -+ /* empty */ -+ }; -+ -+ partition@400000 { -+ label = "u-boot-backup"; -+ reg = <0x400000 0x200000>; -+ /* empty */ -+ }; -+ -+ partition@600000 { -+ label = "u-boot-env-backup"; -+ reg = <0x600000 0x200000>; -+ /* empty */ -+ }; -+ -+ partition@800000 { -+ label = "ubi"; -+ reg = <0x800000 0x7780000>; -+ }; -+ }; -+}; -+ -+&srab { -+ status = "okay"; -+ -+ ports { -+ port@0 { -+ reg = <0>; -+ label = "poe"; -+ }; -+ -+ port@5 { -+ reg = <5>; -+ label = "cpu"; -+ ethernet = <&gmac0>; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ }; -+}; -+ -+&i2c0 { 
-+ status = "okay"; -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&pinmux_i2c>; -+ -+ clock-frequency = <100000>; -+ -+ ina219@40 { -+ compatible = "ti,ina219"; /* PoE power */ -+ reg = <0x40>; -+ shunt-resistor = <60000>; /* = 60 mOhms */ -+ }; -+ -+ eeprom@56 { -+ compatible = "atmel,24c64"; -+ reg = <0x56>; -+ pagesize = <32>; -+ read-only; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ /* it's empty */ -+ }; -+}; -+ -+&thermal { -+ status = "disabled"; -+ /* does not work, reads 418 degree Celsius */ -+}; -diff --git a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts -index 3b978dc8997a4..edf9910100b02 100644 ---- a/arch/arm/boot/dts/bcm53016-meraki-mr32.dts -+++ b/arch/arm/boot/dts/bcm53016-meraki-mr32.dts -@@ -20,7 +20,7 @@ - bootargs = " console=ttyS0,115200n8 earlycon"; - }; - -- memory { -+ memory@0 { - reg = <0x00000000 0x08000000>; - device_type = "memory"; - }; -@@ -195,3 +195,25 @@ - }; - }; - }; -+ -+&srab { -+ status = "okay"; -+ -+ ports { -+ port@0 { -+ reg = <0>; -+ label = "poe"; -+ }; -+ -+ port@5 { -+ reg = <5>; -+ label = "cpu"; -+ ethernet = <&gmac0>; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ }; -+}; -diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi -index f92089290ccd5..b4b73ab996264 100644 ---- a/arch/arm/boot/dts/bcm5301x.dtsi -+++ b/arch/arm/boot/dts/bcm5301x.dtsi -@@ -19,7 +19,7 @@ - #size-cells = <1>; - interrupt-parent = <&gic>; - -- chipcommonA@18000000 { -+ chipcommon-a-bus@18000000 { - compatible = "simple-bus"; - ranges = <0x00000000 0x18000000 0x00001000>; - #address-cells = <1>; -@@ -44,7 +44,7 @@ - }; - }; - -- mpcore@19000000 { -+ mpcore-bus@19000000 { - compatible = "simple-bus"; - ranges = <0x00000000 0x19000000 0x00023000>; - #address-cells = <1>; -@@ -242,6 +242,8 @@ - - gpio-controller; - #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; - }; - - pcie0: pcie@12000 { -@@ -369,8 +371,8 @@ - #address-cells = <1>; - }; - -- mdio-bus-mux@18003000 { -- compatible = "mdio-mux-mmioreg"; -+ mdio-mux@18003000 { -+ compatible = "mdio-mux-mmioreg", "mdio-mux"; - mdio-parent-bus = <&mdio>; - #address-cells = <1>; - #size-cells = <0>; -@@ -408,27 +410,27 @@ - i2c0: i2c@18009000 { - compatible = "brcm,iproc-i2c"; - reg = <0x18009000 0x50>; -- interrupts = ; -+ interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - clock-frequency = <100000>; - status = "disabled"; - }; - -- dmu@1800c000 { -+ dmu-bus@1800c000 { - compatible = "simple-bus"; - ranges = <0 0x1800c000 0x1000>; - #address-cells = <1>; - #size-cells = <1>; - -- cru@100 { -- compatible = "simple-bus"; -+ cru-bus@100 { -+ compatible = "brcm,ns-cru", "simple-mfd"; - reg = <0x100 0x1a4>; - ranges; - #address-cells = <1>; - #size-cells = <1>; - -- lcpll0: lcpll0@100 { -+ lcpll0: clock-controller@100 { - #clock-cells = <1>; - compatible = "brcm,nsp-lcpll0"; - reg = <0x100 0x14>; -@@ -437,7 +439,7 @@ - "sdio", "ddr_phy"; - }; - -- genpll: genpll@140 { -+ genpll: clock-controller@140 { - #clock-cells = <1>; - compatible = "brcm,nsp-genpll"; - reg = <0x140 0x24>; -@@ -448,7 +450,12 @@ - "sata1", "sata2"; - }; - -- pinctrl: pin-controller@1c0 { -+ syscon@180 { -+ compatible = "brcm,cru-clkset", "syscon"; -+ reg = <0x180 0x4>; -+ }; -+ -+ pinctrl: pinctrl@1c0 { - compatible = "brcm,bcm4708-pinmux"; - reg = <0x1c0 0x24>; - reg-names = "cru_gpio_control"; -@@ -535,7 +542,6 @@ - "spi_lr_session_done", - "spi_lr_overread"; - clocks = <&iprocmed>; -- clock-names = "iprocmed"; - num-cs = 
<2>; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi -index 51546fccc6168..933b6a380c367 100644 ---- a/arch/arm/boot/dts/bcm53573.dtsi -+++ b/arch/arm/boot/dts/bcm53573.dtsi -@@ -127,6 +127,9 @@ - - pcie0: pcie@2000 { - reg = <0x00002000 0x1000>; -+ -+ #address-cells = <3>; -+ #size-cells = <2>; - }; - - usb2: usb2@4000 { -diff --git a/arch/arm/boot/dts/bcm94708.dts b/arch/arm/boot/dts/bcm94708.dts -index 3d13e46c69494..d9eb2040b9631 100644 ---- a/arch/arm/boot/dts/bcm94708.dts -+++ b/arch/arm/boot/dts/bcm94708.dts -@@ -38,7 +38,7 @@ - model = "NorthStar SVK (BCM94708)"; - compatible = "brcm,bcm94708", "brcm,bcm4708"; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>; - }; -diff --git a/arch/arm/boot/dts/bcm94709.dts b/arch/arm/boot/dts/bcm94709.dts -index 5017b7b259cbe..618c812eef73e 100644 ---- a/arch/arm/boot/dts/bcm94709.dts -+++ b/arch/arm/boot/dts/bcm94709.dts -@@ -38,7 +38,7 @@ - model = "NorthStar SVK (BCM94709)"; - compatible = "brcm,bcm94709", "brcm,bcm4709", "brcm,bcm4708"; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>; - }; -diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts -index b0b8c774a37f9..1f0be30e54435 100644 ---- a/arch/arm/boot/dts/bcm947189acdbmr.dts -+++ b/arch/arm/boot/dts/bcm947189acdbmr.dts -@@ -60,9 +60,9 @@ - spi { - compatible = "spi-gpio"; - num-chipselects = <1>; -- gpio-sck = <&chipcommon 21 0>; -- gpio-miso = <&chipcommon 22 0>; -- gpio-mosi = <&chipcommon 23 0>; -+ sck-gpios = <&chipcommon 21 0>; -+ miso-gpios = <&chipcommon 22 0>; -+ mosi-gpios = <&chipcommon 23 0>; - cs-gpios = <&chipcommon 24 0>; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi -index 89e0bdaf3a85f..726d353eda686 100644 ---- a/arch/arm/boot/dts/dove.dtsi -+++ b/arch/arm/boot/dts/dove.dtsi -@@ -129,7 +129,7 @@ - pcie1: pcie@2 { - device_type = "pci"; - status = "disabled"; -- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; -+ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; - reg = <0x1000 0 0 0 0>; - clocks = <&gate_clk 5>; - marvell,pcie-port = <1>; -diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi -index 956a26d52a4c3..5733e3a4ea8e7 100644 ---- a/arch/arm/boot/dts/dra7-l4.dtsi -+++ b/arch/arm/boot/dts/dra7-l4.dtsi -@@ -3482,8 +3482,7 @@ - ti,timer-pwm; - }; - }; -- -- target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ -+ timer15_target: target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ - compatible = "ti,sysc-omap4-timer", "ti,sysc"; - reg = <0x2c000 0x4>, - <0x2c010 0x4>; -@@ -3511,7 +3510,7 @@ - }; - }; - -- target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ -+ timer16_target: target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ - compatible = "ti,sysc-omap4-timer", "ti,sysc"; - reg = <0x2e000 0x4>, - <0x2e010 0x4>; -@@ -4189,11 +4188,11 @@ - reg = <0x1d0010 0x4>; - reg-names = "sysc"; - ti,sysc-midle = , -- , -- ; -+ ; - ti,sysc-sidle = , - , - ; -+ power-domains = <&prm_vpe>; - clocks = <&vpe_clkctrl DRA7_VPE_VPE_CLKCTRL 0>; - clock-names = "fck"; - #address-cells = <1>; -diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi -index dfc1ef8ef6aea..61a3fb3e2a2f9 100644 ---- a/arch/arm/boot/dts/dra7.dtsi -+++ b/arch/arm/boot/dts/dra7.dtsi -@@ -1320,20 +1320,20 @@ - }; - - /* Local timers, see ARM architected timer wrap erratum i940 */ --&timer3_target { -+&timer15_target { 
- ti,no-reset-on-init; - ti,no-idle; - timer@0 { -- assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>; -+ assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; - assigned-clock-parents = <&timer_sys_clk_div>; - }; - }; - --&timer4_target { -+&timer16_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { -- assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>; -+ assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; - assigned-clock-parents = <&timer_sys_clk_div>; - }; - }; -diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi -index cfb239d5186ac..54b4de6a5925d 100644 ---- a/arch/arm/boot/dts/e60k02.dtsi -+++ b/arch/arm/boot/dts/e60k02.dtsi -@@ -302,6 +302,7 @@ - - &usbotg1 { - pinctrl-names = "default"; -+ pinctrl-0 = <&pinctrl_usbotg1>; - disable-over-current; - srp-disable; - hnp-disable; -diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts -index f6ba5e4260404..7562497c45dd8 100644 ---- a/arch/arm/boot/dts/exynos3250-rinato.dts -+++ b/arch/arm/boot/dts/exynos3250-rinato.dts -@@ -249,7 +249,7 @@ - i80-if-timings { - cs-setup = <0>; - wr-setup = <0>; -- wr-act = <1>; -+ wr-active = <1>; - wr-hold = <0>; - }; - }; -diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi -index 021d9fc1b4923..27a1a89526655 100644 ---- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi -+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi -@@ -10,7 +10,7 @@ - / { - thermal-zones { - cpu_thermal: cpu-thermal { -- thermal-sensors = <&tmu 0>; -+ thermal-sensors = <&tmu>; - polling-delay-passive = <0>; - polling-delay = <0>; - trips { -diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi -index eab77a66ae8f2..201e2fe7ed0cc 100644 ---- a/arch/arm/boot/dts/exynos4.dtsi -+++ b/arch/arm/boot/dts/exynos4.dtsi -@@ -605,7 +605,7 @@ - status = "disabled"; - - hdmi_i2c_phy: hdmiphy@38 { -- compatible = "exynos4210-hdmiphy"; -+ compatible = "samsung,exynos4210-hdmiphy"; - reg = <0x38>; - }; - }; -diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts -index 55922176807e6..93880bdbcad98 100644 ---- a/arch/arm/boot/dts/exynos4210-i9100.dts -+++ b/arch/arm/boot/dts/exynos4210-i9100.dts -@@ -200,8 +200,8 @@ - power-on-delay = <10>; - reset-delay = <10>; - -- panel-width-mm = <90>; -- panel-height-mm = <154>; -+ panel-width-mm = <56>; -+ panel-height-mm = <93>; - - display-timings { - timing { -@@ -827,7 +827,7 @@ - compatible = "brcm,bcm4330-bt"; - - shutdown-gpios = <&gpl0 4 GPIO_ACTIVE_HIGH>; -- reset-gpios = <&gpl1 0 GPIO_ACTIVE_HIGH>; -+ reset-gpios = <&gpl1 0 GPIO_ACTIVE_LOW>; - device-wakeup-gpios = <&gpx3 1 GPIO_ACTIVE_HIGH>; - host-wakeup-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; - }; -diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi -index 7e7d65ce6585f..ac62d8dc70b19 100644 ---- a/arch/arm/boot/dts/exynos4210.dtsi -+++ b/arch/arm/boot/dts/exynos4210.dtsi -@@ -393,7 +393,6 @@ - &cpu_thermal { - polling-delay-passive = <0>; - polling-delay = <0>; -- thermal-sensors = <&tmu 0>; - }; - - &gic { -diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts -index 47431307cb3cf..fbfc04f9a04cc 100644 ---- a/arch/arm/boot/dts/exynos4412-itop-elite.dts -+++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts -@@ -179,7 +179,7 @@ - compatible = "wlf,wm8960"; - reg = <0x1a>; - clocks = <&pmu_system_controller 0>; -- clock-names = "MCLK1"; -+ 
clock-names = "mclk"; - wlf,shared-lrclk; - #sound-dai-cells = <0>; - }; -diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi -index 968c7943653e2..49843e016828e 100644 ---- a/arch/arm/boot/dts/exynos4412-midas.dtsi -+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi -@@ -585,7 +585,7 @@ - clocks = <&camera 1>; - clock-names = "extclk"; - samsung,camclk-out = <1>; -- gpios = <&gpm1 6 GPIO_ACTIVE_HIGH>; -+ gpios = <&gpm1 6 GPIO_ACTIVE_LOW>; - - port { - is_s5k6a3_ep: endpoint { -diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts -index 5479ef09f9f36..0acb05f0a2b7c 100644 ---- a/arch/arm/boot/dts/exynos4412-origen.dts -+++ b/arch/arm/boot/dts/exynos4412-origen.dts -@@ -95,7 +95,7 @@ - }; - - &ehci { -- samsung,vbus-gpio = <&gpx3 5 1>; -+ samsung,vbus-gpio = <&gpx3 5 GPIO_ACTIVE_HIGH>; - status = "okay"; - phys = <&exynos_usbphy 2>, <&exynos_usbphy 3>; - phy-names = "hsic0", "hsic1"; -diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi -index d31a68672bfac..d7d756614edd1 100644 ---- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi -+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi -@@ -260,7 +260,7 @@ - }; - - uart3_data: uart3-data { -- samsung,pins = "gpa1-4", "gpa1-4"; -+ samsung,pins = "gpa1-4", "gpa1-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - samsung,pin-drv = ; -diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts -index 39bbe18145cf2..e4861415a0fe5 100644 ---- a/arch/arm/boot/dts/exynos5250-smdk5250.dts -+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts -@@ -118,6 +118,9 @@ - status = "okay"; - ddc = <&i2c_2>; - hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; -+ vdd-supply = <&ldo8_reg>; -+ vdd_osc-supply = <&ldo10_reg>; -+ vdd_pll-supply = <&ldo8_reg>; - }; - - &i2c_0 { -@@ -126,7 +129,7 @@ - samsung,i2c-max-bus-freq = <20000>; - - eeprom@50 { -- compatible = "samsung,s524ad0xd1"; -+ compatible = "samsung,s524ad0xd1", "atmel,24c128"; - reg = <0x50>; - }; - -@@ -286,7 +289,7 @@ - samsung,i2c-max-bus-freq = <20000>; - - eeprom@51 { -- compatible = "samsung,s524ad0xd1"; -+ compatible = "samsung,s524ad0xd1", "atmel,24c128"; - reg = <0x51>; - }; - -diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi -index 4ffa9253b566c..de0275df807fb 100644 ---- a/arch/arm/boot/dts/exynos5250.dtsi -+++ b/arch/arm/boot/dts/exynos5250.dtsi -@@ -1119,7 +1119,7 @@ - &cpu_thermal { - polling-delay-passive = <0>; - polling-delay = <0>; -- thermal-sensors = <&tmu 0>; -+ thermal-sensors = <&tmu>; - - cooling-maps { - map0 { -diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts -index 884fef55836cf..3765f5ba03f25 100644 ---- a/arch/arm/boot/dts/exynos5410-odroidxu.dts -+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts -@@ -120,7 +120,6 @@ - }; - - &cpu0_thermal { -- thermal-sensors = <&tmu_cpu0 0>; - polling-delay-passive = <0>; - polling-delay = <0>; - -diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts -index a4f0e3ffedbd3..07f65213aae65 100644 ---- a/arch/arm/boot/dts/exynos5420-smdk5420.dts -+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts -@@ -124,6 +124,9 @@ - hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&hdmi_hpd_irq>; -+ vdd-supply = <&ldo6_reg>; -+ vdd_osc-supply = <&ldo7_reg>; -+ vdd_pll-supply = <&ldo6_reg>; - }; - - &hsi2c_4 { -diff --git a/arch/arm/boot/dts/exynos5420.dtsi 
b/arch/arm/boot/dts/exynos5420.dtsi -index e23e8ffb093fa..4fb4804830afe 100644 ---- a/arch/arm/boot/dts/exynos5420.dtsi -+++ b/arch/arm/boot/dts/exynos5420.dtsi -@@ -602,7 +602,7 @@ - }; - - mipi_phy: mipi-video-phy { -- compatible = "samsung,s5pv210-mipi-video-phy"; -+ compatible = "samsung,exynos5420-mipi-video-phy"; - syscon = <&pmu_system_controller>; - #phy-cells = <1>; - }; -diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts -index d91f7fa2cf808..e57d3e464434f 100644 ---- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts -+++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts -@@ -29,7 +29,7 @@ - - thermal-zones { - cpu0_thermal: cpu0-thermal { -- thermal-sensors = <&tmu_cpu0 0>; -+ thermal-sensors = <&tmu_cpu0>; - trips { - cpu0_alert0: cpu-alert-0 { - temperature = <70000>; /* millicelsius */ -@@ -84,7 +84,7 @@ - }; - }; - cpu1_thermal: cpu1-thermal { -- thermal-sensors = <&tmu_cpu1 0>; -+ thermal-sensors = <&tmu_cpu1>; - trips { - cpu1_alert0: cpu-alert-0 { - temperature = <70000>; -@@ -128,7 +128,7 @@ - }; - }; - cpu2_thermal: cpu2-thermal { -- thermal-sensors = <&tmu_cpu2 0>; -+ thermal-sensors = <&tmu_cpu2>; - trips { - cpu2_alert0: cpu-alert-0 { - temperature = <70000>; -@@ -172,7 +172,7 @@ - }; - }; - cpu3_thermal: cpu3-thermal { -- thermal-sensors = <&tmu_cpu3 0>; -+ thermal-sensors = <&tmu_cpu3>; - trips { - cpu3_alert0: cpu-alert-0 { - temperature = <70000>; -@@ -216,7 +216,7 @@ - }; - }; - gpu_thermal: gpu-thermal { -- thermal-sensors = <&tmu_gpu 0>; -+ thermal-sensors = <&tmu_gpu>; - trips { - gpu_alert0: gpu-alert-0 { - temperature = <70000>; -diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi -index e35af40a55cb8..0b27e968c6fd2 100644 ---- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi -+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi -@@ -50,7 +50,7 @@ - - thermal-zones { - cpu0_thermal: cpu0-thermal { -- thermal-sensors = <&tmu_cpu0 0>; -+ thermal-sensors = <&tmu_cpu0>; - polling-delay-passive = <250>; - polling-delay = <0>; - trips { -@@ -139,7 +139,7 @@ - }; - }; - cpu1_thermal: cpu1-thermal { -- thermal-sensors = <&tmu_cpu1 0>; -+ thermal-sensors = <&tmu_cpu1>; - polling-delay-passive = <250>; - polling-delay = <0>; - trips { -@@ -212,7 +212,7 @@ - }; - }; - cpu2_thermal: cpu2-thermal { -- thermal-sensors = <&tmu_cpu2 0>; -+ thermal-sensors = <&tmu_cpu2>; - polling-delay-passive = <250>; - polling-delay = <0>; - trips { -@@ -285,7 +285,7 @@ - }; - }; - cpu3_thermal: cpu3-thermal { -- thermal-sensors = <&tmu_cpu3 0>; -+ thermal-sensors = <&tmu_cpu3>; - polling-delay-passive = <250>; - polling-delay = <0>; - trips { -@@ -358,7 +358,7 @@ - }; - }; - gpu_thermal: gpu-thermal { -- thermal-sensors = <&tmu_gpu 0>; -+ thermal-sensors = <&tmu_gpu>; - polling-delay-passive = <250>; - polling-delay = <0>; - trips { -diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts -index 13112a8a5dd88..6544c730340fa 100644 ---- a/arch/arm/boot/dts/gemini-nas4220b.dts -+++ b/arch/arm/boot/dts/gemini-nas4220b.dts -@@ -84,7 +84,7 @@ - partitions { - compatible = "redboot-fis"; - /* Eraseblock at 0xfe0000 */ -- fis-index-block = <0x1fc>; -+ fis-index-block = <0x7f>; - }; - }; - -diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts -index 8cbaf1c811745..3b609d987d883 100644 ---- a/arch/arm/boot/dts/imx23-evk.dts -+++ b/arch/arm/boot/dts/imx23-evk.dts -@@ -79,7 +79,6 @@ - MX23_PAD_LCD_RESET__GPIO_1_18 - 
MX23_PAD_PWM3__GPIO_1_29 - MX23_PAD_PWM4__GPIO_1_30 -- MX23_PAD_SSP1_DETECT__SSP1_DETECT - >; - fsl,drive-strength = ; - fsl,voltage = ; -diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi -index 7f4c602454a5f..ce3d6360a7efb 100644 ---- a/arch/arm/boot/dts/imx23.dtsi -+++ b/arch/arm/boot/dts/imx23.dtsi -@@ -59,7 +59,7 @@ - reg = <0x80000000 0x2000>; - }; - -- dma_apbh: dma-apbh@80004000 { -+ dma_apbh: dma-controller@80004000 { - compatible = "fsl,imx23-dma-apbh"; - reg = <0x80004000 0x2000>; - interrupts = <0 14 20 0 -diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi -index fdcca82c9986f..bd8ea2ec24575 100644 ---- a/arch/arm/boot/dts/imx25.dtsi -+++ b/arch/arm/boot/dts/imx25.dtsi -@@ -515,7 +515,7 @@ - #interrupt-cells = <2>; - }; - -- sdma: sdma@53fd4000 { -+ sdma: dma-controller@53fd4000 { - compatible = "fsl,imx25-sdma"; - reg = <0x53fd4000 0x4000>; - clocks = <&clks 112>, <&clks 68>; -diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts -index 7e2b0f198dfad..1053b7c584d81 100644 ---- a/arch/arm/boot/dts/imx28-evk.dts -+++ b/arch/arm/boot/dts/imx28-evk.dts -@@ -129,7 +129,7 @@ - pinctrl-0 = <&spi2_pins_a>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "sst,sst25vf016b", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts -index f3bddc5ada4b8..13acdc7916b9b 100644 ---- a/arch/arm/boot/dts/imx28-m28evk.dts -+++ b/arch/arm/boot/dts/imx28-m28evk.dts -@@ -33,7 +33,7 @@ - pinctrl-0 = <&spi2_pins_a>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "m25p80", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts -index 43be7a6a769bc..90928db0df701 100644 ---- a/arch/arm/boot/dts/imx28-sps1.dts -+++ b/arch/arm/boot/dts/imx28-sps1.dts -@@ -51,7 +51,7 @@ - pinctrl-0 = <&spi2_pins_a>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "everspin,mr25h256", "mr25h256"; -diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi -index 84d0176d51933..10eab221bc053 100644 ---- a/arch/arm/boot/dts/imx28.dtsi -+++ b/arch/arm/boot/dts/imx28.dtsi -@@ -78,7 +78,7 @@ - status = "disabled"; - }; - -- dma_apbh: dma-apbh@80004000 { -+ dma_apbh: dma-controller@80004000 { - compatible = "fsl,imx28-dma-apbh"; - reg = <0x80004000 0x2000>; - interrupts = <82 83 84 85 -diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi -index 948d2a543f8d1..c85866e73a7b9 100644 ---- a/arch/arm/boot/dts/imx31.dtsi -+++ b/arch/arm/boot/dts/imx31.dtsi -@@ -297,7 +297,7 @@ - #interrupt-cells = <2>; - }; - -- sdma: sdma@53fd4000 { -+ sdma: dma-controller@53fd4000 { - compatible = "fsl,imx31-sdma"; - reg = <0x53fd4000 0x4000>; - interrupts = <34>; -diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi -index 8e41c8b7bd705..d650f54c3fc6b 100644 ---- a/arch/arm/boot/dts/imx35.dtsi -+++ b/arch/arm/boot/dts/imx35.dtsi -@@ -284,7 +284,7 @@ - #interrupt-cells = <2>; - }; - -- sdma: sdma@53fd4000 { -+ sdma: dma-controller@53fd4000 { - compatible = "fsl,imx35-sdma"; - reg = <0x53fd4000 0x4000>; - clocks = <&clks 9>, <&clks 65>; -diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi -index a969f335b2402..2560f8514ebed 100644 ---- a/arch/arm/boot/dts/imx50.dtsi -+++ b/arch/arm/boot/dts/imx50.dtsi 
-@@ -421,7 +421,7 @@ - status = "disabled"; - }; - -- sdma: sdma@63fb0000 { -+ sdma: dma-controller@63fb0000 { - compatible = "fsl,imx50-sdma", "fsl,imx35-sdma"; - reg = <0x63fb0000 0x4000>; - interrupts = <6>; -diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi -index 01cfcbe5928e8..b3ab0c000d9d1 100644 ---- a/arch/arm/boot/dts/imx51.dtsi -+++ b/arch/arm/boot/dts/imx51.dtsi -@@ -498,7 +498,7 @@ - status = "disabled"; - }; - -- sdma: sdma@83fb0000 { -+ sdma: dma-controller@83fb0000 { - compatible = "fsl,imx51-sdma", "fsl,imx35-sdma"; - reg = <0x83fb0000 0x4000>; - interrupts = <6>; -diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts -index 4f88e96d81ddb..d5c68d1ea707c 100644 ---- a/arch/arm/boot/dts/imx53-m53menlo.dts -+++ b/arch/arm/boot/dts/imx53-m53menlo.dts -@@ -53,6 +53,31 @@ - }; - }; - -+ lvds-decoder { -+ compatible = "ti,ds90cf364a", "lvds-decoder"; -+ -+ ports { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ port@0 { -+ reg = <0>; -+ -+ lvds_decoder_in: endpoint { -+ remote-endpoint = <&lvds0_out>; -+ }; -+ }; -+ -+ port@1 { -+ reg = <1>; -+ -+ lvds_decoder_out: endpoint { -+ remote-endpoint = <&panel_in>; -+ }; -+ }; -+ }; -+ }; -+ - panel { - compatible = "edt,etm0700g0dh6"; - pinctrl-0 = <&pinctrl_display_gpio>; -@@ -61,7 +86,7 @@ - - port { - panel_in: endpoint { -- remote-endpoint = <&lvds0_out>; -+ remote-endpoint = <&lvds_decoder_out>; - }; - }; - }; -@@ -450,7 +475,7 @@ - reg = <2>; - - lvds0_out: endpoint { -- remote-endpoint = <&panel_in>; -+ remote-endpoint = <&lvds_decoder_in>; - }; - }; - }; -diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts -index 37d0cffea99c5..70c4a4852256c 100644 ---- a/arch/arm/boot/dts/imx53-ppd.dts -+++ b/arch/arm/boot/dts/imx53-ppd.dts -@@ -488,7 +488,7 @@ - scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; - status = "okay"; - -- i2c-switch@70 { -+ i2c-mux@70 { - compatible = "nxp,pca9547"; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi -index 2cf3909cca2f8..ca1bea42cc0e2 100644 ---- a/arch/arm/boot/dts/imx53.dtsi -+++ b/arch/arm/boot/dts/imx53.dtsi -@@ -710,7 +710,7 @@ - status = "disabled"; - }; - -- sdma: sdma@63fb0000 { -+ sdma: dma-controller@63fb0000 { - compatible = "fsl,imx53-sdma", "fsl,imx35-sdma"; - reg = <0x63fb0000 0x4000>; - interrupts = <6>; -diff --git a/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts -index b4a9523e325b4..864dc5018451f 100644 ---- a/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts -+++ b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts -@@ -297,7 +297,11 @@ - phy-mode = "rmii"; - phy-reset-gpios = <&gpio1 18 GPIO_ACTIVE_LOW>; - phy-handle = <&phy>; -- clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>; -+ clocks = <&clks IMX6QDL_CLK_ENET>, -+ <&clks IMX6QDL_CLK_ENET>, -+ <&rmii_clk>, -+ <&clks IMX6QDL_CLK_ENET_REF>; -+ clock-names = "ipg", "ahb", "ptp", "enet_out"; - status = "okay"; - - mdio { -diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts -index 5ac84445e9cc1..90e01de8c2c15 100644 ---- a/arch/arm/boot/dts/imx6dl-prtrvt.dts -+++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts -@@ -126,6 +126,10 @@ - status = "disabled"; - }; - -+&usbotg { -+ disable-over-current; -+}; -+ - &vpu { - status = "disabled"; - }; -diff --git a/arch/arm/boot/dts/imx6dl-rex-basic.dts b/arch/arm/boot/dts/imx6dl-rex-basic.dts -index 0f1616bfa9a80..b72f8ea1e6f6c 100644 ---- 
a/arch/arm/boot/dts/imx6dl-rex-basic.dts -+++ b/arch/arm/boot/dts/imx6dl-rex-basic.dts -@@ -19,7 +19,7 @@ - }; - - &ecspi3 { -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "sst,sst25vf016b", "jedec,spi-nor"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi -index fdd81fdc3f357..cd3183c36488a 100644 ---- a/arch/arm/boot/dts/imx6dl.dtsi -+++ b/arch/arm/boot/dts/imx6dl.dtsi -@@ -84,6 +84,9 @@ - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x20000>; -+ ranges = <0 0x00900000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6QDL_CLK_OCRAM>; - }; - -diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi -index 6330d75f8f390..f266f1b7e0cfc 100644 ---- a/arch/arm/boot/dts/imx6q-ba16.dtsi -+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi -@@ -142,7 +142,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: n25q032@0 { -+ flash: flash@0 { - compatible = "jedec,spi-nor"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi -index 10922375c51e1..ead83091e193a 100644 ---- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi -+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi -@@ -160,7 +160,7 @@ - pinctrl-0 = <&pinctrl_ecspi5>; - status = "okay"; - -- m25_eeprom: m25p80@0 { -+ m25_eeprom: flash@0 { - compatible = "atmel,at25"; - spi-max-frequency = <10000000>; - size = <0x8000>; -diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts -index bfb530f29d9de..1ad41c944b4b9 100644 ---- a/arch/arm/boot/dts/imx6q-cm-fx6.dts -+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts -@@ -260,7 +260,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- m25p80@0 { -+ flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,m25p", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts -index c713ac03b3b92..9591848cbd37c 100644 ---- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts -+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts -@@ -102,7 +102,7 @@ - cs-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "m25p80", "jedec,spi-nor"; - spi-max-frequency = <40000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6q-dms-ba16.dts b/arch/arm/boot/dts/imx6q-dms-ba16.dts -index 48fb47e715f6d..137db38f0d27b 100644 ---- a/arch/arm/boot/dts/imx6q-dms-ba16.dts -+++ b/arch/arm/boot/dts/imx6q-dms-ba16.dts -@@ -47,7 +47,7 @@ - pinctrl-0 = <&pinctrl_ecspi5>; - status = "okay"; - -- m25_eeprom: m25p80@0 { -+ m25_eeprom: flash@0 { - compatible = "atmel,at25256B", "atmel,at25"; - spi-max-frequency = <20000000>; - size = <0x8000>; -diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts -index 4cde45d5c90c8..e894faba571f9 100644 ---- a/arch/arm/boot/dts/imx6q-gw5400-a.dts -+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts -@@ -137,7 +137,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "sst,w25q256", "jedec,spi-nor"; - spi-max-frequency = <30000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts -index 05ee283882290..cc18010023942 100644 ---- a/arch/arm/boot/dts/imx6q-marsboard.dts -+++ b/arch/arm/boot/dts/imx6q-marsboard.dts -@@ -100,7 +100,7 @@ - cs-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; - status = "okay"; - -- 
m25p80@0 { -+ flash@0 { - compatible = "microchip,sst25vf016b"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts -index b4605edfd2ab8..d8fa83effd638 100644 ---- a/arch/arm/boot/dts/imx6q-prti6q.dts -+++ b/arch/arm/boot/dts/imx6q-prti6q.dts -@@ -364,8 +364,8 @@ - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_wifi>; - interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>; -- ref-clock-frequency = "38400000"; -- tcxo-clock-frequency = "19200000"; -+ ref-clock-frequency = <38400000>; -+ tcxo-clock-frequency = <19200000>; - }; - }; - -diff --git a/arch/arm/boot/dts/imx6q-rex-pro.dts b/arch/arm/boot/dts/imx6q-rex-pro.dts -index 1767e1a3cd53a..271f4b2d9b9f0 100644 ---- a/arch/arm/boot/dts/imx6q-rex-pro.dts -+++ b/arch/arm/boot/dts/imx6q-rex-pro.dts -@@ -19,7 +19,7 @@ - }; - - &ecspi3 { -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "sst,sst25vf032b", "jedec,spi-nor"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi -index 9caba4529c718..a8069e0a8fe82 100644 ---- a/arch/arm/boot/dts/imx6q.dtsi -+++ b/arch/arm/boot/dts/imx6q.dtsi -@@ -163,6 +163,9 @@ - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x40000>; -+ ranges = <0 0x00900000 0x40000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6QDL_CLK_OCRAM>; - }; - -diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi -index 30fa349f9d054..a696873dc1abe 100644 ---- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi -@@ -286,6 +286,8 @@ - codec: sgtl5000@a { - compatible = "fsl,sgtl5000"; - reg = <0x0a>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&pinctrl_sgtl5000>; - clocks = <&clks IMX6QDL_CLK_CKO>; - VDDA-supply = <®_module_3v3_audio>; - VDDIO-supply = <®_module_3v3>; -@@ -516,8 +518,6 @@ - MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x130b0 - MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0 - MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0 -- /* SGTL5000 sys_mclk */ -- MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 - >; - }; - -@@ -810,6 +810,12 @@ - >; - }; - -+ pinctrl_sgtl5000: sgtl5000grp { -+ fsl,pins = < -+ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 -+ >; -+ }; -+ - pinctrl_spdif: spdifgrp { - fsl,pins = < - MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0 -diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi -index e21f6ac864e54..baa197c90060e 100644 ---- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi -@@ -96,7 +96,7 @@ - pinctrl-0 = <&pinctrl_ecspi4>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "micron,n25q128a11", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi -index 563bf9d44fe0d..2ba577e602e7f 100644 ---- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi -@@ -131,7 +131,7 @@ - pinctrl-0 = <&pinctrl_ecspi4>; - status = "okay"; - -- flash: m25p80@1 { -+ flash: flash@1 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "micron,n25q128a11", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi -index 4e2a309c93fa8..1e86b38147080 100644 ---- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi -@@ -1,6 +1,6 @@ - 
// SPDX-License-Identifier: GPL-2.0+ OR MIT - /* -- * Copyright 2014-2020 Toradex -+ * Copyright 2014-2022 Toradex - * Copyright 2012 Freescale Semiconductor, Inc. - * Copyright 2011 Linaro Ltd. - */ -@@ -132,7 +132,7 @@ - clock-frequency = <100000>; - pinctrl-names = "default", "gpio"; - pinctrl-0 = <&pinctrl_i2c2>; -- pinctrl-0 = <&pinctrl_i2c2_gpio>; -+ pinctrl-1 = <&pinctrl_i2c2_gpio>; - scl-gpios = <&gpio2 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; - sda-gpios = <&gpio3 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; - status = "okay"; -@@ -488,7 +488,7 @@ - >; - }; - -- pinctrl_i2c2_gpio: i2c2grp { -+ pinctrl_i2c2_gpio: i2c2gpiogrp { - fsl,pins = < - MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x4001b8b1 - MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x4001b8b1 -diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi -index 648f5fcb72e65..2c1d6f28e6950 100644 ---- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi -@@ -35,7 +35,7 @@ - pinctrl-0 = <&pinctrl_ecspi3>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "sst,sst25vf040b", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi -index 4bc4371e6bae5..4b81a975c979d 100644 ---- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi -@@ -632,7 +632,6 @@ - &uart1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_uart1>; -- uart-has-rtscts; - rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; - status = "okay"; - }; -diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi -index 68e5ab2e27e22..6bb4855d13ce5 100644 ---- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi -@@ -29,7 +29,7 @@ - - user-pb { - label = "user_pb"; -- gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; -+ gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; - linux,code = ; - }; - -diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi -index 8e23cec7149e5..696427b487f01 100644 ---- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi -@@ -26,7 +26,7 @@ - - user-pb { - label = "user_pb"; -- gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; -+ gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; - linux,code = ; - }; - -diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -index b167b33bd108d..683f6e58ab230 100644 ---- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -@@ -51,16 +51,6 @@ - vin-supply = <®_3p3v_s5>; - }; - -- reg_3p3v_s0: regulator-3p3v-s0 { -- compatible = "regulator-fixed"; -- regulator-name = "V_3V3_S0"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- regulator-always-on; -- regulator-boot-on; -- vin-supply = <®_3p3v_s5>; -- }; -- - reg_3p3v_s5: regulator-3p3v-s5 { - compatible = "regulator-fixed"; - regulator-name = "V_3V3_S5"; -@@ -258,8 +248,8 @@ - status = "okay"; - - /* default boot source: workaround #1 for errata ERR006282 */ -- smarc_flash: spi-flash@0 { -- compatible = "winbond,w25q16dw", "jedec,spi-nor"; -+ smarc_flash: flash@0 { -+ compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <20000000>; - }; -@@ -273,6 +263,10 @@ - phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; - }; - -+&hdmi { -+ ddc-i2c-bus = <&i2c2>; -+}; -+ - &i2c_intern { - pmic@8 { - compatible = "fsl,pfuze100"; -@@ 
-397,7 +391,7 @@ - - /* HDMI_CTRL */ - &i2c2 { -- clock-frequency = <375000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c2>; - }; -diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi -index ac34709e97413..0ad4cb4f1e828 100644 ---- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi -@@ -179,7 +179,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "microchip,sst25vf016b"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi -index c96f4d7e1e0d8..beaa2dcd436ce 100644 ---- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi -@@ -321,7 +321,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "microchip,sst25vf016b"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi -index 92d09a3ebe0ee..ee7e2371f94bd 100644 ---- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi -@@ -252,7 +252,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "microchip,sst25vf016b"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi -index 49da30d7510c4..904d5d051d63c 100644 ---- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi -@@ -237,7 +237,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "sst,sst25vf016b", "jedec,spi-nor"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi -index 19578f660b092..70dfa07a16981 100644 ---- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi -@@ -69,6 +69,7 @@ - vbus-supply = <®_usb_h1_vbus>; - phy_type = "utmi"; - dr_mode = "host"; -+ disable-over-current; - status = "okay"; - }; - -@@ -78,10 +79,18 @@ - pinctrl-0 = <&pinctrl_usbotg>; - phy_type = "utmi"; - dr_mode = "host"; -- disable-over-current; -+ over-current-active-low; - status = "okay"; - }; - -+&usbphynop1 { -+ status = "disabled"; -+}; -+ -+&usbphynop2 { -+ status = "disabled"; -+}; -+ - &usdhc1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc1>; -diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi -index 5e58740d40c5b..1368a47620372 100644 ---- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi -@@ -272,7 +272,7 @@ - pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1_cs>; - status = "disabled"; /* pin conflict with WEIM NOR */ - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,m25p32", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi -index eb9a0b104f1c3..901b9a761b66e 100644 ---- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi -@@ -313,7 +313,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - 
compatible = "sst,sst25vf016b", "jedec,spi-nor"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi -index 0c0105468a2fe..37482a9023fce 100644 ---- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi -@@ -197,7 +197,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,m25p32", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi -index fded07f370b39..d6ba4b2a60f6f 100644 ---- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi -@@ -226,7 +226,7 @@ - reg = <0x28>; - #gpio-cells = <2>; - gpio-controller; -- ngpio = <32>; -+ ngpios = <62>; - }; - - sgtl5000: codec@a { -diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi -index d07d8f83456d2..ccfa8e320be62 100644 ---- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi -@@ -5,6 +5,8 @@ - * Author: Fabio Estevam - */ - -+#include -+ - / { - aliases { - backlight = &backlight; -@@ -226,6 +228,7 @@ - MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 - MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 - MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 -+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 - >; - }; - -@@ -304,7 +307,7 @@ - &usdhc3 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usdhc3>; -- non-removable; -+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; - status = "okay"; - }; - -diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi -index b62a0dbb033ff..ec6fba5ee8fde 100644 ---- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi -@@ -309,6 +309,7 @@ - - ethphy: ethernet-phy@1 { - reg = <1>; -+ qca,clk-out-frequency = <125000000>; - }; - }; - }; -diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi -index 89c342f3a7c2f..8b6327e64819c 100644 ---- a/arch/arm/boot/dts/imx6qdl.dtsi -+++ b/arch/arm/boot/dts/imx6qdl.dtsi -@@ -150,7 +150,7 @@ - interrupt-parent = <&gpc>; - ranges; - -- dma_apbh: dma-apbh@110000 { -+ dma_apbh: dma-controller@110000 { - compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh"; - reg = <0x00110000 0x2000>; - interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>, -@@ -763,7 +763,7 @@ - regulator-name = "vddpu"; - regulator-min-microvolt = <725000>; - regulator-max-microvolt = <1450000>; -- regulator-enable-ramp-delay = <150>; -+ regulator-enable-ramp-delay = <380>; - anatop-reg-offset = <0x140>; - anatop-vol-bit-shift = <9>; - anatop-vol-bit-width = <5>; -@@ -930,7 +930,7 @@ - interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>; - }; - -- sdma: sdma@20ec000 { -+ sdma: dma-controller@20ec000 { - compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; - reg = <0x020ec000 0x4000>; - interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; -diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi -index b310f13a53f22..4d23c92aa8a6b 100644 ---- a/arch/arm/boot/dts/imx6qp.dtsi -+++ b/arch/arm/boot/dts/imx6qp.dtsi -@@ -9,12 +9,18 @@ - ocram2: sram@940000 { - compatible = "mmio-sram"; - reg = <0x00940000 0x20000>; -+ ranges = <0 0x00940000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6QDL_CLK_OCRAM>; - }; - - ocram3: sram@960000 { - compatible = "mmio-sram"; - reg = <0x00960000 0x20000>; -+ ranges = <0 0x00960000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - 
clocks = <&clks IMX6QDL_CLK_OCRAM>; - }; - -diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts -index 25f6f2fb1555e..f16c830f1e918 100644 ---- a/arch/arm/boot/dts/imx6sl-evk.dts -+++ b/arch/arm/boot/dts/imx6sl-evk.dts -@@ -137,7 +137,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "st,m25p32", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts -index a17b8bbbdb956..f2231cb1e32df 100644 ---- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts -+++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts -@@ -597,6 +597,7 @@ - - &usbotg1 { - pinctrl-names = "default"; -+ pinctrl-0 = <&pinctrl_usbotg1>; - disable-over-current; - srp-disable; - hnp-disable; -diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi -index 997b96c1c47b9..0e0139246ad21 100644 ---- a/arch/arm/boot/dts/imx6sl.dtsi -+++ b/arch/arm/boot/dts/imx6sl.dtsi -@@ -117,6 +117,9 @@ - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x20000>; -+ ranges = <0 0x00900000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6SL_CLK_OCRAM>; - }; - -@@ -749,7 +752,7 @@ - interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>; - }; - -- sdma: sdma@20ec000 { -+ sdma: dma-controller@20ec000 { - compatible = "fsl,imx6sl-sdma", "fsl,imx6q-sdma"; - reg = <0x020ec000 0x4000>; - interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; -diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi -index 04f8d637a5019..3659fd5ecfa62 100644 ---- a/arch/arm/boot/dts/imx6sll.dtsi -+++ b/arch/arm/boot/dts/imx6sll.dtsi -@@ -51,20 +51,18 @@ - device_type = "cpu"; - reg = <0>; - next-level-cache = <&L2>; -- operating-points = < -+ operating-points = - /* kHz uV */ -- 996000 1275000 -- 792000 1175000 -- 396000 1075000 -- 198000 975000 -- >; -- fsl,soc-operating-points = < -+ <996000 1275000>, -+ <792000 1175000>, -+ <396000 1075000>, -+ <198000 975000>; -+ fsl,soc-operating-points = - /* ARM kHz SOC-PU uV */ -- 996000 1175000 -- 792000 1175000 -- 396000 1175000 -- 198000 1175000 -- >; -+ <996000 1175000>, -+ <792000 1175000>, -+ <396000 1175000>, -+ <198000 1175000>; - clock-latency = <61036>; /* two CLK32 periods */ - #cooling-cells = <2>; - clocks = <&clks IMX6SLL_CLK_ARM>, -@@ -117,6 +115,9 @@ - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x20000>; -+ ranges = <0 0x00900000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - }; - - intc: interrupt-controller@a01000 { -@@ -551,7 +552,7 @@ - reg = <0x020ca000 0x1000>; - interrupts = ; - clocks = <&clks IMX6SLL_CLK_USBPHY2>; -- phy-reg_3p0-supply = <®_3p0>; -+ phy-3p0-supply = <®_3p0>; - fsl,anatop = <&anatop>; - }; - -diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts -index 66af78e83b701..a2c79bcf9a11c 100644 ---- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts -+++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts -@@ -107,7 +107,7 @@ - pinctrl-0 = <&pinctrl_ecspi1>; - status = "okay"; - -- flash: m25p80@0 { -+ flash: flash@0 { - compatible = "microchip,sst25vf016b"; - spi-max-frequency = <20000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6sx-sdb-reva.dts b/arch/arm/boot/dts/imx6sx-sdb-reva.dts -index dce5dcf96c255..7dda42553f4bc 100644 ---- a/arch/arm/boot/dts/imx6sx-sdb-reva.dts -+++ b/arch/arm/boot/dts/imx6sx-sdb-reva.dts -@@ -123,7 +123,7 @@ - pinctrl-0 = <&pinctrl_qspi2>; - 
status = "okay"; - -- flash0: s25fl128s@0 { -+ flash0: flash@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; -@@ -133,7 +133,7 @@ - spi-tx-bus-width = <4>; - }; - -- flash1: s25fl128s@2 { -+ flash1: flash@2 { - reg = <2>; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts -index 99f4cf777a384..969cfe920d252 100644 ---- a/arch/arm/boot/dts/imx6sx-sdb.dts -+++ b/arch/arm/boot/dts/imx6sx-sdb.dts -@@ -108,7 +108,7 @@ - pinctrl-0 = <&pinctrl_qspi2>; - status = "okay"; - -- flash0: n25q256a@0 { -+ flash0: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "micron,n25q256a", "jedec,spi-nor"; -@@ -118,7 +118,7 @@ - reg = <0>; - }; - -- flash1: n25q256a@2 { -+ flash1: flash@2 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "micron,n25q256a", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi -index 8516730778df8..7a3d85e7a5fa7 100644 ---- a/arch/arm/boot/dts/imx6sx.dtsi -+++ b/arch/arm/boot/dts/imx6sx.dtsi -@@ -164,12 +164,18 @@ - ocram_s: sram@8f8000 { - compatible = "mmio-sram"; - reg = <0x008f8000 0x4000>; -+ ranges = <0 0x008f8000 0x4000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6SX_CLK_OCRAM_S>; - }; - - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x20000>; -+ ranges = <0 0x00900000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - clocks = <&clks IMX6SX_CLK_OCRAM>; - }; - -@@ -203,7 +209,7 @@ - power-domains = <&pd_pu>; - }; - -- dma_apbh: dma-apbh@1804000 { -+ dma_apbh: dma-controller@1804000 { - compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh"; - reg = <0x01804000 0x2000>; - interrupts = , -@@ -842,7 +848,7 @@ - reg = <0x020e4000 0x4000>; - }; - -- sdma: sdma@20ec000 { -+ sdma: dma-controller@20ec000 { - compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; - reg = <0x020ec000 0x4000>; - interrupts = ; -@@ -975,6 +981,8 @@ - <&clks IMX6SX_CLK_USDHC1>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-start-tap = <20>; -+ fsl,tuning-step= <2>; - status = "disabled"; - }; - -@@ -987,6 +995,8 @@ - <&clks IMX6SX_CLK_USDHC2>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-start-tap = <20>; -+ fsl,tuning-step= <2>; - status = "disabled"; - }; - -@@ -999,6 +1009,8 @@ - <&clks IMX6SX_CLK_USDHC3>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-start-tap = <20>; -+ fsl,tuning-step= <2>; - status = "disabled"; - }; - -diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi -index a3fde3316c736..1a18c41ce385a 100644 ---- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi -+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi -@@ -286,7 +286,7 @@ - pinctrl-0 = <&pinctrl_qspi>; - status = "okay"; - -- flash0: n25q256a@0 { -+ flash0: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "micron,n25q256a", "jedec,spi-nor"; -diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi -index 47d3ce5d255fa..acd936540d898 100644 ---- a/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi -+++ b/arch/arm/boot/dts/imx6ul-kontron-n6310-som.dtsi -@@ -19,7 +19,7 @@ - }; - - &qspi { -- spi-flash@0 { -+ flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spi-nand"; -diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi -index a095a7654ac65..29ed38dce5802 100644 ---- 
a/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi -+++ b/arch/arm/boot/dts/imx6ul-kontron-n6311-som.dtsi -@@ -18,7 +18,7 @@ - }; - - &qspi { -- spi-flash@0 { -+ flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spi-nand"; -diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi -index 2a449a3c1ae27..09a83dbdf6510 100644 ---- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi -+++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi -@@ -19,7 +19,7 @@ - pinctrl-0 = <&pinctrl_ecspi2>; - status = "okay"; - -- spi-flash@0 { -+ flash@0 { - compatible = "mxicy,mx25v8035f", "jedec,spi-nor"; - spi-max-frequency = <50000000>; - reg = <0>; -diff --git a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts -index 162dc259edc8c..5a74c7f68eb62 100644 ---- a/arch/arm/boot/dts/imx6ul-pico-dwarf.dts -+++ b/arch/arm/boot/dts/imx6ul-pico-dwarf.dts -@@ -32,7 +32,7 @@ - }; - - &i2c2 { -- clock_frequency = <100000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c2>; - status = "okay"; -diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi -index afeec01f65228..ad92409349fa2 100644 ---- a/arch/arm/boot/dts/imx6ul.dtsi -+++ b/arch/arm/boot/dts/imx6ul.dtsi -@@ -64,20 +64,18 @@ - clock-frequency = <696000000>; - clock-latency = <61036>; /* two CLK32 periods */ - #cooling-cells = <2>; -- operating-points = < -+ operating-points = - /* kHz uV */ -- 696000 1275000 -- 528000 1175000 -- 396000 1025000 -- 198000 950000 -- >; -- fsl,soc-operating-points = < -+ <696000 1275000>, -+ <528000 1175000>, -+ <396000 1025000>, -+ <198000 950000>; -+ fsl,soc-operating-points = - /* KHz uV */ -- 696000 1275000 -- 528000 1175000 -- 396000 1175000 -- 198000 1175000 -- >; -+ <696000 1275000>, -+ <528000 1175000>, -+ <396000 1175000>, -+ <198000 1175000>; - clocks = <&clks IMX6UL_CLK_ARM>, - <&clks IMX6UL_CLK_PLL2_BUS>, - <&clks IMX6UL_CLK_PLL2_PFD2>, -@@ -149,6 +147,9 @@ - ocram: sram@900000 { - compatible = "mmio-sram"; - reg = <0x00900000 0x20000>; -+ ranges = <0 0x00900000 0x20000>; -+ #address-cells = <1>; -+ #size-cells = <1>; - }; - - intc: interrupt-controller@a01000 { -@@ -163,7 +164,7 @@ - <0x00a06000 0x2000>; - }; - -- dma_apbh: dma-apbh@1804000 { -+ dma_apbh: dma-controller@1804000 { - compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh"; - reg = <0x01804000 0x2000>; - interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>, -@@ -543,7 +544,7 @@ - }; - - kpp: keypad@20b8000 { -- compatible = "fsl,imx6ul-kpp", "fsl,imx6q-kpp", "fsl,imx21-kpp"; -+ compatible = "fsl,imx6ul-kpp", "fsl,imx21-kpp"; - reg = <0x020b8000 0x4000>; - interrupts = ; - clocks = <&clks IMX6UL_CLK_KPP>; -@@ -743,7 +744,7 @@ - status = "disabled"; - }; - -- sdma: sdma@20ec000 { -+ sdma: dma-controller@20ec000 { - compatible = "fsl,imx6ul-sdma", "fsl,imx6q-sdma", - "fsl,imx35-sdma"; - reg = <0x020ec000 0x4000>; -@@ -998,7 +999,7 @@ - }; - - csi: csi@21c4000 { -- compatible = "fsl,imx6ul-csi", "fsl,imx7-csi"; -+ compatible = "fsl,imx6ul-csi"; - reg = <0x021c4000 0x4000>; - interrupts = ; - clocks = <&clks IMX6UL_CLK_CSI>; -@@ -1007,7 +1008,7 @@ - }; - - lcdif: lcdif@21c8000 { -- compatible = "fsl,imx6ul-lcdif", "fsl,imx28-lcdif"; -+ compatible = "fsl,imx6ul-lcdif", "fsl,imx6sx-lcdif"; - reg = <0x021c8000 0x4000>; - interrupts = ; - clocks = <&clks IMX6UL_CLK_LCDIF_PIX>, -@@ -1028,7 +1029,7 @@ - qspi: spi@21e0000 { - #address-cells = <1>; - #size-cells = <0>; -- compatible = 
"fsl,imx6ul-qspi", "fsl,imx6sx-qspi"; -+ compatible = "fsl,imx6ul-qspi"; - reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>; - reg-names = "QuadSPI", "QuadSPI-memory"; - interrupts = ; -diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi -index 0cdbf7b6e7285..b6fc879e9dbe6 100644 ---- a/arch/arm/boot/dts/imx6ull-colibri.dtsi -+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi -@@ -37,7 +37,7 @@ - - reg_sd1_vmmc: regulator-sd1-vmmc { - compatible = "regulator-gpio"; -- gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>; -+ gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_snvs_reg_sd>; - regulator-always-on; -diff --git a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi -index b7e984284e1ad..d000606c07049 100644 ---- a/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi -+++ b/arch/arm/boot/dts/imx6ull-kontron-n6411-som.dtsi -@@ -18,7 +18,7 @@ - }; - - &qspi { -- spi-flash@0 { -+ flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "spi-nand"; -diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h -index eb025a9d47592..7328d4ef8559f 100644 ---- a/arch/arm/boot/dts/imx6ull-pinfunc.h -+++ b/arch/arm/boot/dts/imx6ull-pinfunc.h -@@ -82,6 +82,6 @@ - #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0 - #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0 - #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0 0x01FC 0x0488 0x0000 0x9 0x0 --#define MX6ULL_PAD_CSI_DATA07__ESAI_T0 0x0200 0x048C 0x0000 0x9 0x0 -+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0 0x0200 0x048C 0x0000 0x9 0x0 - - #endif /* __DTS_IMX6ULL_PINFUNC_H */ -diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi -index 62b771c1d5a9a..f1c60b0cb143e 100644 ---- a/arch/arm/boot/dts/imx7-colibri.dtsi -+++ b/arch/arm/boot/dts/imx7-colibri.dtsi -@@ -40,7 +40,7 @@ - - dailink_master: simple-audio-card,codec { - sound-dai = <&codec>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - }; - }; - }; -@@ -293,7 +293,7 @@ - compatible = "fsl,sgtl5000"; - #sound-dai-cells = <0>; - reg = <0x0a>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sai1_mclk>; - VDDA-supply = <®_module_3v3_avdd>; -diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi -index 5e6bef230dc75..b55a7792a8391 100644 ---- a/arch/arm/boot/dts/imx7-mba7.dtsi -+++ b/arch/arm/boot/dts/imx7-mba7.dtsi -@@ -264,7 +264,7 @@ - tlv320aic32x4: audio-codec@18 { - compatible = "ti,tlv320aic32x4"; - reg = <0x18>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - clock-names = "mclk"; - ldoin-supply = <®_audio_3v3>; - iov-supply = <®_audio_3v3>; -diff --git a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi -index af39e5370fa12..045e4413d3390 100644 ---- a/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi -+++ b/arch/arm/boot/dts/imx7d-colibri-emmc.dtsi -@@ -13,6 +13,10 @@ - }; - }; - -+&cpu1 { -+ cpu-supply = <®_DCDC2>; -+}; -+ - &gpio6 { - gpio-line-names = "", - "", -diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts -index e0751e6ba3c0f..a31de900139d6 100644 ---- a/arch/arm/boot/dts/imx7d-nitrogen7.dts -+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts -@@ -288,7 +288,7 @@ - codec: wm8960@1a { - 
compatible = "wlf,wm8960"; - reg = <0x1a>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - clock-names = "mclk"; - wlf,shared-lrclk; - }; -diff --git a/arch/arm/boot/dts/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/imx7d-pico-dwarf.dts -index 5162fe227d1ea..fdc10563f1473 100644 ---- a/arch/arm/boot/dts/imx7d-pico-dwarf.dts -+++ b/arch/arm/boot/dts/imx7d-pico-dwarf.dts -@@ -32,7 +32,7 @@ - }; - - &i2c1 { -- clock_frequency = <100000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c1>; - status = "okay"; -@@ -52,7 +52,7 @@ - }; - - &i2c4 { -- clock_frequency = <100000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c1>; - status = "okay"; -diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts -index 7b2198a9372c6..6ad39dca70096 100644 ---- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts -+++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts -@@ -31,7 +31,7 @@ - - dailink_master: simple-audio-card,codec { - sound-dai = <&sgtl5000>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - }; - }; - }; -@@ -41,7 +41,7 @@ - #sound-dai-cells = <0>; - reg = <0x0a>; - compatible = "fsl,sgtl5000"; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - VDDA-supply = <®_2p5v>; - VDDIO-supply = <®_vref_1v8>; - }; -@@ -64,7 +64,7 @@ - interrupt-parent = <&gpio2>; - interrupts = <7 0>; - spi-max-frequency = <1000000>; -- pendown-gpio = <&gpio2 7 0>; -+ pendown-gpio = <&gpio2 7 GPIO_ACTIVE_LOW>; - vcc-supply = <®_3p3v>; - ti,x-min = /bits/ 16 <0>; - ti,x-max = /bits/ 16 <4095>; -diff --git a/arch/arm/boot/dts/imx7d-pico-nymph.dts b/arch/arm/boot/dts/imx7d-pico-nymph.dts -index 104a85254adbb..5afb1674e0125 100644 ---- a/arch/arm/boot/dts/imx7d-pico-nymph.dts -+++ b/arch/arm/boot/dts/imx7d-pico-nymph.dts -@@ -43,7 +43,7 @@ - }; - - &i2c1 { -- clock_frequency = <100000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c1>; - status = "okay"; -@@ -64,7 +64,7 @@ - }; - - &i2c2 { -- clock_frequency = <100000>; -+ clock-frequency = <100000>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c2>; - status = "okay"; -diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts -index 70bea95c06d83..f263e391e24cb 100644 ---- a/arch/arm/boot/dts/imx7d-pico-pi.dts -+++ b/arch/arm/boot/dts/imx7d-pico-pi.dts -@@ -31,7 +31,7 @@ - - dailink_master: simple-audio-card,codec { - sound-dai = <&sgtl5000>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - }; - }; - }; -@@ -41,7 +41,7 @@ - #sound-dai-cells = <0>; - reg = <0x0a>; - compatible = "fsl,sgtl5000"; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - VDDA-supply = <®_2p5v>; - VDDIO-supply = <®_vref_1v8>; - }; -diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts -index 4a0d83784d7d1..4e62ed2df11dd 100644 ---- a/arch/arm/boot/dts/imx7d-sdb.dts -+++ b/arch/arm/boot/dts/imx7d-sdb.dts -@@ -205,13 +205,8 @@ - pinctrl-0 = <&pinctrl_tsc2046_pendown>; - interrupt-parent = <&gpio2>; - interrupts = <29 0>; -- pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>; -- ti,x-min = /bits/ 16 <0>; -- ti,x-max = /bits/ 16 <0>; -- ti,y-min = /bits/ 16 <0>; -- ti,y-max = /bits/ 16 <0>; -- ti,pressure-max = /bits/ 16 <0>; -- ti,x-plate-ohms = /bits/ 16 <400>; -+ pendown-gpio = <&gpio2 29 
GPIO_ACTIVE_LOW>; -+ touchscreen-max-pressure = <255>; - wakeup-source; - }; - }; -@@ -385,14 +380,14 @@ - codec: wm8960@1a { - compatible = "wlf,wm8960"; - reg = <0x1a>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - clock-names = "mclk"; - wlf,shared-lrclk; - wlf,hp-cfg = <2 2 3>; - wlf,gpio-cfg = <1 3>; - assigned-clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_SRC>, - <&clks IMX7D_PLL_AUDIO_POST_DIV>, -- <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>; - assigned-clock-rates = <0>, <884736000>, <12288000>; - }; -diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts -index 569bbd84e371a..558b064da743c 100644 ---- a/arch/arm/boot/dts/imx7s-warp.dts -+++ b/arch/arm/boot/dts/imx7s-warp.dts -@@ -75,7 +75,7 @@ - - dailink_master: simple-audio-card,codec { - sound-dai = <&codec>; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - }; - }; - }; -@@ -232,7 +232,7 @@ - #sound-dai-cells = <0>; - reg = <0x0a>; - compatible = "fsl,sgtl5000"; -- clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; -+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_sai1_mclk>; - VDDA-supply = <&vgen4_reg>; -diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi -index 1843fc0538709..c978aab1d0e3d 100644 ---- a/arch/arm/boot/dts/imx7s.dtsi -+++ b/arch/arm/boot/dts/imx7s.dtsi -@@ -104,6 +104,7 @@ - compatible = "usb-nop-xceiv"; - clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>; - clock-names = "main_clk"; -+ power-domains = <&pgc_hsic_phy>; - #phy-cells = <0>; - }; - -@@ -496,7 +497,7 @@ - - mux: mux-controller { - compatible = "mmio-mux"; -- #mux-control-cells = <0>; -+ #mux-control-cells = <1>; - mux-reg-masks = <0x14 0x00000010>; - }; - -@@ -1135,7 +1136,6 @@ - compatible = "fsl,imx7d-usb", "fsl,imx27-usb"; - reg = <0x30b30000 0x200>; - interrupts = ; -- power-domains = <&pgc_hsic_phy>; - clocks = <&clks IMX7D_USB_CTRL_CLK>; - fsl,usbphy = <&usbphynop3>; - fsl,usbmisc = <&usbmisc3 0>; -@@ -1166,6 +1166,8 @@ - <&clks IMX7D_USDHC1_ROOT_CLK>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-step = <2>; -+ fsl,tuning-start-tap = <20>; - status = "disabled"; - }; - -@@ -1178,6 +1180,8 @@ - <&clks IMX7D_USDHC2_ROOT_CLK>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-step = <2>; -+ fsl,tuning-start-tap = <20>; - status = "disabled"; - }; - -@@ -1190,6 +1194,8 @@ - <&clks IMX7D_USDHC3_ROOT_CLK>; - clock-names = "ipg", "ahb", "per"; - bus-width = <4>; -+ fsl,tuning-step = <2>; -+ fsl,tuning-start-tap = <20>; - status = "disabled"; - }; - -@@ -1206,7 +1212,7 @@ - status = "disabled"; - }; - -- sdma: sdma@30bd0000 { -+ sdma: dma-controller@30bd0000 { - compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma"; - reg = <0x30bd0000 0x10000>; - interrupts = ; -@@ -1239,14 +1245,13 @@ - }; - }; - -- dma_apbh: dma-apbh@33000000 { -+ dma_apbh: dma-controller@33000000 { - compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh"; - reg = <0x33000000 0x2000>; - interrupts = , - , - , - ; -- interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3"; - #dma-cells = <1>; - dma-channels = <4>; - clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>; -diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi -index b7ea37ad4e55c..bcec98b964114 100644 ---- a/arch/arm/boot/dts/imx7ulp.dtsi -+++ b/arch/arm/boot/dts/imx7ulp.dtsi -@@ -259,7 +259,7 @@ - 
interrupts = ; - clocks = <&pcc2 IMX7ULP_CLK_WDG1>; - assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>; -- assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; -+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; - timeout-sec = <40>; - }; - -diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts -index 67d1f9b24a52f..8600c0548525e 100644 ---- a/arch/arm/boot/dts/integratorap.dts -+++ b/arch/arm/boot/dts/integratorap.dts -@@ -153,6 +153,7 @@ - - pci: pciv3@62000000 { - compatible = "arm,integrator-ap-pci", "v3,v360epc-pci"; -+ device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; -diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi -index bc857676d1910..c13d2f6e1a38f 100644 ---- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi -+++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi -@@ -49,7 +49,7 @@ - lcd_backlight: backlight { - compatible = "pwm-backlight"; - -- pwms = <&pwm3 0 5000000 0>; -+ pwms = <&pwm3 0 5000000>; - brightness-levels = <0 4 8 16 32 64 128 255>; - default-brightness-level = <7>; - enable-gpios = <&gpio5 14 GPIO_ACTIVE_HIGH>; -diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi -index 7b151acb99846..88b70ba1c8fee 100644 ---- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi -+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi -@@ -10,6 +10,11 @@ - - ocp@f1000000 { - pinctrl: pin-controller@10000 { -+ /* Non-default UART pins */ -+ pmx_uart0: pmx-uart0 { -+ marvell,pins = "mpp4", "mpp5"; -+ }; -+ - pmx_power_hdd: pmx-power-hdd { - marvell,pins = "mpp10"; - marvell,function = "gpo"; -@@ -213,22 +218,11 @@ - &mdio { - status = "okay"; - -- ethphy0: ethernet-phy@0 { -- reg = <0>; -- }; -- - ethphy1: ethernet-phy@8 { - reg = <8>; - }; - }; - --ð0 { -- status = "okay"; -- ethernet0-port@0 { -- phy-handle = <ðphy0>; -- }; --}; -- - ð1 { - status = "okay"; - ethernet1-port@0 { -diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts -index 2a0a98fe67f06..3240c67e0c392 100644 ---- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts -+++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts -@@ -11,3 +11,18 @@ - model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit"; - compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3"; - }; -+ -+&omap3_pmx_core2 { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&hsusb2_2_pins>; -+ hsusb2_2_pins: pinmux_hsusb2_2_pins { -+ pinctrl-single,pins = < -+ OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ -+ OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ -+ OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ -+ OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ -+ OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ -+ OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ -+ >; -+ }; -+}; -diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts -index a604d92221a4f..c757f0d7781c1 100644 ---- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts -+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts -@@ -11,3 +11,18 @@ - model = "LogicPD Zoom DM3730 SOM-LV Development Kit"; - compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3"; - }; -+ -+&omap3_pmx_core2 { -+ pinctrl-names = "default"; -+ pinctrl-0 
= <&hsusb2_2_pins>; -+ hsusb2_2_pins: pinmux_hsusb2_2_pins { -+ pinctrl-single,pins = < -+ OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ -+ OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ -+ OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ -+ OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ -+ OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ -+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ -+ >; -+ }; -+}; -diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi -index b56524cc7fe27..55b619c99e24d 100644 ---- a/arch/arm/boot/dts/logicpd-som-lv.dtsi -+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi -@@ -265,21 +265,6 @@ - }; - }; - --&omap3_pmx_core2 { -- pinctrl-names = "default"; -- pinctrl-0 = <&hsusb2_2_pins>; -- hsusb2_2_pins: pinmux_hsusb2_2_pins { -- pinctrl-single,pins = < -- OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ -- OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ -- OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ -- OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ -- OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ -- OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ -- >; -- }; --}; -- - &uart2 { - interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>; - pinctrl-names = "default"; -diff --git a/arch/arm/boot/dts/ls1021a-tsn.dts b/arch/arm/boot/dts/ls1021a-tsn.dts -index 9d8f0c2a8aba3..aca78b5eddf20 100644 ---- a/arch/arm/boot/dts/ls1021a-tsn.dts -+++ b/arch/arm/boot/dts/ls1021a-tsn.dts -@@ -251,7 +251,7 @@ - - flash@0 { - /* Rev. A uses 64MB flash, Rev. 
B & C use 32MB flash */ -- compatible = "jedec,spi-nor", "s25fl256s1", "s25fl512s"; -+ compatible = "jedec,spi-nor"; - spi-max-frequency = <20000000>; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi -index 4fce81422943b..f3b8540750b61 100644 ---- a/arch/arm/boot/dts/ls1021a.dtsi -+++ b/arch/arm/boot/dts/ls1021a.dtsi -@@ -329,39 +329,6 @@ - #thermal-sensor-cells = <1>; - }; - -- thermal-zones { -- cpu_thermal: cpu-thermal { -- polling-delay-passive = <1000>; -- polling-delay = <5000>; -- -- thermal-sensors = <&tmu 0>; -- -- trips { -- cpu_alert: cpu-alert { -- temperature = <85000>; -- hysteresis = <2000>; -- type = "passive"; -- }; -- cpu_crit: cpu-crit { -- temperature = <95000>; -- hysteresis = <2000>; -- type = "critical"; -- }; -- }; -- -- cooling-maps { -- map0 { -- trip = <&cpu_alert>; -- cooling-device = -- <&cpu0 THERMAL_NO_LIMIT -- THERMAL_NO_LIMIT>, -- <&cpu1 THERMAL_NO_LIMIT -- THERMAL_NO_LIMIT>; -- }; -- }; -- }; -- }; -- - dspi0: spi@2100000 { - compatible = "fsl,ls1021a-v1.0-dspi"; - #address-cells = <1>; -@@ -1016,4 +983,37 @@ - big-endian; - }; - }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 0>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <95000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>, -+ <&cpu1 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; - }; -diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi -index 3be7cba603d5a..26eaba3fa96f3 100644 ---- a/arch/arm/boot/dts/meson.dtsi -+++ b/arch/arm/boot/dts/meson.dtsi -@@ -59,7 +59,7 @@ - }; - - uart_A: serial@84c0 { -- compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; -+ compatible = "amlogic,meson6-uart"; - reg = <0x84c0 0x18>; - interrupts = ; - fifo-size = <128>; -@@ -67,7 +67,7 @@ - }; - - uart_B: serial@84dc { -- compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; -+ compatible = "amlogic,meson6-uart"; - reg = <0x84dc 0x18>; - interrupts = ; - status = "disabled"; -@@ -105,7 +105,7 @@ - }; - - uart_C: serial@8700 { -- compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; -+ compatible = "amlogic,meson6-uart"; - reg = <0x8700 0x18>; - interrupts = ; - status = "disabled"; -@@ -228,7 +228,7 @@ - }; - - uart_AO: serial@4c0 { -- compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; -+ compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart"; - reg = <0x4c0 0x18>; - interrupts = ; - status = "disabled"; -diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi -index f80ddc98d3a2b..72828b9d4281d 100644 ---- a/arch/arm/boot/dts/meson8.dtsi -+++ b/arch/arm/boot/dts/meson8.dtsi -@@ -736,27 +736,27 @@ - }; - - &uart_AO { -- compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart"; -+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_A { -- compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; -- clock-names = 
"baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_B { -- compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_C { -- compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &usb0 { -diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi -index b49b7cbaed4ee..cfd4a909a7a70 100644 ---- a/arch/arm/boot/dts/meson8b.dtsi -+++ b/arch/arm/boot/dts/meson8b.dtsi -@@ -724,27 +724,27 @@ - }; - - &uart_AO { -- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart"; -+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_A { -- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8b-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_B { -- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8b-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART1>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &uart_C { -- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; -- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; -- clock-names = "baud", "xtal", "pclk"; -+ compatible = "amlogic,meson8b-uart"; -+ clocks = <&xtal>, <&clkc CLKID_UART2>, <&clkc CLKID_CLK81>; -+ clock-names = "xtal", "pclk", "baud"; - }; - - &usb0 { -diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts -index eb5291b0ee3aa..e07b807b4cec5 100644 ---- a/arch/arm/boot/dts/moxart-uc7112lx.dts -+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts -@@ -79,7 +79,7 @@ - clocks = <&ref12>; - }; - --&sdhci { -+&mmc { - status = "okay"; - }; - -diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi -index f5f070a874823..764832ddfa78a 100644 ---- a/arch/arm/boot/dts/moxart.dtsi -+++ b/arch/arm/boot/dts/moxart.dtsi -@@ -93,8 +93,8 @@ - clock-names = "PCLK"; - }; - -- sdhci: sdhci@98e00000 { -- compatible = "moxa,moxart-sdhci"; -+ mmc: mmc@98e00000 { -+ compatible = "moxa,moxart-mmc"; - reg = <0x98e00000 0x5C>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk_apb>; -diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts -index eb6eb21cb2a44..33c8d5b3d679a 100644 ---- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts -+++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts -@@ -366,7 +366,7 @@ - spi-max-frequency = <20000000>; - spi-rx-bus-width = <2>; - label = "bmc"; -- partitions@80000000 { 
-+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts -index d4ff49939a3d9..bbe18618f5c56 100644 ---- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts -+++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts -@@ -142,7 +142,7 @@ - reg = <0>; - spi-rx-bus-width = <2>; - -- partitions@80000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts -index 82a104b2a65f1..8e3425cb8e8b9 100644 ---- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts -+++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts -@@ -388,7 +388,7 @@ - spi-max-frequency = <5000000>; - spi-rx-bus-width = <2>; - label = "bmc"; -- partitions@80000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -@@ -422,7 +422,7 @@ - reg = <1>; - spi-max-frequency = <5000000>; - spi-rx-bus-width = <2>; -- partitions@88000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -@@ -447,7 +447,7 @@ - reg = <0>; - spi-max-frequency = <5000000>; - spi-rx-bus-width = <2>; -- partitions@A0000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts -index 0334641f88292..cf274c926711a 100644 ---- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts -+++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts -@@ -74,7 +74,7 @@ - spi-rx-bus-width = <2>; - reg = <0>; - spi-max-frequency = <5000000>; -- partitions@80000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -@@ -135,7 +135,7 @@ - spi-rx-bus-width = <2>; - reg = <0>; - spi-max-frequency = <5000000>; -- partitions@A0000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts -index 767e0ac0df7c5..7fe7efee28acb 100644 ---- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts -+++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts -@@ -107,7 +107,7 @@ - reg = <0>; - spi-rx-bus-width = <2>; - -- partitions@80000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -@@ -146,7 +146,7 @@ - reg = <1>; - npcm,fiu-rx-bus-width = <2>; - -- partitions@88000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -@@ -173,7 +173,7 @@ - reg = <0>; - spi-rx-bus-width = <2>; - -- partitions@A0000000 { -+ partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; -diff --git a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi -index 7f6aefd134514..e7534fe9c53cf 100644 ---- a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi -+++ b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi -@@ -29,7 +29,7 @@ - compatible = "smsc,lan9221","smsc,lan9115"; - bank-width = <2>; - -- gpmc,mux-add-data; -+ gpmc,mux-add-data = <0>; - gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <42>; - gpmc,cs-wr-off-ns = <36>; -diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts -new file mode 100644 -index 0000000000000..990ff2d846868 ---- /dev/null -+++ 
b/arch/arm/boot/dts/omap3-beagle-ab4.dts -@@ -0,0 +1,47 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/dts-v1/; -+ -+#include "omap3-beagle.dts" -+ -+/ { -+ model = "TI OMAP3 BeagleBoard A to B4"; -+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"; -+}; -+ -+/* -+ * Workaround for capacitor C70 issue, see "Boards revision A and < B5" -+ * section at https://elinux.org/BeagleBoard_Community -+ */ -+ -+/* Unusable as clocksource because of unreliable oscillator */ -+&counter32k { -+ status = "disabled"; -+}; -+ -+/* Unusable as clockevent because of unreliable oscillator, allow to idle */ -+&timer1_target { -+ /delete-property/ti,no-reset-on-init; -+ /delete-property/ti,no-idle; -+ timer@0 { -+ /delete-property/ti,timer-alwon; -+ }; -+}; -+ -+/* Preferred always-on timer for clocksource */ -+&timer12_target { -+ ti,no-reset-on-init; -+ ti,no-idle; -+ timer@0 { -+ /* Always clocked by secure_32k_fck */ -+ }; -+}; -+ -+/* Preferred timer for clockevent */ -+&timer2_target { -+ ti,no-reset-on-init; -+ ti,no-idle; -+ timer@0 { -+ assigned-clocks = <&gpt2_fck>; -+ assigned-clock-parents = <&sys_ck>; -+ }; -+}; -diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts -index f9f34b8458e91..0548b391334fd 100644 ---- a/arch/arm/boot/dts/omap3-beagle.dts -+++ b/arch/arm/boot/dts/omap3-beagle.dts -@@ -304,39 +304,6 @@ - phys = <0 &hsusb2_phy>; - }; - --/* Unusable as clocksource because of unreliable oscillator */ --&counter32k { -- status = "disabled"; --}; -- --/* Unusable as clockevent because if unreliable oscillator, allow to idle */ --&timer1_target { -- /delete-property/ti,no-reset-on-init; -- /delete-property/ti,no-idle; -- timer@0 { -- /delete-property/ti,timer-alwon; -- }; --}; -- --/* Preferred always-on timer for clocksource */ --&timer12_target { -- ti,no-reset-on-init; -- ti,no-idle; -- timer@0 { -- /* Always clocked by secure_32k_fck */ -- }; --}; -- --/* Preferred timer for clockevent */ --&timer2_target { -- ti,no-reset-on-init; -- ti,no-idle; -- timer@0 { -- assigned-clocks = <&gpt2_fck>; -- assigned-clock-parents = <&sys_ck>; -- }; --}; -- - &twl_gpio { - ti,use-leds; - /* pullups: BIT(1) */ -diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi -index e61b8a2bfb7de..51baedf1603bd 100644 ---- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi -+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi -@@ -227,7 +227,7 @@ - - interrupt-parent = <&gpio2>; - interrupts = <25 0>; /* gpio_57 */ -- pendown-gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio2 25 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <0x0>; - ti,x-max = /bits/ 16 <0x0fff>; -diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi -index 2c19d6e255bdc..6883ccb45600b 100644 ---- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi -+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi -@@ -158,6 +158,24 @@ - status = "disabled"; - }; - -+/* Unusable as clockevent because if unreliable oscillator, allow to idle */ -+&timer1_target { -+ /delete-property/ti,no-reset-on-init; -+ /delete-property/ti,no-idle; -+ timer@0 { -+ /delete-property/ti,timer-alwon; -+ }; -+}; -+ -+/* Preferred timer for clockevent */ -+&timer12_target { -+ ti,no-reset-on-init; -+ ti,no-idle; -+ timer@0 { -+ /* Always clocked by secure_32k_fck */ -+ }; -+}; -+ - &twl_gpio { - ti,use-leds; - /* -diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi -index 
3decc2d78a6ca..a7f99ae0c1fe9 100644 ---- a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi -+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi -@@ -54,7 +54,7 @@ - - interrupt-parent = <&gpio1>; - interrupts = <27 0>; /* gpio_27 */ -- pendown-gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio1 27 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <0x0>; - ti,x-max = /bits/ 16 <0x0fff>; -diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts -index c2995a280729d..162d0726b0080 100644 ---- a/arch/arm/boot/dts/omap3-devkit8000.dts -+++ b/arch/arm/boot/dts/omap3-devkit8000.dts -@@ -14,36 +14,3 @@ - display2 = &tv0; - }; - }; -- --/* Unusable as clocksource because of unreliable oscillator */ --&counter32k { -- status = "disabled"; --}; -- --/* Unusable as clockevent because if unreliable oscillator, allow to idle */ --&timer1_target { -- /delete-property/ti,no-reset-on-init; -- /delete-property/ti,no-idle; -- timer@0 { -- /delete-property/ti,timer-alwon; -- }; --}; -- --/* Preferred always-on timer for clocksource */ --&timer12_target { -- ti,no-reset-on-init; -- ti,no-idle; -- timer@0 { -- /* Always clocked by secure_32k_fck */ -- }; --}; -- --/* Preferred timer for clockevent */ --&timer2_target { -- ti,no-reset-on-init; -- ti,no-idle; -- timer@0 { -- assigned-clocks = <&gpt2_fck>; -- assigned-clock-parents = <&sys_ck>; -- }; --}; -diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi -index 938cc691bb2fe..bb5e00b36d8dc 100644 ---- a/arch/arm/boot/dts/omap3-gta04.dtsi -+++ b/arch/arm/boot/dts/omap3-gta04.dtsi -@@ -31,6 +31,8 @@ - aliases { - display0 = &lcd; - display1 = &tv0; -+ /delete-property/ mmc2; -+ /delete-property/ mmc3; - }; - - ldo_3v3: fixedregulator { -@@ -515,7 +517,7 @@ - compatible = "bosch,bma180"; - reg = <0x41>; - pinctrl-names = "default"; -- pintcrl-0 = <&bma180_pins>; -+ pinctrl-0 = <&bma180_pins>; - interrupt-parent = <&gpio4>; - interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_115 */ - }; -@@ -607,6 +609,22 @@ - clock-frequency = <100000>; - }; - -+&mcspi1 { -+ status = "disabled"; -+}; -+ -+&mcspi2 { -+ status = "disabled"; -+}; -+ -+&mcspi3 { -+ status = "disabled"; -+}; -+ -+&mcspi4 { -+ status = "disabled"; -+}; -+ - &usb_otg_hs { - interface-type = <0>; - usb-phy = <&usb2_phy>; -diff --git a/arch/arm/boot/dts/omap3-gta04a5one.dts b/arch/arm/boot/dts/omap3-gta04a5one.dts -index 9db9fe67cd63b..95df45cc70c09 100644 ---- a/arch/arm/boot/dts/omap3-gta04a5one.dts -+++ b/arch/arm/boot/dts/omap3-gta04a5one.dts -@@ -5,9 +5,11 @@ - - #include "omap3-gta04a5.dts" - --&omap3_pmx_core { -+/ { - model = "Goldelico GTA04A5/Letux 2804 with OneNAND"; -+}; - -+&omap3_pmx_core { - gpmc_pins: pinmux_gpmc_pins { - pinctrl-single,pins = < - -diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi -index 73d477898ec2a..06e7cf96c6639 100644 ---- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi -+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi -@@ -311,7 +311,7 @@ - interrupt-parent = <&gpio1>; - interrupts = <8 0>; /* boot6 / gpio_8 */ - spi-max-frequency = <1000000>; -- pendown-gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio1 8 GPIO_ACTIVE_LOW>; - vcc-supply = <®_vcc3>; - pinctrl-names = "default"; - pinctrl-0 = <&tsc2048_pins>; -diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts -index 32335d4ce478b..d40c3d2c4914e 100644 ---- a/arch/arm/boot/dts/omap3-n900.dts -+++ b/arch/arm/boot/dts/omap3-n900.dts -@@ -8,6 +8,7 @@ - - #include 
"omap34xx.dtsi" - #include -+#include - - /* - * Default secure signed bootloader (Nokia X-Loader) does not enable L3 firewall -@@ -630,63 +631,92 @@ - }; - - lp5523: lp5523@32 { -+ #address-cells = <1>; -+ #size-cells = <0>; - compatible = "national,lp5523"; - reg = <0x32>; - clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */ -- enable-gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ -+ enable-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ - -- chan0 { -+ led@0 { -+ reg = <0>; - chan-name = "lp5523:kb1"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - -- chan1 { -+ led@1 { -+ reg = <1>; - chan-name = "lp5523:kb2"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - -- chan2 { -+ led@2 { -+ reg = <2>; - chan-name = "lp5523:kb3"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - -- chan3 { -+ led@3 { -+ reg = <3>; - chan-name = "lp5523:kb4"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - -- chan4 { -+ led@4 { -+ reg = <4>; - chan-name = "lp5523:b"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_STATUS; - }; - -- chan5 { -+ led@5 { -+ reg = <5>; - chan-name = "lp5523:g"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_STATUS; - }; - -- chan6 { -+ led@6 { -+ reg = <6>; - chan-name = "lp5523:r"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_STATUS; - }; - -- chan7 { -+ led@7 { -+ reg = <7>; - chan-name = "lp5523:kb5"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - -- chan8 { -+ led@8 { -+ reg = <8>; - chan-name = "lp5523:kb6"; - led-cur = /bits/ 8 <50>; - max-cur = /bits/ 8 <100>; -+ color = ; -+ function = LED_FUNCTION_KBD_BACKLIGHT; - }; - }; - -diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi -index 1d6e88f99eb31..c3570acc35fad 100644 ---- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi -+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi -@@ -149,7 +149,7 @@ - - interrupt-parent = <&gpio4>; - interrupts = <18 0>; /* gpio_114 */ -- pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <0x0>; - ti,x-max = /bits/ 16 <0x0fff>; -diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi -index 7e30f9d45790e..d95a0e130058c 100644 ---- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi -+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi -@@ -160,7 +160,7 @@ - - interrupt-parent = <&gpio4>; - interrupts = <18 0>; /* gpio_114 */ -- pendown-gpio = <&gpio4 18 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio4 18 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <0x0>; - ti,x-max = /bits/ 16 <0x0fff>; -diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi -index e5da3bc6f1050..218a10c0d8159 100644 ---- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi -+++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi -@@ -22,7 +22,7 @@ - compatible = "smsc,lan9221","smsc,lan9115"; - bank-width = <2>; - -- gpmc,mux-add-data; -+ gpmc,mux-add-data = <0>; - gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <42>; - 
gpmc,cs-wr-off-ns = <36>; -diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi -index 37608af6c07f5..ca6d777ebf843 100644 ---- a/arch/arm/boot/dts/omap3-pandora-common.dtsi -+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi -@@ -651,7 +651,7 @@ - pinctrl-0 = <&penirq_pins>; - interrupt-parent = <&gpio3>; - interrupts = <30 IRQ_TYPE_NONE>; /* GPIO_94 */ -- pendown-gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio3 30 GPIO_ACTIVE_LOW>; - vcc-supply = <&vaux4>; - - ti,x-min = /bits/ 16 <0>; -diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts -index ca759b7b8a580..e62ea8b6d53fd 100644 ---- a/arch/arm/boot/dts/omap5-cm-t54.dts -+++ b/arch/arm/boot/dts/omap5-cm-t54.dts -@@ -354,7 +354,7 @@ - - interrupt-parent = <&gpio1>; - interrupts = <15 0>; /* gpio1_wk15 */ -- pendown-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>; -+ pendown-gpio = <&gpio1 15 GPIO_ACTIVE_LOW>; - - - ti,x-min = /bits/ 16 <0x0>; -diff --git a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi -index 31f59de5190b8..7af41361c4800 100644 ---- a/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi -+++ b/arch/arm/boot/dts/openbmc-flash-layout-64.dtsi -@@ -28,7 +28,7 @@ partitions { - label = "rofs"; - }; - -- rwfs@6000000 { -+ rwfs@2a00000 { - reg = <0x2a00000 0x1600000>; // 22MB - label = "rwfs"; - }; -diff --git a/arch/arm/boot/dts/openbmc-flash-layout.dtsi b/arch/arm/boot/dts/openbmc-flash-layout.dtsi -index 6c26524e93e11..b47e14063c380 100644 ---- a/arch/arm/boot/dts/openbmc-flash-layout.dtsi -+++ b/arch/arm/boot/dts/openbmc-flash-layout.dtsi -@@ -20,7 +20,7 @@ partitions { - label = "kernel"; - }; - -- rofs@c0000 { -+ rofs@4c0000 { - reg = <0x4c0000 0x1740000>; - label = "rofs"; - }; -diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi -index 90846a7655b49..dde4364892bf0 100644 ---- a/arch/arm/boot/dts/ox820.dtsi -+++ b/arch/arm/boot/dts/ox820.dtsi -@@ -287,7 +287,7 @@ - clocks = <&armclk>; - }; - -- gic: gic@1000 { -+ gic: interrupt-controller@1000 { - compatible = "arm,arm11mp-gic"; - interrupt-controller; - #interrupt-cells = <3>; -diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi -index d1c1c6aab2b87..0e830476fefd2 100644 ---- a/arch/arm/boot/dts/qcom-apq8064.dtsi -+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi -@@ -1571,7 +1571,7 @@ - }; - - etb@1a01000 { -- compatible = "coresight-etb10", "arm,primecell"; -+ compatible = "arm,coresight-etb10", "arm,primecell"; - reg = <0x1a01000 0x1000>; - - clocks = <&rpmcc RPM_QDSS_CLK>; -diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts -index b0f476ff017f9..aadca9bf416cb 100644 ---- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts -+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1-c1.dts -@@ -11,9 +11,9 @@ - dma@7984000 { - status = "okay"; - }; -- -- qpic-nand@79b0000 { -- status = "okay"; -- }; - }; - }; -+ -+&nand { -+ status = "okay"; -+}; -diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi -index 7a337dc087417..726aa30eeef54 100644 ---- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi -+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk04.1.dtsi -@@ -102,10 +102,10 @@ - status = "okay"; - perst-gpio = <&tlmm 38 0x1>; - }; -- -- qpic-nand@79b0000 { -- pinctrl-0 = <&nand_pins>; -- pinctrl-names = "default"; -- }; - }; - }; -+ -+&nand { -+ pinctrl-0 = <&nand_pins>; -+ pinctrl-names = "default"; -+}; 
-diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi -index 94872518b5a23..9988b9eab8035 100644 ---- a/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi -+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk07.1.dtsi -@@ -65,11 +65,11 @@ - dma@7984000 { - status = "okay"; - }; -- -- qpic-nand@79b0000 { -- pinctrl-0 = <&nand_pins>; -- pinctrl-names = "default"; -- status = "okay"; -- }; - }; - }; -+ -+&nand { -+ pinctrl-0 = <&nand_pins>; -+ pinctrl-names = "default"; -+ status = "okay"; -+}; -diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi -index ff1bdb10ad198..9dcf308b3ad49 100644 ---- a/arch/arm/boot/dts/qcom-ipq4019.dtsi -+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi -@@ -142,7 +142,8 @@ - clocks { - sleep_clk: sleep_clk { - compatible = "fixed-clock"; -- clock-frequency = <32768>; -+ clock-frequency = <32000>; -+ clock-output-names = "gcc_sleep_clk_src"; - #clock-cells = <0>; - }; - -@@ -423,8 +424,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>, -- <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>; -+ ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>, -+ <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>; - - interrupts = ; - interrupt-names = "msi"; -diff --git a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts -index f7ea2e5dd1914..971d2e2292600 100644 ---- a/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts -+++ b/arch/arm/boot/dts/qcom-ipq8064-rb3011.dts -@@ -19,12 +19,12 @@ - stdout-path = "serial0:115200n8"; - }; - -- memory@0 { -+ memory@42000000 { - reg = <0x42000000 0x3e000000>; - device_type = "memory"; - }; - -- mdio0: mdio@0 { -+ mdio0: mdio-0 { - status = "okay"; - compatible = "virtual,mdio-gpio"; - gpios = <&qcom_pinmux 1 GPIO_ACTIVE_HIGH>, -@@ -91,7 +91,7 @@ - }; - }; - -- mdio1: mdio@1 { -+ mdio1: mdio-1 { - status = "okay"; - compatible = "virtual,mdio-gpio"; - gpios = <&qcom_pinmux 11 GPIO_ACTIVE_HIGH>, -diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi -index 4139d3817bd6f..f4139411c41ed 100644 ---- a/arch/arm/boot/dts/qcom-ipq8064.dtsi -+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi -@@ -808,8 +808,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00100000 /* downstream I/O */ -- 0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000 /* I/O */ -+ 0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */ - - interrupts = ; - interrupt-names = "msi"; -@@ -859,8 +859,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00100000 /* downstream I/O */ -- 0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000 /* I/O */ -+ 0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */ - - interrupts = ; - interrupt-names = "msi"; -@@ -910,8 +910,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00100000 /* downstream I/O */ -- 0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000 /* I/O */ -+ 0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */ - - interrupts = ; - interrupt-names = "msi"; -diff --git 
a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi -index dda2ceec6591a..ad9b52d53ef9b 100644 ---- a/arch/arm/boot/dts/qcom-mdm9615.dtsi -+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi -@@ -324,6 +324,7 @@ - - pmicgpio: gpio@150 { - compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio"; -+ reg = <0x150>; - interrupt-controller; - #interrupt-cells = <2>; - gpio-controller; -diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi -index 172ea3c70eac2..c197927e7435f 100644 ---- a/arch/arm/boot/dts/qcom-msm8960.dtsi -+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi -@@ -146,7 +146,9 @@ - reg = <0x108000 0x1000>; - qcom,ipc = <&l2cc 0x8 2>; - -- interrupts = <0 19 0>, <0 21 0>, <0 22 0>; -+ interrupts = , -+ , -+ ; - interrupt-names = "ack", "err", "wakeup"; - - regulators { -@@ -192,7 +194,7 @@ - compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; - reg = <0x16440000 0x1000>, - <0x16400000 0x1000>; -- interrupts = <0 154 0x0>; -+ interrupts = ; - clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>; - clock-names = "core", "iface"; - status = "disabled"; -@@ -318,7 +320,7 @@ - #address-cells = <1>; - #size-cells = <0>; - reg = <0x16080000 0x1000>; -- interrupts = <0 147 0>; -+ interrupts = ; - spi-max-frequency = <24000000>; - cs-gpios = <&msmgpio 8 0>; - -diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi -index 78ec496d5bc30..05d51839d40a1 100644 ---- a/arch/arm/boot/dts/qcom-msm8974.dtsi -+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi -@@ -718,7 +718,7 @@ - blsp2_uart7: serial@f995d000 { - compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; - reg = <0xf995d000 0x1000>; -- interrupts = ; -+ interrupts = ; - clocks = <&gcc GCC_BLSP2_UART1_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; - clock-names = "core", "iface"; - status = "disabled"; -@@ -1589,8 +1589,8 @@ - #phy-cells = <0>; - qcom,dsi-phy-index = <0>; - -- clocks = <&mmcc MDSS_AHB_CLK>; -- clock-names = "iface"; -+ clocks = <&mmcc MDSS_AHB_CLK>, <&xo_board>; -+ clock-names = "iface", "ref"; - }; - }; - -diff --git a/arch/arm/boot/dts/qcom-pm8841.dtsi b/arch/arm/boot/dts/qcom-pm8841.dtsi -index 2fd59c440903d..c73e5b149ac5e 100644 ---- a/arch/arm/boot/dts/qcom-pm8841.dtsi -+++ b/arch/arm/boot/dts/qcom-pm8841.dtsi -@@ -25,6 +25,7 @@ - compatible = "qcom,spmi-temp-alarm"; - reg = <0x2400>; - interrupts = <4 0x24 0 IRQ_TYPE_EDGE_RISING>; -+ #thermal-sensor-cells = <0>; - }; - }; - -diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi -index 1e6ce035f76a9..9d62487f6c8ff 100644 ---- a/arch/arm/boot/dts/qcom-sdx55.dtsi -+++ b/arch/arm/boot/dts/qcom-sdx55.dtsi -@@ -205,7 +205,7 @@ - blsp1_uart3: serial@831000 { - compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; - reg = <0x00831000 0x200>; -- interrupts = ; -+ interrupts = ; - clocks = <&gcc 30>, - <&gcc 9>; - clock-names = "core", "iface"; -@@ -334,12 +334,10 @@ - clocks = <&rpmhcc RPMH_IPA_CLK>; - clock-names = "core"; - -- interconnects = <&system_noc MASTER_IPA &system_noc SLAVE_SNOC_MEM_NOC_GC>, -- <&mem_noc MASTER_SNOC_GC_MEM_NOC &mc_virt SLAVE_EBI_CH0>, -+ interconnects = <&system_noc MASTER_IPA &mc_virt SLAVE_EBI_CH0>, - <&system_noc MASTER_IPA &system_noc SLAVE_OCIMEM>, - <&mem_noc MASTER_AMPSS_M0 &system_noc SLAVE_IPA_CFG>; -- interconnect-names = "memory-a", -- "memory-b", -+ interconnect-names = "memory", - "imem", - "config"; - -@@ -504,7 +502,7 @@ - }; - - apps_smmu: iommu@15000000 { -- compatible = "qcom,sdx55-smmu-500", "arm,mmu-500"; -+ compatible = "qcom,sdx55-smmu-500", 
"qcom,smmu-500", "arm,mmu-500"; - reg = <0x15000000 0x20000>; - #iommu-cells = <2>; - #global-interrupts = <1>; -diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts -index 2a7e6624efb93..94216f870b57c 100644 ---- a/arch/arm/boot/dts/rk3036-evb.dts -+++ b/arch/arm/boot/dts/rk3036-evb.dts -@@ -31,11 +31,10 @@ - &i2c1 { - status = "okay"; - -- hym8563: hym8563@51 { -+ hym8563: rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - #clock-cells = <0>; -- clock-frequency = <32768>; - clock-output-names = "xin32k"; - }; - }; -diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts -index 36c0945f43b22..3718fac62841c 100644 ---- a/arch/arm/boot/dts/rk3188-radxarock.dts -+++ b/arch/arm/boot/dts/rk3188-radxarock.dts -@@ -71,7 +71,7 @@ - #sound-dai-cells = <0>; - }; - -- ir_recv: gpio-ir-receiver { -+ ir_recv: ir-receiver { - compatible = "gpio-ir-receiver"; - gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; - pinctrl-names = "default"; -diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi -index 2c606494b78c4..e07b1d79c470a 100644 ---- a/arch/arm/boot/dts/rk3188.dtsi -+++ b/arch/arm/boot/dts/rk3188.dtsi -@@ -378,7 +378,7 @@ - rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>; - }; - -- lcdc1_rgb24: ldcd1-rgb24 { -+ lcdc1_rgb24: lcdc1-rgb24 { - rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>, - <2 RK_PA1 1 &pcfg_pull_none>, - <2 RK_PA2 1 &pcfg_pull_none>, -@@ -606,7 +606,6 @@ - - &global_timer { - interrupts = ; -- status = "disabled"; - }; - - &local_timer { -diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi -index 75af99c76d7ea..f31cf1df892b2 100644 ---- a/arch/arm/boot/dts/rk322x.dtsi -+++ b/arch/arm/boot/dts/rk322x.dtsi -@@ -718,8 +718,8 @@ - interrupts = ; - assigned-clocks = <&cru SCLK_HDMI_PHY>; - assigned-clock-parents = <&hdmi_phy>; -- clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>; -- clock-names = "isfr", "iahb", "cec"; -+ clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>; -+ clock-names = "iahb", "isfr", "cec"; - pinctrl-names = "default"; - pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>; - resets = <&cru SRST_HDMI_P>; -diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts -index be695b8c1f672..8a635c2431274 100644 ---- a/arch/arm/boot/dts/rk3288-evb-act8846.dts -+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts -@@ -54,7 +54,7 @@ - vin-supply = <&vcc_sys>; - }; - -- hym8563@51 { -+ rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - -diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi -index 7fb582302b326..74ba7e21850a5 100644 ---- a/arch/arm/boot/dts/rk3288-firefly.dtsi -+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi -@@ -233,11 +233,10 @@ - vin-supply = <&vcc_sys>; - }; - -- hym8563: hym8563@51 { -+ hym8563: rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - #clock-cells = <0>; -- clock-frequency = <32768>; - clock-output-names = "xin32k"; - interrupt-parent = <&gpio7>; - interrupts = ; -diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts -index 713f55e143c69..db1eb648e0e1a 100644 ---- a/arch/arm/boot/dts/rk3288-miqi.dts -+++ b/arch/arm/boot/dts/rk3288-miqi.dts -@@ -162,11 +162,10 @@ - vin-supply = <&vcc_sys>; - }; - -- hym8563: hym8563@51 { -+ hym8563: rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - #clock-cells = <0>; -- clock-frequency = <32768>; - clock-output-names = "xin32k"; - }; - -diff --git 
a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts -index c4d1d142d8c68..bc44606ca05d8 100644 ---- a/arch/arm/boot/dts/rk3288-rock2-square.dts -+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts -@@ -165,11 +165,10 @@ - }; - - &i2c0 { -- hym8563: hym8563@51 { -+ hym8563: rtc@51 { - compatible = "haoyu,hym8563"; - reg = <0x51>; - #clock-cells = <0>; -- clock-frequency = <32768>; - clock-output-names = "xin32k"; - interrupt-parent = <&gpio0>; - interrupts = ; -diff --git a/arch/arm/boot/dts/rk3288-vmarc-som.dtsi b/arch/arm/boot/dts/rk3288-vmarc-som.dtsi -index 0ae2bd150e372..793951655b73b 100644 ---- a/arch/arm/boot/dts/rk3288-vmarc-som.dtsi -+++ b/arch/arm/boot/dts/rk3288-vmarc-som.dtsi -@@ -241,7 +241,6 @@ - interrupt-parent = <&gpio5>; - interrupts = ; - #clock-cells = <0>; -- clock-frequency = <32768>; - clock-output-names = "hym8563"; - pinctrl-names = "default"; - pinctrl-0 = <&hym8563_int>; -diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi -index 4dcdcf17c9776..2e6138eeacd15 100644 ---- a/arch/arm/boot/dts/rk3288.dtsi -+++ b/arch/arm/boot/dts/rk3288.dtsi -@@ -940,7 +940,7 @@ - status = "disabled"; - }; - -- spdif: sound@ff88b0000 { -+ spdif: sound@ff8b0000 { - compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif"; - reg = <0x0 0xff8b0000 0x0 0x10000>; - #sound-dai-cells = <0>; -@@ -971,7 +971,7 @@ - status = "disabled"; - }; - -- crypto: cypto-controller@ff8a0000 { -+ crypto: crypto@ff8a0000 { - compatible = "rockchip,rk3288-crypto"; - reg = <0x0 0xff8a0000 0x0 0x4000>; - interrupts = ; -@@ -1180,6 +1180,7 @@ - clock-names = "dp", "pclk"; - phys = <&edp_phy>; - phy-names = "dp"; -+ power-domains = <&power RK3288_PD_VIO>; - resets = <&cru SRST_EDP>; - reset-names = "dp"; - rockchip,grf = <&grf>; -diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi -index 616a828e0c6e4..17e89d30de781 100644 ---- a/arch/arm/boot/dts/rk3xxx.dtsi -+++ b/arch/arm/boot/dts/rk3xxx.dtsi -@@ -76,6 +76,13 @@ - reg = <0x1013c200 0x20>; - interrupts = ; - clocks = <&cru CORE_PERI>; -+ status = "disabled"; -+ /* The clock source and the sched_clock provided by the arm_global_timer -+ * on Rockchip rk3066a/rk3188 are quite unstable because their rates -+ * depend on the CPU frequency. -+ * Keep the arm_global_timer disabled in order to have the -+ * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default. 
-+ */ - }; - - local_timer: local-timer@1013c600 { -diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts -index 285555b9ed943..0b07b3c319604 100644 ---- a/arch/arm/boot/dts/s3c6410-mini6410.dts -+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts -@@ -51,7 +51,7 @@ - - ethernet@18000000 { - compatible = "davicom,dm9000"; -- reg = <0x18000000 0x2 0x18000004 0x2>; -+ reg = <0x18000000 0x2>, <0x18000004 0x2>; - interrupt-parent = <&gpn>; - interrupts = <7 IRQ_TYPE_LEVEL_HIGH>; - davicom,no-eeprom; -@@ -193,12 +193,12 @@ - }; - - &pinctrl0 { -- gpio_leds: gpio-leds { -+ gpio_leds: gpio-leds-pins { - samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7"; - samsung,pin-pud = ; - }; - -- gpio_keys: gpio-keys { -+ gpio_keys: gpio-keys-pins { - samsung,pins = "gpn-0", "gpn-1", "gpn-2", "gpn-3", - "gpn-4", "gpn-5", "gpl-11", "gpl-12"; - samsung,pin-pud = ; -diff --git a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi -index 8e9594d64b579..0a3186d57cb56 100644 ---- a/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi -+++ b/arch/arm/boot/dts/s3c64xx-pinctrl.dtsi -@@ -16,111 +16,111 @@ - * Pin banks - */ - -- gpa: gpa { -+ gpa: gpa-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpb: gpb { -+ gpb: gpb-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpc: gpc { -+ gpc: gpc-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpd: gpd { -+ gpd: gpd-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpe: gpe { -+ gpe: gpe-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - }; - -- gpf: gpf { -+ gpf: gpf-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpg: gpg { -+ gpg: gpg-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gph: gph { -+ gph: gph-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpi: gpi { -+ gpi: gpi-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - }; - -- gpj: gpj { -+ gpj: gpj-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - }; - -- gpk: gpk { -+ gpk: gpk-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - }; - -- gpl: gpl { -+ gpl: gpl-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpm: gpm { -+ gpm: gpm-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpn: gpn { -+ gpn: gpn-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpo: gpo { -+ gpo: gpo-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpp: gpp { -+ gpp: gpp-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; - }; - -- gpq: gpq { -+ gpq: gpq-gpio-bank { - gpio-controller; - #gpio-cells = <2>; - interrupt-controller; -@@ -131,225 +131,225 @@ - * Pin groups - */ - -- uart0_data: uart0-data { -+ uart0_data: uart0-data-pins { - samsung,pins = "gpa-0", "gpa-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- uart0_fctl: uart0-fctl { -+ uart0_fctl: uart0-fctl-pins { - samsung,pins = "gpa-2", "gpa-3"; - samsung,pin-function = ; - 
samsung,pin-pud = ; - }; - -- uart1_data: uart1-data { -+ uart1_data: uart1-data-pins { - samsung,pins = "gpa-4", "gpa-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- uart1_fctl: uart1-fctl { -+ uart1_fctl: uart1-fctl-pins { - samsung,pins = "gpa-6", "gpa-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- uart2_data: uart2-data { -+ uart2_data: uart2-data-pins { - samsung,pins = "gpb-0", "gpb-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- uart3_data: uart3-data { -+ uart3_data: uart3-data-pins { - samsung,pins = "gpb-2", "gpb-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- ext_dma_0: ext-dma-0 { -+ ext_dma_0: ext-dma-0-pins { - samsung,pins = "gpb-0", "gpb-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- ext_dma_1: ext-dma-1 { -+ ext_dma_1: ext-dma-1-pins { - samsung,pins = "gpb-2", "gpb-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- irda_data_0: irda-data-0 { -+ irda_data_0: irda-data-0-pins { - samsung,pins = "gpb-0", "gpb-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- irda_data_1: irda-data-1 { -+ irda_data_1: irda-data-1-pins { - samsung,pins = "gpb-2", "gpb-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- irda_sdbw: irda-sdbw { -+ irda_sdbw: irda-sdbw-pins { - samsung,pins = "gpb-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2c0_bus: i2c0-bus { -+ i2c0_bus: i2c0-bus-pins { - samsung,pins = "gpb-5", "gpb-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2c1_bus: i2c1-bus { -+ i2c1_bus: i2c1-bus-pins { - /* S3C6410-only */ - samsung,pins = "gpb-2", "gpb-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- spi0_bus: spi0-bus { -+ spi0_bus: spi0-bus-pins { - samsung,pins = "gpc-0", "gpc-1", "gpc-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- spi0_cs: spi0-cs { -+ spi0_cs: spi0-cs-pins { - samsung,pins = "gpc-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- spi1_bus: spi1-bus { -+ spi1_bus: spi1-bus-pins { - samsung,pins = "gpc-4", "gpc-5", "gpc-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- spi1_cs: spi1-cs { -+ spi1_cs: spi1-cs-pins { - samsung,pins = "gpc-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd0_cmd: sd0-cmd { -+ sd0_cmd: sd0-cmd-pins { - samsung,pins = "gpg-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd0_clk: sd0-clk { -+ sd0_clk: sd0-clk-pins { - samsung,pins = "gpg-0"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd0_bus1: sd0-bus1 { -+ sd0_bus1: sd0-bus1-pins { - samsung,pins = "gpg-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd0_bus4: sd0-bus4 { -+ sd0_bus4: sd0-bus4-pins { - samsung,pins = "gpg-2", "gpg-3", "gpg-4", "gpg-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd0_cd: sd0-cd { -+ sd0_cd: sd0-cd-pins { - samsung,pins = "gpg-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_cmd: sd1-cmd { -+ sd1_cmd: sd1-cmd-pins { - samsung,pins = "gph-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_clk: sd1-clk { -+ sd1_clk: sd1-clk-pins { - samsung,pins = "gph-0"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_bus1: sd1-bus1 { -+ sd1_bus1: sd1-bus1-pins { - samsung,pins = "gph-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_bus4: sd1-bus4 { -+ sd1_bus4: sd1-bus4-pins { - samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_bus8: sd1-bus8 { -+ sd1_bus8: sd1-bus8-pins 
{ - samsung,pins = "gph-2", "gph-3", "gph-4", "gph-5", - "gph-6", "gph-7", "gph-8", "gph-9"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd1_cd: sd1-cd { -+ sd1_cd: sd1-cd-pins { - samsung,pins = "gpg-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd2_cmd: sd2-cmd { -+ sd2_cmd: sd2-cmd-pins { - samsung,pins = "gpc-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd2_clk: sd2-clk { -+ sd2_clk: sd2-clk-pins { - samsung,pins = "gpc-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd2_bus1: sd2-bus1 { -+ sd2_bus1: sd2-bus1-pins { - samsung,pins = "gph-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- sd2_bus4: sd2-bus4 { -+ sd2_bus4: sd2-bus4-pins { - samsung,pins = "gph-6", "gph-7", "gph-8", "gph-9"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2s0_bus: i2s0-bus { -+ i2s0_bus: i2s0-bus-pins { - samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2s0_cdclk: i2s0-cdclk { -+ i2s0_cdclk: i2s0-cdclk-pins { - samsung,pins = "gpd-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2s1_bus: i2s1-bus { -+ i2s1_bus: i2s1-bus-pins { - samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2s1_cdclk: i2s1-cdclk { -+ i2s1_cdclk: i2s1-cdclk-pins { - samsung,pins = "gpe-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- i2s2_bus: i2s2-bus { -+ i2s2_bus: i2s2-bus-pins { - /* S3C6410-only */ - samsung,pins = "gpc-4", "gpc-5", "gpc-6", "gph-6", - "gph-8", "gph-9"; -@@ -357,50 +357,50 @@ - samsung,pin-pud = ; - }; - -- i2s2_cdclk: i2s2-cdclk { -+ i2s2_cdclk: i2s2-cdclk-pins { - /* S3C6410-only */ - samsung,pins = "gph-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pcm0_bus: pcm0-bus { -+ pcm0_bus: pcm0-bus-pins { - samsung,pins = "gpd-0", "gpd-2", "gpd-3", "gpd-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pcm0_extclk: pcm0-extclk { -+ pcm0_extclk: pcm0-extclk-pins { - samsung,pins = "gpd-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pcm1_bus: pcm1-bus { -+ pcm1_bus: pcm1-bus-pins { - samsung,pins = "gpe-0", "gpe-2", "gpe-3", "gpe-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pcm1_extclk: pcm1-extclk { -+ pcm1_extclk: pcm1-extclk-pins { - samsung,pins = "gpe-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- ac97_bus_0: ac97-bus-0 { -+ ac97_bus_0: ac97-bus-0-pins { - samsung,pins = "gpd-0", "gpd-1", "gpd-2", "gpd-3", "gpd-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- ac97_bus_1: ac97-bus-1 { -+ ac97_bus_1: ac97-bus-1-pins { - samsung,pins = "gpe-0", "gpe-1", "gpe-2", "gpe-3", "gpe-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- cam_port: cam-port { -+ cam_port: cam-port-pins { - samsung,pins = "gpf-0", "gpf-1", "gpf-2", "gpf-4", - "gpf-5", "gpf-6", "gpf-7", "gpf-8", - "gpf-9", "gpf-10", "gpf-11", "gpf-12"; -@@ -408,242 +408,242 @@ - samsung,pin-pud = ; - }; - -- cam_rst: cam-rst { -+ cam_rst: cam-rst-pins { - samsung,pins = "gpf-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- cam_field: cam-field { -+ cam_field: cam-field-pins { - /* S3C6410-only */ - samsung,pins = "gpb-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pwm_extclk: pwm-extclk { -+ pwm_extclk: pwm-extclk-pins { - samsung,pins = "gpf-13"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pwm0_out: pwm0-out { -+ pwm0_out: pwm0-out-pins { - samsung,pins = "gpf-14"; - 
samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- pwm1_out: pwm1-out { -+ pwm1_out: pwm1-out-pins { - samsung,pins = "gpf-15"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- clkout0: clkout-0 { -+ clkout0: clkout-0-pins { - samsung,pins = "gpf-14"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col0_0: keypad-col0-0 { -+ keypad_col0_0: keypad-col0-0-pins { - samsung,pins = "gph-0"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col1_0: keypad-col1-0 { -+ keypad_col1_0: keypad-col1-0-pins { - samsung,pins = "gph-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col2_0: keypad-col2-0 { -+ keypad_col2_0: keypad-col2-0-pins { - samsung,pins = "gph-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col3_0: keypad-col3-0 { -+ keypad_col3_0: keypad-col3-0-pins { - samsung,pins = "gph-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col4_0: keypad-col4-0 { -+ keypad_col4_0: keypad-col4-0-pins { - samsung,pins = "gph-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col5_0: keypad-col5-0 { -+ keypad_col5_0: keypad-col5-0-pins { - samsung,pins = "gph-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col6_0: keypad-col6-0 { -+ keypad_col6_0: keypad-col6-0-pins { - samsung,pins = "gph-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col7_0: keypad-col7-0 { -+ keypad_col7_0: keypad-col7-0-pins { - samsung,pins = "gph-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col0_1: keypad-col0-1 { -+ keypad_col0_1: keypad-col0-1-pins { - samsung,pins = "gpl-0"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col1_1: keypad-col1-1 { -+ keypad_col1_1: keypad-col1-1-pins { - samsung,pins = "gpl-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col2_1: keypad-col2-1 { -+ keypad_col2_1: keypad-col2-1-pins { - samsung,pins = "gpl-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col3_1: keypad-col3-1 { -+ keypad_col3_1: keypad-col3-1-pins { - samsung,pins = "gpl-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col4_1: keypad-col4-1 { -+ keypad_col4_1: keypad-col4-1-pins { - samsung,pins = "gpl-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col5_1: keypad-col5-1 { -+ keypad_col5_1: keypad-col5-1-pins { - samsung,pins = "gpl-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col6_1: keypad-col6-1 { -+ keypad_col6_1: keypad-col6-1-pins { - samsung,pins = "gpl-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_col7_1: keypad-col7-1 { -+ keypad_col7_1: keypad-col7-1-pins { - samsung,pins = "gpl-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row0_0: keypad-row0-0 { -+ keypad_row0_0: keypad-row0-0-pins { - samsung,pins = "gpk-8"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row1_0: keypad-row1-0 { -+ keypad_row1_0: keypad-row1-0-pins { - samsung,pins = "gpk-9"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row2_0: keypad-row2-0 { -+ keypad_row2_0: keypad-row2-0-pins { - samsung,pins = "gpk-10"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row3_0: keypad-row3-0 { -+ keypad_row3_0: keypad-row3-0-pins { - samsung,pins = "gpk-11"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row4_0: keypad-row4-0 { -+ keypad_row4_0: keypad-row4-0-pins { - samsung,pins = "gpk-12"; - samsung,pin-function = ; - 
samsung,pin-pud = ; - }; - -- keypad_row5_0: keypad-row5-0 { -+ keypad_row5_0: keypad-row5-0-pins { - samsung,pins = "gpk-13"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row6_0: keypad-row6-0 { -+ keypad_row6_0: keypad-row6-0-pins { - samsung,pins = "gpk-14"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row7_0: keypad-row7-0 { -+ keypad_row7_0: keypad-row7-0-pins { - samsung,pins = "gpk-15"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row0_1: keypad-row0-1 { -+ keypad_row0_1: keypad-row0-1-pins { - samsung,pins = "gpn-0"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row1_1: keypad-row1-1 { -+ keypad_row1_1: keypad-row1-1-pins { - samsung,pins = "gpn-1"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row2_1: keypad-row2-1 { -+ keypad_row2_1: keypad-row2-1-pins { - samsung,pins = "gpn-2"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row3_1: keypad-row3-1 { -+ keypad_row3_1: keypad-row3-1-pins { - samsung,pins = "gpn-3"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row4_1: keypad-row4-1 { -+ keypad_row4_1: keypad-row4-1-pins { - samsung,pins = "gpn-4"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row5_1: keypad-row5-1 { -+ keypad_row5_1: keypad-row5-1-pins { - samsung,pins = "gpn-5"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row6_1: keypad-row6-1 { -+ keypad_row6_1: keypad-row6-1-pins { - samsung,pins = "gpn-6"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- keypad_row7_1: keypad-row7-1 { -+ keypad_row7_1: keypad-row7-1-pins { - samsung,pins = "gpn-7"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- lcd_ctrl: lcd-ctrl { -+ lcd_ctrl: lcd-ctrl-pins { - samsung,pins = "gpj-8", "gpj-9", "gpj-10", "gpj-11"; - samsung,pin-function = ; - samsung,pin-pud = ; - }; - -- lcd_data16: lcd-data-width16 { -+ lcd_data16: lcd-data-width16-pins { - samsung,pins = "gpi-3", "gpi-4", "gpi-5", "gpi-6", - "gpi-7", "gpi-10", "gpi-11", "gpi-12", - "gpi-13", "gpi-14", "gpi-15", "gpj-3", -@@ -652,7 +652,7 @@ - samsung,pin-pud = ; - }; - -- lcd_data18: lcd-data-width18 { -+ lcd_data18: lcd-data-width18-pins { - samsung,pins = "gpi-2", "gpi-3", "gpi-4", "gpi-5", - "gpi-6", "gpi-7", "gpi-10", "gpi-11", - "gpi-12", "gpi-13", "gpi-14", "gpi-15", -@@ -662,7 +662,7 @@ - samsung,pin-pud = ; - }; - -- lcd_data24: lcd-data-width24 { -+ lcd_data24: lcd-data-width24-pins { - samsung,pins = "gpi-0", "gpi-1", "gpi-2", "gpi-3", - "gpi-4", "gpi-5", "gpi-6", "gpi-7", - "gpi-8", "gpi-9", "gpi-10", "gpi-11", -@@ -673,7 +673,7 @@ - samsung,pin-pud = ; - }; - -- hsi_bus: hsi-bus { -+ hsi_bus: hsi-bus-pins { - samsung,pins = "gpk-0", "gpk-1", "gpk-2", "gpk-3", - "gpk-4", "gpk-5", "gpk-6", "gpk-7"; - samsung,pin-function = ; -diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi -index 160f8cd9a68da..b6d55a782c208 100644 ---- a/arch/arm/boot/dts/s5pv210-aries.dtsi -+++ b/arch/arm/boot/dts/s5pv210-aries.dtsi -@@ -564,7 +564,6 @@ - reset-gpios = <&mp05 5 GPIO_ACTIVE_LOW>; - vdd3-supply = <&ldo7_reg>; - vci-supply = <&ldo17_reg>; -- spi-cs-high; - spi-max-frequency = <1200000>; - - pinctrl-names = "default"; -@@ -636,7 +635,7 @@ - }; - - &i2s0 { -- dmas = <&pdma0 9>, <&pdma0 10>, <&pdma0 11>; -+ dmas = <&pdma0 10>, <&pdma0 9>, <&pdma0 11>; - status = "okay"; - }; - -@@ -895,7 +894,7 @@ - device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>; - interrupt-parent = <&gph2>; - interrupts = <5 
IRQ_TYPE_LEVEL_HIGH>; -- interrupt-names = "host-wake"; -+ interrupt-names = "host-wakeup"; - }; - }; - -diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts -index fbae768d65e27..901e7197b1368 100644 ---- a/arch/arm/boot/dts/s5pv210-smdkv210.dts -+++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts -@@ -41,7 +41,7 @@ - - ethernet@a8000000 { - compatible = "davicom,dm9000"; -- reg = <0xA8000000 0x2 0xA8000002 0x2>; -+ reg = <0xa8000000 0x2>, <0xa8000002 0x2>; - interrupt-parent = <&gph1>; - interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; - local-mac-address = [00 00 de ad be ef]; -@@ -55,6 +55,14 @@ - default-brightness-level = <6>; - pinctrl-names = "default"; - pinctrl-0 = <&pwm3_out>; -+ power-supply = <&dc5v_reg>; -+ }; -+ -+ dc5v_reg: regulator-0 { -+ compatible = "regulator-fixed"; -+ regulator-name = "DC5V"; -+ regulator-min-microvolt = <5000000>; -+ regulator-max-microvolt = <5000000>; - }; - }; - -diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi -index 353ba7b09a0c0..415d0f555858c 100644 ---- a/arch/arm/boot/dts/s5pv210.dtsi -+++ b/arch/arm/boot/dts/s5pv210.dtsi -@@ -239,8 +239,8 @@ - reg = <0xeee30000 0x1000>; - interrupt-parent = <&vic2>; - interrupts = <16>; -- dma-names = "rx", "tx", "tx-sec"; -- dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; -+ dma-names = "tx", "rx", "tx-sec"; -+ dmas = <&pdma1 10>, <&pdma1 9>, <&pdma1 11>; - clock-names = "iis", - "i2s_opclk0", - "i2s_opclk1"; -@@ -259,8 +259,8 @@ - reg = <0xe2100000 0x1000>; - interrupt-parent = <&vic2>; - interrupts = <17>; -- dma-names = "rx", "tx"; -- dmas = <&pdma1 12>, <&pdma1 13>; -+ dma-names = "tx", "rx"; -+ dmas = <&pdma1 13>, <&pdma1 12>; - clock-names = "iis", "i2s_opclk0"; - clocks = <&clocks CLK_I2S1>, <&clocks SCLK_AUDIO1>; - pinctrl-names = "default"; -@@ -274,8 +274,8 @@ - reg = <0xe2a00000 0x1000>; - interrupt-parent = <&vic2>; - interrupts = <18>; -- dma-names = "rx", "tx"; -- dmas = <&pdma1 14>, <&pdma1 15>; -+ dma-names = "tx", "rx"; -+ dmas = <&pdma1 15>, <&pdma1 14>; - clock-names = "iis", "i2s_opclk0"; - clocks = <&clocks CLK_I2S2>, <&clocks SCLK_AUDIO2>; - pinctrl-names = "default"; -@@ -582,7 +582,7 @@ - interrupts = <29>; - clocks = <&clocks CLK_CSIS>, - <&clocks SCLK_CSIS>; -- clock-names = "clk_csis", -+ clock-names = "csis", - "sclk_csis"; - bus-width = <4>; - status = "disabled"; -diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi -index ec45ced3cde68..e1e0dec8cc1f2 100644 ---- a/arch/arm/boot/dts/sam9x60.dtsi -+++ b/arch/arm/boot/dts/sam9x60.dtsi -@@ -567,7 +567,7 @@ - mpddrc: mpddrc@ffffe800 { - compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc"; - reg = <0xffffe800 0x200>; -- clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>; -+ clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>; - clock-names = "ddrck", "mpddr"; - }; - -diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi -index 801969c113d64..4c87c2aa8fc86 100644 ---- a/arch/arm/boot/dts/sama5d2.dtsi -+++ b/arch/arm/boot/dts/sama5d2.dtsi -@@ -413,7 +413,7 @@ - pmecc: ecc-engine@f8014070 { - compatible = "atmel,sama5d2-pmecc"; - reg = <0xf8014070 0x490>, -- <0xf8014500 0x100>; -+ <0xf8014500 0x200>; - }; - }; - -@@ -1125,7 +1125,7 @@ - clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>; - clock-names = "pclk", "gclk"; - assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>; -- assigned-parrents = <&pmc PMC_TYPE_GCK 55>; -+ assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>; - status = "disabled"; 
- }; - -diff --git a/arch/arm/boot/dts/sama7g5-pinfunc.h b/arch/arm/boot/dts/sama7g5-pinfunc.h -index 22fe9e522a97b..6e87f0d4b8fce 100644 ---- a/arch/arm/boot/dts/sama7g5-pinfunc.h -+++ b/arch/arm/boot/dts/sama7g5-pinfunc.h -@@ -261,7 +261,7 @@ - #define PIN_PB2__FLEXCOM6_IO0 PINMUX_PIN(PIN_PB2, 2, 1) - #define PIN_PB2__ADTRG PINMUX_PIN(PIN_PB2, 3, 1) - #define PIN_PB2__A20 PINMUX_PIN(PIN_PB2, 4, 1) --#define PIN_PB2__FLEXCOM11_IO0 PINMUX_PIN(PIN_PB2, 6, 3) -+#define PIN_PB2__FLEXCOM11_IO1 PINMUX_PIN(PIN_PB2, 6, 3) - #define PIN_PB3 35 - #define PIN_PB3__GPIO PINMUX_PIN(PIN_PB3, 0, 0) - #define PIN_PB3__RF1 PINMUX_PIN(PIN_PB3, 1, 1) -@@ -765,7 +765,7 @@ - #define PIN_PD20__PCK0 PINMUX_PIN(PIN_PD20, 1, 3) - #define PIN_PD20__FLEXCOM2_IO3 PINMUX_PIN(PIN_PD20, 2, 2) - #define PIN_PD20__PWMH3 PINMUX_PIN(PIN_PD20, 3, 4) --#define PIN_PD20__CANTX4 PINMUX_PIN(PIN_PD20, 5, 2) -+#define PIN_PD20__CANTX4 PINMUX_PIN(PIN_PD20, 4, 2) - #define PIN_PD20__FLEXCOM5_IO0 PINMUX_PIN(PIN_PD20, 6, 5) - #define PIN_PD21 117 - #define PIN_PD21__GPIO PINMUX_PIN(PIN_PD21, 0, 0) -diff --git a/arch/arm/boot/dts/sama7g5.dtsi b/arch/arm/boot/dts/sama7g5.dtsi -index 6c58c151c6d9e..a63a8e768654f 100644 ---- a/arch/arm/boot/dts/sama7g5.dtsi -+++ b/arch/arm/boot/dts/sama7g5.dtsi -@@ -319,8 +319,6 @@ - dmas = <&dma0 AT91_XDMAC_DT_PERID(7)>, - <&dma0 AT91_XDMAC_DT_PERID(8)>; - dma-names = "rx", "tx"; -- atmel,use-dma-rx; -- atmel,use-dma-tx; - status = "disabled"; - }; - }; -@@ -485,8 +483,6 @@ - dmas = <&dma0 AT91_XDMAC_DT_PERID(21)>, - <&dma0 AT91_XDMAC_DT_PERID(22)>; - dma-names = "rx", "tx"; -- atmel,use-dma-rx; -- atmel,use-dma-tx; - status = "disabled"; - }; - }; -@@ -511,8 +507,6 @@ - dmas = <&dma0 AT91_XDMAC_DT_PERID(23)>, - <&dma0 AT91_XDMAC_DT_PERID(24)>; - dma-names = "rx", "tx"; -- atmel,use-dma-rx; -- atmel,use-dma-tx; - status = "disabled"; - }; - }; -@@ -559,7 +553,6 @@ - #interrupt-cells = <3>; - #address-cells = <0>; - interrupt-controller; -- interrupt-parent; - reg = <0xe8c11000 0x1000>, - <0xe8c12000 0x2000>; - }; -diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi -index 0b021eef0b538..b8c5dd7860cb2 100644 ---- a/arch/arm/boot/dts/socfpga.dtsi -+++ b/arch/arm/boot/dts/socfpga.dtsi -@@ -46,7 +46,7 @@ - <0xff113000 0x1000>; - }; - -- intc: intc@fffed000 { -+ intc: interrupt-controller@fffed000 { - compatible = "arm,cortex-a9-gic"; - #interrupt-cells = <3>; - interrupt-controller; -@@ -782,7 +782,7 @@ - }; - - qspi: spi@ff705000 { -- compatible = "cdns,qspi-nor"; -+ compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0xff705000 0x1000>, -diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi -index a574ea91d9d3f..f1e50d2e623a3 100644 ---- a/arch/arm/boot/dts/socfpga_arria10.dtsi -+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi -@@ -38,7 +38,7 @@ - <0xff113000 0x1000>; - }; - -- intc: intc@ffffd000 { -+ intc: interrupt-controller@ffffd000 { - compatible = "arm,cortex-a9-gic"; - #interrupt-cells = <3>; - interrupt-controller; -@@ -756,7 +756,7 @@ - }; - - qspi: spi@ff809000 { -- compatible = "cdns,qspi-nor"; -+ compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0xff809000 0x100>, -diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts -index 2b645642b9352..2a745522404d6 100644 ---- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts -+++ 
b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts -@@ -12,7 +12,7 @@ - flash0: n25q00@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q00aa"; -+ compatible = "micron,mt25qu02g", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <100000000>; - -diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts -index 90e676e7019f2..1b02d46496a85 100644 ---- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts -+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts -@@ -119,7 +119,7 @@ - flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q256a"; -+ compatible = "micron,n25q256a", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <100000000>; - -diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts -index 6f138b2b26163..51bb436784e24 100644 ---- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts -+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts -@@ -124,7 +124,7 @@ - flash0: n25q00@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q00"; -+ compatible = "micron,mt25qu02g", "jedec,spi-nor"; - reg = <0>; /* chip select */ - spi-max-frequency = <100000000>; - -diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts -index c155ff02eb6e0..cae9ddd5ed38b 100644 ---- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts -+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts -@@ -169,7 +169,7 @@ - flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q00"; -+ compatible = "micron,mt25qu02g", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <100000000>; - -diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts -index 8d5d3996f6f27..ca18b959e6559 100644 ---- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts -+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts -@@ -80,7 +80,7 @@ - flash: flash@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q256a"; -+ compatible = "micron,n25q256a", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <100000000>; - m25p,fast-read; -diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts -index 99a71757cdf46..3f7aa7bf0863a 100644 ---- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts -+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts -@@ -116,7 +116,7 @@ - flash0: n25q512a@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q512a"; -+ compatible = "micron,n25q512a", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <100000000>; - -diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts -index a060718758b67..25874e1b9c829 100644 ---- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts -+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts -@@ -224,7 +224,7 @@ - n25q128@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q128"; -+ compatible = "micron,n25q128", "jedec,spi-nor"; - reg = <0>; /* chip select */ - spi-max-frequency = <100000000>; - m25p,fast-read; -@@ -241,7 +241,7 @@ - n25q00@1 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "n25q00"; -+ compatible = "micron,mt25qu02g", "jedec,spi-nor"; - reg = <1>; /* chip select */ - spi-max-frequency = <100000000>; - m25p,fast-read; -diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi -index 1a8f5e8b10e3a..66cd473ecb617 100644 ---- 
a/arch/arm/boot/dts/spear1340.dtsi -+++ b/arch/arm/boot/dts/spear1340.dtsi -@@ -136,9 +136,9 @@ - reg = <0xb4100000 0x1000>; - interrupts = <0 105 0x4>; - status = "disabled"; -- dmas = <&dwdma0 12 0 1>, -- <&dwdma0 13 1 0>; -- dma-names = "tx", "rx"; -+ dmas = <&dwdma0 13 0 1>, -+ <&dwdma0 12 1 0>; -+ dma-names = "rx", "tx"; - }; - - thermal@e07008c4 { -diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi -index c87b881b2c8bb..9135533676879 100644 ---- a/arch/arm/boot/dts/spear13xx.dtsi -+++ b/arch/arm/boot/dts/spear13xx.dtsi -@@ -284,9 +284,9 @@ - #size-cells = <0>; - interrupts = <0 31 0x4>; - status = "disabled"; -- dmas = <&dwdma0 4 0 0>, -- <&dwdma0 5 0 0>; -- dma-names = "tx", "rx"; -+ dmas = <&dwdma0 5 0 0>, -+ <&dwdma0 4 0 0>; -+ dma-names = "rx", "tx"; - }; - - rtc@e0580000 { -diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts -index 367ba48aac3e5..5c562fb4886f4 100644 ---- a/arch/arm/boot/dts/spear320-hmi.dts -+++ b/arch/arm/boot/dts/spear320-hmi.dts -@@ -242,7 +242,7 @@ - irq-trigger = <0x1>; - - stmpegpio: stmpe-gpio { -- compatible = "stmpe,gpio"; -+ compatible = "st,stmpe-gpio"; - reg = <0>; - gpio-controller; - #gpio-cells = <2>; -diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi -index fd41243a0b2c0..9d5a04a46b14e 100644 ---- a/arch/arm/boot/dts/spear600.dtsi -+++ b/arch/arm/boot/dts/spear600.dtsi -@@ -47,7 +47,7 @@ - compatible = "arm,pl110", "arm,primecell"; - reg = <0xfc200000 0x1000>; - interrupt-parent = <&vic1>; -- interrupts = <12>; -+ interrupts = <13>; - status = "disabled"; - }; - -diff --git a/arch/arm/boot/dts/ste-ux500-samsung-codina.dts b/arch/arm/boot/dts/ste-ux500-samsung-codina.dts -index 952606e607ed6..ce62ba877da12 100644 ---- a/arch/arm/boot/dts/ste-ux500-samsung-codina.dts -+++ b/arch/arm/boot/dts/ste-ux500-samsung-codina.dts -@@ -544,8 +544,8 @@ - reg = <0x19>; - vdd-supply = <&ab8500_ldo_aux1_reg>; // 3V - vddio-supply = <&ab8500_ldo_aux2_reg>; // 1.8V -- mount-matrix = "0", "-1", "0", -- "1", "0", "0", -+ mount-matrix = "0", "1", "0", -+ "-1", "0", "0", - "0", "0", "1"; - }; - }; -diff --git a/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts b/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts -index fabc390ccb0cf..6c9e812ef03f4 100644 ---- a/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts -+++ b/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts -@@ -502,8 +502,8 @@ - accelerometer@18 { - compatible = "bosch,bma222e"; - reg = <0x18>; -- mount-matrix = "0", "1", "0", -- "-1", "0", "0", -+ mount-matrix = "0", "-1", "0", -+ "1", "0", "0", - "0", "0", "1"; - vddio-supply = <&ab8500_ldo_aux2_reg>; // 1.8V - vdd-supply = <&ab8500_ldo_aux1_reg>; // 3V -diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts -index 264f3e9b5fce5..7fab746e0570e 100644 ---- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts -+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts -@@ -181,10 +181,6 @@ - cap-sd-highspeed; - cap-mmc-highspeed; - /* All direction control is used */ -- st,sig-dir-cmd; -- st,sig-dir-dat0; -- st,sig-dir-dat2; -- st,sig-dir-dat31; - st,sig-pin-fbclk; - full-pwr-cycle; - vmmc-supply = <&ab8500_ldo_aux3_reg>; -@@ -292,10 +288,10 @@ - }; - - ab8500_ldo_aux2 { -- /* Supplies the Cypress TMA140 touchscreen only with 3.3V */ -+ /* Supplies the Cypress TMA140 touchscreen only with 3.0V */ - regulator-name = "AUX2"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -+ regulator-min-microvolt = 
<3000000>; -+ regulator-max-microvolt = <3000000>; - }; - - ab8500_ldo_aux3 { -@@ -314,9 +310,9 @@ - - ab8500_ldo_aux5 { - regulator-name = "AUX5"; -+ /* Intended for 1V8 for touchscreen but actually left unused */ - regulator-min-microvolt = <1050000>; - regulator-max-microvolt = <2790000>; -- regulator-always-on; - }; - - ab8500_ldo_aux6 { -diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts -index 075ac57d0bf4a..6435e099c6326 100644 ---- a/arch/arm/boot/dts/stm32f429-disco.dts -+++ b/arch/arm/boot/dts/stm32f429-disco.dts -@@ -192,7 +192,7 @@ - - display: display@1{ - /* Connect panel-ilitek-9341 to ltdc */ -- compatible = "st,sf-tc240t-9370-t"; -+ compatible = "st,sf-tc240t-9370-t", "ilitek,ili9341"; - reg = <1>; - spi-3wire; - spi-max-frequency = <10000000>; -diff --git a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi -index 1cf8a23c26448..7f40b34401a9d 100644 ---- a/arch/arm/boot/dts/stm32f7-pinctrl.dtsi -+++ b/arch/arm/boot/dts/stm32f7-pinctrl.dtsi -@@ -284,6 +284,88 @@ - slew-rate = <2>; - }; - }; -+ -+ can1_pins_a: can1-0 { -+ pins1 { -+ pinmux = ; /* CAN1_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN1_RX */ -+ bias-pull-up; -+ }; -+ }; -+ -+ can1_pins_b: can1-1 { -+ pins1 { -+ pinmux = ; /* CAN1_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN1_RX */ -+ bias-pull-up; -+ }; -+ }; -+ -+ can1_pins_c: can1-2 { -+ pins1 { -+ pinmux = ; /* CAN1_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN1_RX */ -+ bias-pull-up; -+ -+ }; -+ }; -+ -+ can1_pins_d: can1-3 { -+ pins1 { -+ pinmux = ; /* CAN1_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN1_RX */ -+ bias-pull-up; -+ -+ }; -+ }; -+ -+ can2_pins_a: can2-0 { -+ pins1 { -+ pinmux = ; /* CAN2_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN2_RX */ -+ bias-pull-up; -+ }; -+ }; -+ -+ can2_pins_b: can2-1 { -+ pins1 { -+ pinmux = ; /* CAN2_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN2_RX */ -+ bias-pull-up; -+ }; -+ }; -+ -+ can3_pins_a: can3-0 { -+ pins1 { -+ pinmux = ; /* CAN3_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN3_RX */ -+ bias-pull-up; -+ }; -+ }; -+ -+ can3_pins_b: can3-1 { -+ pins1 { -+ pinmux = ; /* CAN3_TX */ -+ }; -+ pins2 { -+ pinmux = ; /* CAN3_RX */ -+ bias-pull-up; -+ }; -+ }; - }; - }; - }; -diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi -index 5b60ecbd718f0..d3553e0f0187e 100644 ---- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi -+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi -@@ -1179,7 +1179,7 @@ - }; - }; - -- sai2a_pins_c: sai2a-4 { -+ sai2a_pins_c: sai2a-2 { - pins { - pinmux = , /* SAI2_SCK_A */ - , /* SAI2_SD_A */ -@@ -1190,7 +1190,7 @@ - }; - }; - -- sai2a_sleep_pins_c: sai2a-5 { -+ sai2a_sleep_pins_c: sai2a-sleep-2 { - pins { - pinmux = , /* SAI2_SCK_A */ - , /* SAI2_SD_A */ -@@ -1235,14 +1235,14 @@ - }; - }; - -- sai2b_pins_c: sai2a-4 { -+ sai2b_pins_c: sai2b-2 { - pins1 { - pinmux = ; /* SAI2_SD_B */ - bias-disable; - }; - }; - -- sai2b_sleep_pins_c: sai2a-sleep-5 { -+ sai2b_sleep_pins_c: sai2b-sleep-2 { - pins { - pinmux = ; /* SAI2_SD_B */ - }; -diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi -index bd289bf5d2690..e0d4833187988 100644 ---- a/arch/arm/boot/dts/stm32mp151.dtsi -+++ b/arch/arm/boot/dts/stm32mp151.dtsi -@@ -553,7 +553,7 @@ - compatible = "st,stm32-cec"; - reg = <0x40016000 0x400>; - interrupts = ; -- clocks = <&rcc CEC_K>, <&clk_lse>; -+ clocks = <&rcc CEC_K>, <&rcc CEC>; - clock-names = "cec", "hdmi-cec"; - status = "disabled"; - }; -@@ -824,7 +824,7 @@ - 
#sound-dai-cells = <0>; - - compatible = "st,stm32-sai-sub-a"; -- reg = <0x4 0x1c>; -+ reg = <0x4 0x20>; - clocks = <&rcc SAI1_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 87 0x400 0x01>; -@@ -834,7 +834,7 @@ - sai1b: audio-controller@4400a024 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-b"; -- reg = <0x24 0x1c>; -+ reg = <0x24 0x20>; - clocks = <&rcc SAI1_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 88 0x400 0x01>; -@@ -855,7 +855,7 @@ - sai2a: audio-controller@4400b004 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-a"; -- reg = <0x4 0x1c>; -+ reg = <0x4 0x20>; - clocks = <&rcc SAI2_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 89 0x400 0x01>; -@@ -865,7 +865,7 @@ - sai2b: audio-controller@4400b024 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-b"; -- reg = <0x24 0x1c>; -+ reg = <0x24 0x20>; - clocks = <&rcc SAI2_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 90 0x400 0x01>; -@@ -886,7 +886,7 @@ - sai3a: audio-controller@4400c004 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-a"; -- reg = <0x04 0x1c>; -+ reg = <0x04 0x20>; - clocks = <&rcc SAI3_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 113 0x400 0x01>; -@@ -896,7 +896,7 @@ - sai3b: audio-controller@4400c024 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-b"; -- reg = <0x24 0x1c>; -+ reg = <0x24 0x20>; - clocks = <&rcc SAI3_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 114 0x400 0x01>; -@@ -1271,7 +1271,7 @@ - sai4a: audio-controller@50027004 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-a"; -- reg = <0x04 0x1c>; -+ reg = <0x04 0x20>; - clocks = <&rcc SAI4_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 99 0x400 0x01>; -@@ -1281,7 +1281,7 @@ - sai4b: audio-controller@50027024 { - #sound-dai-cells = <0>; - compatible = "st,stm32-sai-sub-b"; -- reg = <0x24 0x1c>; -+ reg = <0x24 0x20>; - clocks = <&rcc SAI4_K>; - clock-names = "sai_ck"; - dmas = <&dmamux1 100 0x400 0x01>; -@@ -1452,7 +1452,7 @@ - usbh_ohci: usb@5800c000 { - compatible = "generic-ohci"; - reg = <0x5800c000 0x1000>; -- clocks = <&rcc USBH>; -+ clocks = <&usbphyc>, <&rcc USBH>; - resets = <&rcc USBH_R>; - interrupts = ; - status = "disabled"; -@@ -1461,7 +1461,7 @@ - usbh_ehci: usb@5800d000 { - compatible = "generic-ehci"; - reg = <0x5800d000 0x1000>; -- clocks = <&rcc USBH>; -+ clocks = <&usbphyc>, <&rcc USBH>; - resets = <&rcc USBH_R>; - interrupts = ; - companion = <&usbh_ohci>; -diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts -index 2e3c9fbb4eb36..275167f26fd9d 100644 ---- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts -+++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts -@@ -13,7 +13,6 @@ - /dts-v1/; - - #include "stm32mp157.dtsi" --#include "stm32mp15xc.dtsi" - #include "stm32mp15xx-dhcor-som.dtsi" - #include "stm32mp15xx-dhcor-avenger96.dtsi" - -diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi -index fbf3826933e4d..7c0aa59accc55 100644 ---- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi -+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi -@@ -137,10 +137,13 @@ - - sound { - compatible = "audio-graph-card"; -- routing = -- "MIC_IN", "Capture", -- "Capture", "Mic Bias", -- "Playback", "HP_OUT"; -+ widgets = "Headphone", "Headphone Jack", -+ "Line", "Line In Jack", -+ "Microphone", "Microphone Jack"; -+ routing = "Headphone Jack", "HP_OUT", -+ "LINE_IN", "Line In Jack", -+ "MIC_IN", "Microphone Jack", -+ "Microphone Jack", "Mic 
Bias"; - dais = <&sai2a_port &sai2b_port>; - status = "okay"; - }; -diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi -index 6885948f3024e..5f65d96435f6e 100644 ---- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi -+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi -@@ -88,7 +88,7 @@ - - sound { - compatible = "audio-graph-card"; -- label = "STM32MP1-AV96-HDMI"; -+ label = "STM32-AV96-HDMI"; - dais = <&sai2a_port>; - status = "okay"; - }; -@@ -100,7 +100,7 @@ - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - -- gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>; -+ gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>; - enable-active-high; - }; - }; -@@ -141,6 +141,7 @@ - compatible = "snps,dwmac-mdio"; - reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>; - reset-delay-us = <1000>; -+ reset-post-delay-us = <1000>; - - phy0: ethernet-phy@7 { - reg = <7>; -@@ -266,6 +267,12 @@ - }; - }; - }; -+ -+ dh_mac_eeprom: eeprom@53 { -+ compatible = "atmel,24c02"; -+ reg = <0x53>; -+ pagesize = <16>; -+ }; - }; - - <dc { -diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi -index 2b0ac605549d7..108d934a186b4 100644 ---- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi -+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi -@@ -171,12 +171,6 @@ - status = "disabled"; - }; - }; -- -- eeprom@53 { -- compatible = "atmel,24c02"; -- reg = <0x53>; -- pagesize = <16>; -- }; - }; - - &iwdg2 { -@@ -202,7 +196,7 @@ - compatible = "jedec,spi-nor"; - reg = <0>; - spi-rx-bus-width = <4>; -- spi-max-frequency = <108000000>; -+ spi-max-frequency = <50000000>; - #address-cells = <1>; - #size-cells = <1>; - }; -diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi -index 899bfe04aeb91..a76173e8a2a17 100644 ---- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi -+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi -@@ -249,7 +249,7 @@ - stusb1600@28 { - compatible = "st,stusb1600"; - reg = <0x28>; -- interrupts = <11 IRQ_TYPE_EDGE_FALLING>; -+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>; - interrupt-parent = <&gpioi>; - pinctrl-names = "default"; - pinctrl-0 = <&stusb1600_pins_a>; -@@ -438,7 +438,7 @@ - i2s2_port: port { - i2s2_endpoint: endpoint { - remote-endpoint = <&sii9022_tx_endpoint>; -- format = "i2s"; -+ dai-format = "i2s"; - mclk-fs = <256>; - }; - }; -diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi -index 2beddbb3c5183..b3d1bdfb5118e 100644 ---- a/arch/arm/boot/dts/sun8i-a33.dtsi -+++ b/arch/arm/boot/dts/sun8i-a33.dtsi -@@ -46,7 +46,7 @@ - #include - - / { -- cpu0_opp_table: opp_table0 { -+ cpu0_opp_table: opp-table-cpu { - compatible = "operating-points-v2"; - opp-shared; - -@@ -164,7 +164,7 @@ - io-channels = <&ths>; - }; - -- mali_opp_table: gpu-opp-table { -+ mali_opp_table: opp-table-gpu { - compatible = "operating-points-v2"; - - opp-144000000 { -diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi -index ac97eac91349b..82fdb04122caa 100644 ---- a/arch/arm/boot/dts/sun8i-a83t.dtsi -+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi -@@ -200,7 +200,7 @@ - status = "disabled"; - }; - -- cpu0_opp_table: opp_table0 { -+ cpu0_opp_table: opp-table-cluster0 { - compatible = "operating-points-v2"; - opp-shared; - -@@ -253,7 +253,7 @@ - }; - }; - -- cpu1_opp_table: opp_table1 { -+ cpu1_opp_table: opp-table-cluster1 { - compatible = "operating-points-v2"; - opp-shared; - -diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts 
b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts -index f19ed981da9d9..3706216ffb40b 100644 ---- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts -+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts -@@ -169,7 +169,7 @@ - flash@0 { - #address-cells = <1>; - #size-cells = <1>; -- compatible = "mxicy,mx25l1606e", "winbond,w25q128"; -+ compatible = "mxicy,mx25l1606e", "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <40000000>; - }; -diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts -index 8e7dfcffe1fbe..355f7844fd55e 100644 ---- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts -+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts -@@ -57,7 +57,7 @@ - regulator-ramp-delay = <50>; /* 4ms */ - - enable-active-high; -- enable-gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ -+ enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */ - gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */ - gpios-states = <0x1>; - states = <1100000 0>, <1300000 1>; -diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi -index 4e89701df91f8..ae4f933abb895 100644 ---- a/arch/arm/boot/dts/sun8i-h3.dtsi -+++ b/arch/arm/boot/dts/sun8i-h3.dtsi -@@ -44,7 +44,7 @@ - #include - - / { -- cpu0_opp_table: opp_table0 { -+ cpu0_opp_table: opp-table-cpu { - compatible = "operating-points-v2"; - opp-shared; - -@@ -112,7 +112,7 @@ - }; - }; - -- gpu_opp_table: gpu-opp-table { -+ gpu_opp_table: opp-table-gpu { - compatible = "operating-points-v2"; - - opp-120000000 { -diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi -index b30bc1a25ebb9..084323d5c61cb 100644 ---- a/arch/arm/boot/dts/sun8i-v3s.dtsi -+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi -@@ -593,6 +593,17 @@ - #size-cells = <0>; - }; - -+ gic: interrupt-controller@1c81000 { -+ compatible = "arm,gic-400"; -+ reg = <0x01c81000 0x1000>, -+ <0x01c82000 0x2000>, -+ <0x01c84000 0x2000>, -+ <0x01c86000 0x2000>; -+ interrupt-controller; -+ #interrupt-cells = <3>; -+ interrupts = ; -+ }; -+ - csi1: camera@1cb4000 { - compatible = "allwinner,sun8i-v3s-csi"; - reg = <0x01cb4000 0x3000>; -@@ -604,16 +615,5 @@ - resets = <&ccu RST_BUS_CSI>; - status = "disabled"; - }; -- -- gic: interrupt-controller@1c81000 { -- compatible = "arm,gic-400"; -- reg = <0x01c81000 0x1000>, -- <0x01c82000 0x2000>, -- <0x01c84000 0x2000>, -- <0x01c86000 0x2000>; -- interrupt-controller; -- #interrupt-cells = <3>; -- interrupts = ; -- }; - }; - }; -diff --git a/arch/arm/boot/dts/suniv-f1c100s.dtsi b/arch/arm/boot/dts/suniv-f1c100s.dtsi -index 6100d3b75f613..def8301014487 100644 ---- a/arch/arm/boot/dts/suniv-f1c100s.dtsi -+++ b/arch/arm/boot/dts/suniv-f1c100s.dtsi -@@ -104,8 +104,10 @@ - - wdt: watchdog@1c20ca0 { - compatible = "allwinner,suniv-f1c100s-wdt", -- "allwinner,sun4i-a10-wdt"; -+ "allwinner,sun6i-a31-wdt"; - reg = <0x01c20ca0 0x20>; -+ interrupts = <16>; -+ clocks = <&osc32k>; - }; - - uart0: serial@1c25000 { -diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts -index 1d2aac2cb6d03..fdc1d64dfff9d 100644 ---- a/arch/arm/boot/dts/tegra124-nyan-big.dts -+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts -@@ -13,12 +13,15 @@ - "google,nyan-big-rev1", "google,nyan-big-rev0", - "google,nyan-big", "google,nyan", "nvidia,tegra124"; - -- panel: panel { -- compatible = "auo,b133xtn01"; -- -- power-supply = <&vdd_3v3_panel>; -- backlight = <&backlight>; -- ddc-i2c-bus = <&dpaux>; -+ host1x@50000000 { -+ dpaux@545c0000 { -+ aux-bus { -+ panel: panel { -+ compatible = 
"auo,b133xtn01"; -+ backlight = <&backlight>; -+ }; -+ }; -+ }; - }; - - mmc@700b0400 { /* SD Card on this bus */ -diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts -index 677babde6460e..abdf4456826f8 100644 ---- a/arch/arm/boot/dts/tegra124-nyan-blaze.dts -+++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts -@@ -15,12 +15,15 @@ - "google,nyan-blaze-rev0", "google,nyan-blaze", - "google,nyan", "nvidia,tegra124"; - -- panel: panel { -- compatible = "samsung,ltn140at29-301"; -- -- power-supply = <&vdd_3v3_panel>; -- backlight = <&backlight>; -- ddc-i2c-bus = <&dpaux>; -+ host1x@50000000 { -+ dpaux@545c0000 { -+ aux-bus { -+ panel: panel { -+ compatible = "samsung,ltn140at29-301"; -+ backlight = <&backlight>; -+ }; -+ }; -+ }; - }; - - sound { -diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts -index e6b54ac1ebd1a..84e2d24065e9a 100644 ---- a/arch/arm/boot/dts/tegra124-venice2.dts -+++ b/arch/arm/boot/dts/tegra124-venice2.dts -@@ -48,6 +48,13 @@ - dpaux@545c0000 { - vdd-supply = <&vdd_3v3_panel>; - status = "okay"; -+ -+ aux-bus { -+ panel: panel { -+ compatible = "lg,lp129qe"; -+ backlight = <&backlight>; -+ }; -+ }; - }; - }; - -@@ -1079,13 +1086,6 @@ - }; - }; - -- panel: panel { -- compatible = "lg,lp129qe"; -- power-supply = <&vdd_3v3_panel>; -- backlight = <&backlight>; -- ddc-i2c-bus = <&dpaux>; -- }; -- - vdd_mux: regulator@0 { - compatible = "regulator-fixed"; - regulator-name = "+VDD_MUX"; -diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi -index dd4d506683de7..7f14f0d005c3e 100644 ---- a/arch/arm/boot/dts/tegra20-tamonten.dtsi -+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi -@@ -183,8 +183,8 @@ - }; - conf_ata { - nvidia,pins = "ata", "atb", "atc", "atd", "ate", -- "cdev1", "cdev2", "dap1", "dtb", "gma", -- "gmb", "gmc", "gmd", "gme", "gpu7", -+ "cdev1", "cdev2", "dap1", "dtb", "dtf", -+ "gma", "gmb", "gmc", "gmd", "gme", "gpu7", - "gpv", "i2cp", "irrx", "irtx", "pta", - "rm", "slxa", "slxk", "spia", "spib", - "uac"; -@@ -203,7 +203,7 @@ - }; - conf_crtp { - nvidia,pins = "crtp", "dap2", "dap3", "dap4", -- "dtc", "dte", "dtf", "gpu", "sdio1", -+ "dtc", "dte", "gpu", "sdio1", - "slxc", "slxd", "spdi", "spdo", "spig", - "uda"; - nvidia,pull = ; -diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi -index e81e5937a60ae..03301ddb3403a 100644 ---- a/arch/arm/boot/dts/uniphier-pxs2.dtsi -+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi -@@ -597,8 +597,8 @@ - compatible = "socionext,uniphier-dwc3", "snps,dwc3"; - status = "disabled"; - reg = <0x65a00000 0xcd00>; -- interrupt-names = "host", "peripheral"; -- interrupts = <0 134 4>, <0 135 4>; -+ interrupt-names = "dwc_usb3"; -+ interrupts = <0 134 4>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>; - clock-names = "ref", "bus_early", "suspend"; -@@ -693,8 +693,8 @@ - compatible = "socionext,uniphier-dwc3", "snps,dwc3"; - status = "disabled"; - reg = <0x65c00000 0xcd00>; -- interrupt-names = "host", "peripheral"; -- interrupts = <0 137 4>, <0 138 4>; -+ interrupt-names = "dwc_usb3"; -+ interrupts = <0 137 4>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>; - clock-names = "ref", "bus_early", "suspend"; -diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts -index 3b88209bacea2..ff1f9a1bcfcfc 100644 ---- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts -+++ 
b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts -@@ -132,6 +132,7 @@ - reg = <0x2c0f0000 0x1000>; - interrupts = <0 84 4>; - cache-level = <2>; -+ cache-unified; - }; - - pmu { -diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts -index 043ddd70372f0..36d5299b2baa8 100644 ---- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts -+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts -@@ -343,7 +343,7 @@ - }; - - &i2c2 { -- tca9548@70 { -+ i2c-mux@70 { - compatible = "nxp,pca9548"; - pinctrl-0 = <&pinctrl_i2c_mux_reset>; - pinctrl-names = "default"; -diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts -index de79dcfd32e62..ba2001f373158 100644 ---- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts -+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts -@@ -340,7 +340,7 @@ - }; - - &i2c2 { -- tca9548@70 { -+ i2c-mux@70 { - compatible = "nxp,pca9548"; - pinctrl-0 = <&pinctrl_i2c_mux_reset>; - pinctrl-names = "default"; -diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig -index 383c632eba7bd..1e244a9287902 100644 ---- a/arch/arm/configs/bcm2835_defconfig -+++ b/arch/arm/configs/bcm2835_defconfig -@@ -108,6 +108,7 @@ CONFIG_MEDIA_SUPPORT=y - CONFIG_MEDIA_CAMERA_SUPPORT=y - CONFIG_DRM=y - CONFIG_DRM_VC4=y -+CONFIG_FB=y - CONFIG_FB_SIMPLE=y - CONFIG_FRAMEBUFFER_CONSOLE=y - CONFIG_SOUND=y -diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig -index 502a9d870ca44..45769d0ddd4ef 100644 ---- a/arch/arm/configs/cm_x300_defconfig -+++ b/arch/arm/configs/cm_x300_defconfig -@@ -146,7 +146,6 @@ CONFIG_NFS_V3_ACL=y - CONFIG_NFS_V4=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_PARTITION_ADVANCED=y - CONFIG_NLS_CODEPAGE_437=m - CONFIG_NLS_ISO8859_1=m -diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig -index a49e699e52de3..ec84d80096b1c 100644 ---- a/arch/arm/configs/ezx_defconfig -+++ b/arch/arm/configs/ezx_defconfig -@@ -314,7 +314,6 @@ CONFIG_NFSD_V3_ACL=y - CONFIG_SMB_FS=m - CONFIG_CIFS=m - CONFIG_CIFS_STATS=y --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig -index 118c4c927f264..6db871d4e0775 100644 ---- a/arch/arm/configs/imote2_defconfig -+++ b/arch/arm/configs/imote2_defconfig -@@ -288,7 +288,6 @@ CONFIG_NFSD_V3_ACL=y - CONFIG_SMB_FS=m - CONFIG_CIFS=m - CONFIG_CIFS_STATS=y --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig -index 80a3ae02d7594..e883cb6dc6459 100644 ---- a/arch/arm/configs/multi_v5_defconfig -+++ b/arch/arm/configs/multi_v5_defconfig -@@ -188,6 +188,7 @@ CONFIG_REGULATOR=y - CONFIG_REGULATOR_FIXED_VOLTAGE=y - CONFIG_MEDIA_SUPPORT=y - CONFIG_MEDIA_CAMERA_SUPPORT=y -+CONFIG_MEDIA_PLATFORM_SUPPORT=y - CONFIG_V4L_PLATFORM_DRIVERS=y - CONFIG_VIDEO_ASPEED=m - CONFIG_VIDEO_ATMEL_ISI=m -@@ -195,6 +196,7 @@ CONFIG_DRM=y - CONFIG_DRM_ATMEL_HLCDC=m - CONFIG_DRM_PANEL_SIMPLE=y - CONFIG_DRM_ASPEED_GFX=m -+CONFIG_FB=y - CONFIG_FB_IMX=y - CONFIG_FB_ATMEL=y - CONFIG_BACKLIGHT_ATMEL_LCDC=y -diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig -index 33572998dbbe0..2eafcf1c5d89c 100644 ---- a/arch/arm/configs/multi_v7_defconfig -+++ b/arch/arm/configs/multi_v7_defconfig -@@ -696,7 +696,6 @@ CONFIG_DRM_IMX_LDB=m - 
CONFIG_DRM_IMX_HDMI=m - CONFIG_DRM_ATMEL_HLCDC=m - CONFIG_DRM_RCAR_DU=m --CONFIG_DRM_RCAR_LVDS=y - CONFIG_DRM_SUN4I=m - CONFIG_DRM_MSM=m - CONFIG_DRM_FSL_DCU=m -diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig -index ca32446b187f5..f53086ddc48b0 100644 ---- a/arch/arm/configs/mxs_defconfig -+++ b/arch/arm/configs/mxs_defconfig -@@ -93,6 +93,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y - CONFIG_DRM=y - CONFIG_DRM_PANEL_SEIKO_43WVF1G=y - CONFIG_DRM_MXSFB=y -+CONFIG_FB=y - CONFIG_FB_MODE_HELPERS=y - CONFIG_LCD_CLASS_DEVICE=y - CONFIG_BACKLIGHT_CLASS_DEVICE=y -diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig -index 23595fc5a29a9..907d6512821ad 100644 ---- a/arch/arm/configs/nhk8815_defconfig -+++ b/arch/arm/configs/nhk8815_defconfig -@@ -127,7 +127,6 @@ CONFIG_NFS_FS=y - CONFIG_NFS_V3_ACL=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_NLS_CODEPAGE_437=y - CONFIG_NLS_ASCII=y - CONFIG_NLS_ISO8859_1=y -diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig -index 58f4834289e63..dedaaae3d0d8a 100644 ---- a/arch/arm/configs/pxa_defconfig -+++ b/arch/arm/configs/pxa_defconfig -@@ -699,7 +699,6 @@ CONFIG_NFSD_V3_ACL=y - CONFIG_NFSD_V4=y - CONFIG_CIFS=m - CONFIG_CIFS_STATS=y --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_DEFAULT="utf8" -diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig -index 3b206a31902ff..065553326b391 100644 ---- a/arch/arm/configs/spear13xx_defconfig -+++ b/arch/arm/configs/spear13xx_defconfig -@@ -61,7 +61,6 @@ CONFIG_SERIAL_AMBA_PL011=y - CONFIG_SERIAL_AMBA_PL011_CONSOLE=y - # CONFIG_HW_RANDOM is not set - CONFIG_RAW_DRIVER=y --CONFIG_MAX_RAW_DEVS=8192 - CONFIG_I2C=y - CONFIG_I2C_DESIGNWARE_PLATFORM=y - CONFIG_SPI=y -diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig -index fc5f71c765edc..afca722d6605c 100644 ---- a/arch/arm/configs/spear3xx_defconfig -+++ b/arch/arm/configs/spear3xx_defconfig -@@ -41,7 +41,6 @@ CONFIG_SERIAL_AMBA_PL011=y - CONFIG_SERIAL_AMBA_PL011_CONSOLE=y - # CONFIG_HW_RANDOM is not set - CONFIG_RAW_DRIVER=y --CONFIG_MAX_RAW_DEVS=8192 - CONFIG_I2C=y - CONFIG_I2C_DESIGNWARE_PLATFORM=y - CONFIG_SPI=y -diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig -index 52a56b8ce6a71..bc32c02cb86b1 100644 ---- a/arch/arm/configs/spear6xx_defconfig -+++ b/arch/arm/configs/spear6xx_defconfig -@@ -36,7 +36,6 @@ CONFIG_INPUT_FF_MEMLESS=y - CONFIG_SERIAL_AMBA_PL011=y - CONFIG_SERIAL_AMBA_PL011_CONSOLE=y - CONFIG_RAW_DRIVER=y --CONFIG_MAX_RAW_DEVS=8192 - CONFIG_I2C=y - CONFIG_I2C_DESIGNWARE_PLATFORM=y - CONFIG_SPI=y -diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig -index 2b575792363e5..149a5bd6b88c1 100644 ---- a/arch/arm/crypto/Kconfig -+++ b/arch/arm/crypto/Kconfig -@@ -63,7 +63,7 @@ config CRYPTO_SHA512_ARM - using optimized ARM assembler and NEON, when available. - - config CRYPTO_BLAKE2S_ARM -- tristate "BLAKE2s digest algorithm (ARM)" -+ bool "BLAKE2s digest algorithm (ARM)" - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S - help - BLAKE2s digest algorithm optimized with ARM scalar instructions. 
This -@@ -102,6 +102,8 @@ config CRYPTO_AES_ARM_BS - depends on KERNEL_MODE_NEON - select CRYPTO_SKCIPHER - select CRYPTO_LIB_AES -+ select CRYPTO_AES -+ select CRYPTO_CBC - select CRYPTO_SIMD - help - Use a faster and more secure NEON based implementation of AES in CBC, -diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile -index eafa898ba6a73..971e74546fb1b 100644 ---- a/arch/arm/crypto/Makefile -+++ b/arch/arm/crypto/Makefile -@@ -9,7 +9,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o - obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o - obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o - obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o --obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o -+obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o - obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o - obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o - obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o -@@ -31,7 +31,7 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o - sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y) - sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o - sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) --blake2s-arm-y := blake2s-core.o blake2s-glue.o -+libblake2s-arm-y:= blake2s-core.o blake2s-glue.o - blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o - sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o - sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o -diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S -index 86345751bbf3a..df40e46601f10 100644 ---- a/arch/arm/crypto/blake2s-core.S -+++ b/arch/arm/crypto/blake2s-core.S -@@ -167,8 +167,8 @@ - .endm - - // --// void blake2s_compress_arch(struct blake2s_state *state, --// const u8 *block, size_t nblocks, u32 inc); -+// void blake2s_compress(struct blake2s_state *state, -+// const u8 *block, size_t nblocks, u32 inc); - // - // Only the first three fields of struct blake2s_state are used: - // u32 h[8]; (inout) -@@ -176,7 +176,7 @@ - // u32 f[2]; (in) - // - .align 5 --ENTRY(blake2s_compress_arch) -+ENTRY(blake2s_compress) - push {r0-r2,r4-r11,lr} // keep this an even number - - .Lnext_block: -@@ -303,4 +303,4 @@ ENTRY(blake2s_compress_arch) - str r3, [r12], #4 - bne 1b - b .Lcopy_block_done --ENDPROC(blake2s_compress_arch) -+ENDPROC(blake2s_compress) -diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/crypto/blake2s-glue.c -index f2cc1e5fc9ec1..0238a70d9581e 100644 ---- a/arch/arm/crypto/blake2s-glue.c -+++ b/arch/arm/crypto/blake2s-glue.c -@@ -1,78 +1,7 @@ - // SPDX-License-Identifier: GPL-2.0-or-later --/* -- * BLAKE2s digest algorithm, ARM scalar implementation -- * -- * Copyright 2020 Google LLC -- */ - - #include --#include -- - #include - - /* defined in blake2s-core.S */ --EXPORT_SYMBOL(blake2s_compress_arch); -- --static int crypto_blake2s_update_arm(struct shash_desc *desc, -- const u8 *in, unsigned int inlen) --{ -- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); --} -- --static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out) --{ -- return crypto_blake2s_final(desc, out, blake2s_compress_arch); --} -- --#define BLAKE2S_ALG(name, driver_name, digest_size) \ -- { \ -- .base.cra_name = name, \ -- .base.cra_driver_name = driver_name, \ -- .base.cra_priority = 200, \ -- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ -- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ -- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ -- .base.cra_module = THIS_MODULE, \ -- .digestsize = 
digest_size, \ -- .setkey = crypto_blake2s_setkey, \ -- .init = crypto_blake2s_init, \ -- .update = crypto_blake2s_update_arm, \ -- .final = crypto_blake2s_final_arm, \ -- .descsize = sizeof(struct blake2s_state), \ -- } -- --static struct shash_alg blake2s_arm_algs[] = { -- BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE), -- BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE), -- BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE), -- BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE), --}; -- --static int __init blake2s_arm_mod_init(void) --{ -- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? -- crypto_register_shashes(blake2s_arm_algs, -- ARRAY_SIZE(blake2s_arm_algs)) : 0; --} -- --static void __exit blake2s_arm_mod_exit(void) --{ -- if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) -- crypto_unregister_shashes(blake2s_arm_algs, -- ARRAY_SIZE(blake2s_arm_algs)); --} -- --module_init(blake2s_arm_mod_init); --module_exit(blake2s_arm_mod_exit); -- --MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation"); --MODULE_LICENSE("GPL"); --MODULE_AUTHOR("Eric Biggers "); --MODULE_ALIAS_CRYPTO("blake2s-128"); --MODULE_ALIAS_CRYPTO("blake2s-128-arm"); --MODULE_ALIAS_CRYPTO("blake2s-160"); --MODULE_ALIAS_CRYPTO("blake2s-160-arm"); --MODULE_ALIAS_CRYPTO("blake2s-224"); --MODULE_ALIAS_CRYPTO("blake2s-224-arm"); --MODULE_ALIAS_CRYPTO("blake2s-256"); --MODULE_ALIAS_CRYPTO("blake2s-256-arm"); -+EXPORT_SYMBOL(blake2s_compress); -diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h -index 413abfb42989e..f82a819eb0dbb 100644 ---- a/arch/arm/include/asm/arch_gicv3.h -+++ b/arch/arm/include/asm/arch_gicv3.h -@@ -48,6 +48,7 @@ static inline u32 read_ ## a64(void) \ - return read_sysreg(a32); \ - } \ - -+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1) - CPUIF_MAP(ICC_PMR, ICC_PMR_EL1) - CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1) - CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1) -@@ -63,12 +64,6 @@ CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1) - - /* Low-level accessors */ - --static inline void gic_write_eoir(u32 irq) --{ -- write_sysreg(irq, ICC_EOIR1); -- isb(); --} -- - static inline void gic_write_dir(u32 val) - { - write_sysreg(val, ICC_DIR); -diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h -index e2b1fd558bf3c..5d52da168ab3a 100644 ---- a/arch/arm/include/asm/assembler.h -+++ b/arch/arm/include/asm/assembler.h -@@ -107,6 +107,16 @@ - .endm - #endif - -+#if __LINUX_ARM_ARCH__ < 7 -+ .macro dsb, args -+ mcr p15, 0, r0, c7, c10, 4 -+ .endm -+ -+ .macro isb, args -+ mcr p15, 0, r0, c7, c5, 4 -+ .endm -+#endif -+ - .macro asm_trace_hardirqs_off, save=1 - #if defined(CONFIG_TRACE_IRQFLAGS) - .if \save -@@ -259,6 +269,7 @@ - */ - #define ALT_UP(instr...) \ - .pushsection ".alt.smp.init", "a" ;\ -+ .align 2 ;\ - .long 9998b - . ;\ - 9997: instr ;\ - .if . - 9997b == 2 ;\ -@@ -270,6 +281,7 @@ - .popsection - #define ALT_UP_B(label) \ - .pushsection ".alt.smp.init", "a" ;\ -+ .align 2 ;\ - .long 9998b - . ;\ - W(b) . + (label - 9998b) ;\ - .popsection -@@ -314,6 +326,23 @@ - #endif - .endm - -+/* -+ * Raw SMP data memory barrier -+ */ -+ .macro __smp_dmb mode -+#if __LINUX_ARM_ARCH__ >= 7 -+ .ifeqs "\mode","arm" -+ dmb ish -+ .else -+ W(dmb) ish -+ .endif -+#elif __LINUX_ARM_ARCH__ == 6 -+ mcr p15, 0, r0, c7, c10, 5 @ dmb -+#else -+ .error "Incompatible SMP platform" -+#endif -+ .endm -+ - #if defined(CONFIG_CPU_V7M) - /* - * setmode is used to assert to be in svc mode during boot. 
For v7-M -diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h -index 97a312ba08401..fe385551edeca 100644 ---- a/arch/arm/include/asm/bugs.h -+++ b/arch/arm/include/asm/bugs.h -@@ -1,7 +1,5 @@ - /* SPDX-License-Identifier: GPL-2.0-only */ - /* -- * arch/arm/include/asm/bugs.h -- * - * Copyright (C) 1995-2003 Russell King - */ - #ifndef __ASM_BUGS_H -@@ -10,10 +8,8 @@ - extern void check_writebuffer_bugs(void); - - #ifdef CONFIG_MMU --extern void check_bugs(void); - extern void check_other_bugs(void); - #else --#define check_bugs() do { } while (0) - #define check_other_bugs() do { } while (0) - #endif - -diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h -index a81dda65c5762..45180a2cc47cb 100644 ---- a/arch/arm/include/asm/dma.h -+++ b/arch/arm/include/asm/dma.h -@@ -10,7 +10,7 @@ - #else - #define MAX_DMA_ADDRESS ({ \ - extern phys_addr_t arm_dma_zone_size; \ -- arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \ -+ arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \ - (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) - #endif - -diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h -index a6f3b179e8a94..27218eabbf9a0 100644 ---- a/arch/arm/include/asm/efi.h -+++ b/arch/arm/include/asm/efi.h -@@ -17,7 +17,6 @@ - - #ifdef CONFIG_EFI - void efi_init(void); --extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); - - int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); - int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); -diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S -index dfc6bfa430121..24486dad9e19b 100644 ---- a/arch/arm/include/asm/entry-macro-multi.S -+++ b/arch/arm/include/asm/entry-macro-multi.S -@@ -13,28 +13,4 @@ - @ - badrne lr, 1b - bne asm_do_IRQ -- --#ifdef CONFIG_SMP -- /* -- * XXX -- * -- * this macro assumes that irqstat (r2) and base (r6) are -- * preserved from get_irqnr_and_base above -- */ -- ALT_SMP(test_for_ipi r0, r2, r6, lr) -- ALT_UP_B(9997f) -- movne r1, sp -- badrne lr, 1b -- bne do_IPI --#endif --9997: -- .endm -- -- .macro arch_irq_handler, symbol_name -- .align 5 -- .global \symbol_name --\symbol_name: -- mov r8, lr -- arch_irq_handler_default -- ret r8 - .endm -diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h -index f74944c6fe8d3..79d246ac93ab6 100644 ---- a/arch/arm/include/asm/io.h -+++ b/arch/arm/include/asm/io.h -@@ -436,6 +436,9 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); - #define ARCH_HAS_VALID_PHYS_ADDR_RANGE - extern int valid_phys_addr_range(phys_addr_t addr, size_t size); - extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); -+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, -+ unsigned long flags); -+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap - #endif - - /* -diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h -index 92282558caf7c..2b8970d8e5a2f 100644 ---- a/arch/arm/include/asm/mach/map.h -+++ b/arch/arm/include/asm/mach/map.h -@@ -27,6 +27,7 @@ enum { - MT_HIGH_VECTORS, - MT_MEMORY_RWX, - MT_MEMORY_RW, -+ MT_MEMORY_RO, - MT_ROM, - MT_MEMORY_RWX_NONCACHED, - MT_MEMORY_RW_DTCM, -diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h -index fe87397c3d8c6..bdbc1e590891e 100644 ---- a/arch/arm/include/asm/perf_event.h -+++ b/arch/arm/include/asm/perf_event.h -@@ -17,7 +17,7 @@ 
extern unsigned long perf_misc_flags(struct pt_regs *regs); - - #define perf_arch_fetch_caller_regs(regs, __ip) { \ - (regs)->ARM_pc = (__ip); \ -- (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \ -+ frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \ - (regs)->ARM_sp = current_stack_pointer; \ - (regs)->ARM_cpsr = SVC_MODE; \ - } -diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h -index d16aba48fa0a4..090011394477f 100644 ---- a/arch/arm/include/asm/pgtable-nommu.h -+++ b/arch/arm/include/asm/pgtable-nommu.h -@@ -44,12 +44,6 @@ - - typedef pte_t *pte_addr_t; - --/* -- * ZERO_PAGE is a global shared page that is always zero: used -- * for zero-mapped memory areas etc.. -- */ --#define ZERO_PAGE(vaddr) (virt_to_page(0)) -- - /* - * Mark the prot value as uncacheable and unbufferable. - */ -diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h -index cd1f84bb40aea..a25c4303fc0e6 100644 ---- a/arch/arm/include/asm/pgtable.h -+++ b/arch/arm/include/asm/pgtable.h -@@ -10,6 +10,15 @@ - #include - #include - -+#ifndef __ASSEMBLY__ -+/* -+ * ZERO_PAGE is a global shared page that is always zero: used -+ * for zero-mapped memory areas etc.. -+ */ -+extern struct page *empty_zero_page; -+#define ZERO_PAGE(vaddr) (empty_zero_page) -+#endif -+ - #ifndef CONFIG_MMU - - #include -@@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - #define __S111 __PAGE_SHARED_EXEC - - #ifndef __ASSEMBLY__ --/* -- * ZERO_PAGE is a global shared page that is always zero: used -- * for zero-mapped memory areas etc.. -- */ --extern struct page *empty_zero_page; --#define ZERO_PAGE(vaddr) (empty_zero_page) -- - - extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; - -diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h -index 9e6b972863077..8aeff55aebfaf 100644 ---- a/arch/arm/include/asm/processor.h -+++ b/arch/arm/include/asm/processor.h -@@ -96,6 +96,7 @@ unsigned long get_wchan(struct task_struct *p); - #define __ALT_SMP_ASM(smp, up) \ - "9998: " smp "\n" \ - " .pushsection \".alt.smp.init\", \"a\"\n" \ -+ " .align 2\n" \ - " .long 9998b - .\n" \ - " " up "\n" \ - " .popsection\n" -diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h -index 93051e2f402c8..1408a6a15d0e0 100644 ---- a/arch/arm/include/asm/ptrace.h -+++ b/arch/arm/include/asm/ptrace.h -@@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) - ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ - }) - -+ -+/* -+ * Update ITSTATE after normal execution of an IT block instruction. 
-+ * -+ * The 8 IT state bits are split into two parts in CPSR: -+ * ITSTATE<1:0> are in CPSR<26:25> -+ * ITSTATE<7:2> are in CPSR<15:10> -+ */ -+static inline unsigned long it_advance(unsigned long cpsr) -+{ -+ if ((cpsr & 0x06000400) == 0) { -+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ -+ cpsr &= ~PSR_IT_MASK; -+ } else { -+ /* We need to shift left ITSTATE<4:0> */ -+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ -+ unsigned long it = cpsr & mask; -+ it <<= 1; -+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ -+ it &= mask; -+ cpsr &= ~mask; -+ cpsr |= it; -+ } -+ return cpsr; -+} -+ - #endif /* __ASSEMBLY__ */ - #endif -diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h -index 5d508f5d56c49..fc11ddf13b8fd 100644 ---- a/arch/arm/include/asm/smp.h -+++ b/arch/arm/include/asm/smp.h -@@ -24,11 +24,6 @@ struct seq_file; - */ - extern void show_ipi_list(struct seq_file *, int); - --/* -- * Called from assembly code, this handles an IPI. -- */ --asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); -- - /* - * Called from C code, this handles an IPI. - */ -diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h -new file mode 100644 -index 0000000000000..85f9e538fb325 ---- /dev/null -+++ b/arch/arm/include/asm/spectre.h -@@ -0,0 +1,38 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+ -+#ifndef __ASM_SPECTRE_H -+#define __ASM_SPECTRE_H -+ -+enum { -+ SPECTRE_UNAFFECTED, -+ SPECTRE_MITIGATED, -+ SPECTRE_VULNERABLE, -+}; -+ -+enum { -+ __SPECTRE_V2_METHOD_BPIALL, -+ __SPECTRE_V2_METHOD_ICIALLU, -+ __SPECTRE_V2_METHOD_SMC, -+ __SPECTRE_V2_METHOD_HVC, -+ __SPECTRE_V2_METHOD_LOOP8, -+}; -+ -+enum { -+ SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL), -+ SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU), -+ SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC), -+ SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC), -+ SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8), -+}; -+ -+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES -+void spectre_v2_update_state(unsigned int state, unsigned int methods); -+#else -+static inline void spectre_v2_update_state(unsigned int state, -+ unsigned int methods) -+{} -+#endif -+ -+int spectre_bhb_update_vectors(unsigned int method); -+ -+#endif -diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h -index 6f5d627c44a3c..f46b3c570f92e 100644 ---- a/arch/arm/include/asm/sync_bitops.h -+++ b/arch/arm/include/asm/sync_bitops.h -@@ -14,14 +14,35 @@ - * ops which are SMP safe even on a UP kernel. 
- */ - -+/* -+ * Unordered -+ */ -+ - #define sync_set_bit(nr, p) _set_bit(nr, p) - #define sync_clear_bit(nr, p) _clear_bit(nr, p) - #define sync_change_bit(nr, p) _change_bit(nr, p) --#define sync_test_and_set_bit(nr, p) _test_and_set_bit(nr, p) --#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p) --#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p) - #define sync_test_bit(nr, addr) test_bit(nr, addr) --#define arch_sync_cmpxchg arch_cmpxchg - -+/* -+ * Fully ordered -+ */ -+ -+int _sync_test_and_set_bit(int nr, volatile unsigned long * p); -+#define sync_test_and_set_bit(nr, p) _sync_test_and_set_bit(nr, p) -+ -+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p); -+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p) -+ -+int _sync_test_and_change_bit(int nr, volatile unsigned long * p); -+#define sync_test_and_change_bit(nr, p) _sync_test_and_change_bit(nr, p) -+ -+#define arch_sync_cmpxchg(ptr, old, new) \ -+({ \ -+ __typeof__(*(ptr)) __ret; \ -+ __smp_mb__before_atomic(); \ -+ __ret = arch_cmpxchg_relaxed((ptr), (old), (new)); \ -+ __smp_mb__after_atomic(); \ -+ __ret; \ -+}) - - #endif -diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h -index 24c19d63ff0a1..95bf70ebd878e 100644 ---- a/arch/arm/include/asm/syscall.h -+++ b/arch/arm/include/asm/syscall.h -@@ -25,6 +25,9 @@ static inline int syscall_get_nr(struct task_struct *task, - if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT)) - return task_thread_info(task)->abi_syscall; - -+ if (task_thread_info(task)->abi_syscall == -1) -+ return -1; -+ - return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK; - } - -diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h -index 9a18da3e10cc3..b682189a2b5df 100644 ---- a/arch/arm/include/asm/thread_info.h -+++ b/arch/arm/include/asm/thread_info.h -@@ -129,15 +129,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, - #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ - #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ - #define TIF_UPROBE 3 /* breakpointed or singlestepping */ --#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ --#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ --#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ --#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ --#define TIF_NOTIFY_SIGNAL 8 /* signal notifications exist */ -+#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ - - #define TIF_USING_IWMMXT 17 - #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ --#define TIF_RESTORE_SIGMASK 20 -+#define TIF_RESTORE_SIGMASK 19 -+#define TIF_SYSCALL_TRACE 20 /* syscall trace active */ -+#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */ -+#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */ -+#define TIF_SECCOMP 23 /* seccomp syscall filtering active */ -+ - - #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) -diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h -index 7c3b3671d6c25..6d1337c169cd3 100644 ---- a/arch/arm/include/asm/timex.h -+++ b/arch/arm/include/asm/timex.h -@@ -11,5 +11,6 @@ - - typedef unsigned long cycles_t; - #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 
0 : c; }) -+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback()) - - #endif -diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h -index 36fbc33292526..32dbfd81f42a4 100644 ---- a/arch/arm/include/asm/uaccess.h -+++ b/arch/arm/include/asm/uaccess.h -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -497,7 +498,10 @@ do { \ - } \ - default: __err = __get_user_bad(); break; \ - } \ -- *(type *)(dst) = __val; \ -+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \ -+ put_unaligned(__val, (type *)(dst)); \ -+ else \ -+ *(type *)(dst) = __val; /* aligned by caller */ \ - if (__err) \ - goto err_label; \ - } while (0) -@@ -507,7 +511,9 @@ do { \ - const type *__pk_ptr = (dst); \ - unsigned long __dst = (unsigned long)__pk_ptr; \ - int __err = 0; \ -- type __val = *(type *)src; \ -+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ -+ ? get_unaligned((type *)(src)) \ -+ : *(type *)(src); /* aligned by caller */ \ - switch (sizeof(type)) { \ - case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \ - case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \ -diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h -index 4a91428c324db..fad45c884e988 100644 ---- a/arch/arm/include/asm/vmlinux.lds.h -+++ b/arch/arm/include/asm/vmlinux.lds.h -@@ -26,6 +26,19 @@ - #define ARM_MMU_DISCARD(x) x - #endif - -+/* -+ * ld.lld does not support NOCROSSREFS: -+ * https://github.com/ClangBuiltLinux/linux/issues/1609 -+ */ -+#ifdef CONFIG_LD_IS_LLD -+#define NOCROSSREFS -+#endif -+ -+/* Set start/end symbol names to the LMA for the section */ -+#define ARM_LMA(sym, section) \ -+ sym##_start = LOADADDR(section); \ -+ sym##_end = LOADADDR(section) + SIZEOF(section) -+ - #define PROC_INFO \ - . = ALIGN(4); \ - __proc_info_begin = .; \ -@@ -110,19 +123,31 @@ - * only thing that matters is their relative offsets - */ - #define ARM_VECTORS \ -- __vectors_start = .; \ -- .vectors 0xffff0000 : AT(__vectors_start) { \ -- *(.vectors) \ -+ __vectors_lma = .; \ -+ OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ -+ .vectors { \ -+ *(.vectors) \ -+ } \ -+ .vectors.bhb.loop8 { \ -+ *(.vectors.bhb.loop8) \ -+ } \ -+ .vectors.bhb.bpiall { \ -+ *(.vectors.bhb.bpiall) \ -+ } \ - } \ -- . = __vectors_start + SIZEOF(.vectors); \ -- __vectors_end = .; \ -+ ARM_LMA(__vectors, .vectors); \ -+ ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \ -+ ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \ -+ . = __vectors_lma + SIZEOF(.vectors) + \ -+ SIZEOF(.vectors.bhb.loop8) + \ -+ SIZEOF(.vectors.bhb.bpiall); \ - \ -- __stubs_start = .; \ -- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \ -+ __stubs_lma = .; \ -+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \ - *(.stubs) \ - } \ -- . = __stubs_start + SIZEOF(.stubs); \ -- __stubs_end = .; \ -+ ARM_LMA(__stubs, .stubs); \ -+ . 
= __stubs_lma + SIZEOF(.stubs); \ - \ - PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors)); - -diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h -index c8eb83d4b8964..3edbb3c5b42bf 100644 ---- a/arch/arm/include/debug/imx-uart.h -+++ b/arch/arm/include/debug/imx-uart.h -@@ -11,13 +11,6 @@ - #define IMX1_UART_BASE_ADDR(n) IMX1_UART##n##_BASE_ADDR - #define IMX1_UART_BASE(n) IMX1_UART_BASE_ADDR(n) - --#define IMX21_UART1_BASE_ADDR 0x1000a000 --#define IMX21_UART2_BASE_ADDR 0x1000b000 --#define IMX21_UART3_BASE_ADDR 0x1000c000 --#define IMX21_UART4_BASE_ADDR 0x1000d000 --#define IMX21_UART_BASE_ADDR(n) IMX21_UART##n##_BASE_ADDR --#define IMX21_UART_BASE(n) IMX21_UART_BASE_ADDR(n) -- - #define IMX25_UART1_BASE_ADDR 0x43f90000 - #define IMX25_UART2_BASE_ADDR 0x43f94000 - #define IMX25_UART3_BASE_ADDR 0x5000c000 -@@ -26,6 +19,13 @@ - #define IMX25_UART_BASE_ADDR(n) IMX25_UART##n##_BASE_ADDR - #define IMX25_UART_BASE(n) IMX25_UART_BASE_ADDR(n) - -+#define IMX27_UART1_BASE_ADDR 0x1000a000 -+#define IMX27_UART2_BASE_ADDR 0x1000b000 -+#define IMX27_UART3_BASE_ADDR 0x1000c000 -+#define IMX27_UART4_BASE_ADDR 0x1000d000 -+#define IMX27_UART_BASE_ADDR(n) IMX27_UART##n##_BASE_ADDR -+#define IMX27_UART_BASE(n) IMX27_UART_BASE_ADDR(n) -+ - #define IMX31_UART1_BASE_ADDR 0x43f90000 - #define IMX31_UART2_BASE_ADDR 0x43f94000 - #define IMX31_UART3_BASE_ADDR 0x5000c000 -@@ -112,10 +112,10 @@ - - #ifdef CONFIG_DEBUG_IMX1_UART - #define UART_PADDR IMX_DEBUG_UART_BASE(IMX1) --#elif defined(CONFIG_DEBUG_IMX21_IMX27_UART) --#define UART_PADDR IMX_DEBUG_UART_BASE(IMX21) - #elif defined(CONFIG_DEBUG_IMX25_UART) - #define UART_PADDR IMX_DEBUG_UART_BASE(IMX25) -+#elif defined(CONFIG_DEBUG_IMX27_UART) -+#define UART_PADDR IMX_DEBUG_UART_BASE(IMX27) - #elif defined(CONFIG_DEBUG_IMX31_UART) - #define UART_PADDR IMX_DEBUG_UART_BASE(IMX31) - #elif defined(CONFIG_DEBUG_IMX35_UART) -diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile -index ae295a3bcfefd..6ef3b535b7bf7 100644 ---- a/arch/arm/kernel/Makefile -+++ b/arch/arm/kernel/Makefile -@@ -106,4 +106,6 @@ endif - - obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o - -+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o -+ - extra-y := $(head-y) vmlinux.lds -diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c -index 14c8dbbb7d2df..087bce6ec8e9b 100644 ---- a/arch/arm/kernel/bugs.c -+++ b/arch/arm/kernel/bugs.c -@@ -1,5 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0 - #include -+#include - #include - #include - -@@ -11,7 +12,7 @@ void check_other_bugs(void) - #endif - } - --void __init check_bugs(void) -+void __init arch_cpu_finalize_init(void) - { - check_writebuffer_bugs(); - check_other_bugs(); -diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S -index 241b73d64df73..68261a83b7ad8 100644 ---- a/arch/arm/kernel/entry-armv.S -+++ b/arch/arm/kernel/entry-armv.S -@@ -597,11 +597,9 @@ call_fpe: - tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 - reteq lr - and r8, r0, #0x00000f00 @ mask out CP number -- THUMB( lsr r8, r8, #8 ) - mov r7, #1 -- add r6, r10, #TI_USED_CP -- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] -- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] -+ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first -+ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[] - #ifdef CONFIG_IWMMXT - @ Test if we need to give access to iWMMXt coprocessors - ldr r5, [r10, #TI_FLAGS] -@@ -610,7 +608,7 @@ call_fpe: - bcs iwmmxt_task_enable - #endif - ARM( add 
pc, pc, r8, lsr #6 ) -- THUMB( lsl r8, r8, #2 ) -+ THUMB( lsr r8, r8, #6 ) - THUMB( add pc, r8 ) - nop - -@@ -1002,12 +1000,11 @@ vector_\name: - sub lr, lr, #\correction - .endif - -- @ -- @ Save r0, lr_ (parent PC) and spsr_ -- @ (parent CPSR) -- @ -+ @ Save r0, lr_ (parent PC) - stmia sp, {r0, lr} @ save r0, lr -- mrs lr, spsr -+ -+ @ Save spsr_ (parent CPSR) -+2: mrs lr, spsr - str lr, [sp, #8] @ save spsr - - @ -@@ -1028,6 +1025,44 @@ vector_\name: - movs pc, lr @ branch to handler in SVC mode - ENDPROC(vector_\name) - -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+ .subsection 1 -+ .align 5 -+vector_bhb_loop8_\name: -+ .if \correction -+ sub lr, lr, #\correction -+ .endif -+ -+ @ Save r0, lr_ (parent PC) -+ stmia sp, {r0, lr} -+ -+ @ bhb workaround -+ mov r0, #8 -+3: W(b) . + 4 -+ subs r0, r0, #1 -+ bne 3b -+ dsb -+ isb -+ b 2b -+ENDPROC(vector_bhb_loop8_\name) -+ -+vector_bhb_bpiall_\name: -+ .if \correction -+ sub lr, lr, #\correction -+ .endif -+ -+ @ Save r0, lr_ (parent PC) -+ stmia sp, {r0, lr} -+ -+ @ bhb workaround -+ mcr p15, 0, r0, c7, c5, 6 @ BPIALL -+ @ isb not needed due to "movs pc, lr" in the vector stub -+ @ which gives a "context synchronisation". -+ b 2b -+ENDPROC(vector_bhb_bpiall_\name) -+ .previous -+#endif -+ - .align 2 - @ handler addresses follow this label - 1: -@@ -1036,6 +1071,10 @@ ENDPROC(vector_\name) - .section .stubs, "ax", %progbits - @ This must be the first word - .word vector_swi -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+ .word vector_bhb_loop8_swi -+ .word vector_bhb_bpiall_swi -+#endif - - vector_rst: - ARM( swi SYS_ERROR0 ) -@@ -1150,8 +1189,10 @@ vector_addrexcptn: - * FIQ "NMI" handler - *----------------------------------------------------------------------------- - * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86 -- * systems. -+ * systems. This must be the last vector stub, so lets place it in its own -+ * subsection. - */ -+ .subsection 2 - vector_stub fiq, FIQ_MODE, 4 - - .long __fiq_usr @ 0 (USR_26 / USR_32) -@@ -1184,6 +1225,30 @@ vector_addrexcptn: - W(b) vector_irq - W(b) vector_fiq - -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+ .section .vectors.bhb.loop8, "ax", %progbits -+.L__vectors_bhb_loop8_start: -+ W(b) vector_rst -+ W(b) vector_bhb_loop8_und -+ W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004 -+ W(b) vector_bhb_loop8_pabt -+ W(b) vector_bhb_loop8_dabt -+ W(b) vector_addrexcptn -+ W(b) vector_bhb_loop8_irq -+ W(b) vector_bhb_loop8_fiq -+ -+ .section .vectors.bhb.bpiall, "ax", %progbits -+.L__vectors_bhb_bpiall_start: -+ W(b) vector_rst -+ W(b) vector_bhb_bpiall_und -+ W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008 -+ W(b) vector_bhb_bpiall_pabt -+ W(b) vector_bhb_bpiall_dabt -+ W(b) vector_addrexcptn -+ W(b) vector_bhb_bpiall_irq -+ W(b) vector_bhb_bpiall_fiq -+#endif -+ - .data - .align 2 - -diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S -index d9c99db50243f..e7bfdd10bbcd3 100644 ---- a/arch/arm/kernel/entry-common.S -+++ b/arch/arm/kernel/entry-common.S -@@ -101,6 +101,7 @@ slow_work_pending: - cmp r0, #0 - beq no_work_pending - movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) -+ str scno, [tsk, #TI_ABI_SYSCALL] @ make sure tracers see update - ldmia sp, {r0 - r6} @ have to reload r0 - r6 - b local_restart @ ... 
and off we go - ENDPROC(ret_fast_syscall) -@@ -153,6 +154,29 @@ ENDPROC(ret_from_fork) - *----------------------------------------------------------------------------- - */ - -+ .align 5 -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+ENTRY(vector_bhb_loop8_swi) -+ sub sp, sp, #PT_REGS_SIZE -+ stmia sp, {r0 - r12} -+ mov r8, #8 -+1: b 2f -+2: subs r8, r8, #1 -+ bne 1b -+ dsb -+ isb -+ b 3f -+ENDPROC(vector_bhb_loop8_swi) -+ -+ .align 5 -+ENTRY(vector_bhb_bpiall_swi) -+ sub sp, sp, #PT_REGS_SIZE -+ stmia sp, {r0 - r12} -+ mcr p15, 0, r8, c7, c5, 6 @ BPIALL -+ isb -+ b 3f -+ENDPROC(vector_bhb_bpiall_swi) -+#endif - .align 5 - ENTRY(vector_swi) - #ifdef CONFIG_CPU_V7M -@@ -160,6 +184,7 @@ ENTRY(vector_swi) - #else - sub sp, sp, #PT_REGS_SIZE - stmia sp, {r0 - r12} @ Calling r0 - r12 -+3: - ARM( add r8, sp, #S_PC ) - ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr - THUMB( mov r8, sp ) -diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S -index a74289ebc8036..5f1b1ce10473a 100644 ---- a/arch/arm/kernel/entry-ftrace.S -+++ b/arch/arm/kernel/entry-ftrace.S -@@ -22,10 +22,7 @@ - * mcount can be thought of as a function called in the middle of a subroutine - * call. As such, it needs to be transparent for both the caller and the - * callee: the original lr needs to be restored when leaving mcount, and no -- * registers should be clobbered. (In the __gnu_mcount_nc implementation, we -- * clobber the ip register. This is OK because the ARM calling convention -- * allows it to be clobbered in subroutines and doesn't use it to hold -- * parameters.) -+ * registers should be clobbered. - * - * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" - * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). -@@ -70,26 +67,25 @@ - - .macro __ftrace_regs_caller - -- sub sp, sp, #8 @ space for PC and CPSR OLD_R0, -+ str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0, - @ OLD_R0 will overwrite previous LR - -- add ip, sp, #12 @ move in IP the value of SP as it was -- @ before the push {lr} of the mcount mechanism -+ ldr lr, [sp, #8] @ get previous LR - -- str lr, [sp, #0] @ store LR instead of PC -+ str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR - -- ldr lr, [sp, #8] @ get previous LR -+ str lr, [sp, #-4]! @ store previous LR as LR - -- str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR -+ add lr, sp, #16 @ move in LR the value of SP as it was -+ @ before the push {lr} of the mcount mechanism - -- stmdb sp!, {ip, lr} -- stmdb sp!, {r0-r11, lr} -+ push {r0-r11, ip, lr} - - @ stack content at this point: - @ 0 4 48 52 56 60 64 68 72 -- @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 | -+ @ R0 | R1 | ... 
| IP | SP + 4 | previous LR | LR | PSR | OLD_R0 | - -- mov r3, sp @ struct pt_regs* -+ mov r3, sp @ struct pt_regs* - - ldr r2, =function_trace_op - ldr r2, [r2] @ pointer to the current -@@ -112,11 +108,9 @@ ftrace_graph_regs_call: - #endif - - @ pop saved regs -- ldmia sp!, {r0-r12} @ restore r0 through r12 -- ldr ip, [sp, #8] @ restore PC -- ldr lr, [sp, #4] @ restore LR -- ldr sp, [sp, #0] @ restore SP -- mov pc, ip @ return -+ pop {r0-r11, ip, lr} @ restore r0 through r12 -+ ldr lr, [sp], #4 @ restore LR -+ ldr pc, [sp], #12 - .endm - - #ifdef CONFIG_FUNCTION_GRAPH_TRACER -@@ -132,11 +126,9 @@ ftrace_graph_regs_call: - bl prepare_ftrace_return - - @ pop registers saved in ftrace_regs_caller -- ldmia sp!, {r0-r12} @ restore r0 through r12 -- ldr ip, [sp, #8] @ restore PC -- ldr lr, [sp, #4] @ restore LR -- ldr sp, [sp, #0] @ restore SP -- mov pc, ip @ return -+ pop {r0-r11, ip, lr} @ restore r0 through r12 -+ ldr lr, [sp], #4 @ restore LR -+ ldr pc, [sp], #12 - - .endm - #endif -@@ -202,16 +194,17 @@ ftrace_graph_call\suffix: - .endm - - .macro mcount_exit -- ldmia sp!, {r0-r3, ip, lr} -- ret ip -+ ldmia sp!, {r0-r3} -+ ldr lr, [sp, #4] -+ ldr pc, [sp], #8 - .endm - - ENTRY(__gnu_mcount_nc) - UNWIND(.fnstart) - #ifdef CONFIG_DYNAMIC_FTRACE -- mov ip, lr -- ldmia sp!, {lr} -- ret ip -+ push {lr} -+ ldr lr, [sp, #4] -+ ldr pc, [sp], #8 - #else - __mcount - #endif -diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S -index 0fc814bbc34b1..8796a69c78e00 100644 ---- a/arch/arm/kernel/head-nommu.S -+++ b/arch/arm/kernel/head-nommu.S -@@ -114,6 +114,7 @@ ENTRY(secondary_startup) - add r12, r12, r10 - ret r12 - 1: bl __after_proc_init -+ ldr r7, __secondary_data @ reload r7 - ldr sp, [r7, #12] @ set up the stack pointer - mov fp, #0 - b secondary_start_kernel -diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c -index 7bd30c0a4280d..22f937e6f3ffb 100644 ---- a/arch/arm/kernel/kgdb.c -+++ b/arch/arm/kernel/kgdb.c -@@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) - return 0; - } - --static struct undef_hook kgdb_brkpt_hook = { -+static struct undef_hook kgdb_brkpt_arm_hook = { - .instr_mask = 0xffffffff, - .instr_val = KGDB_BREAKINST, -- .cpsr_mask = MODE_MASK, -+ .cpsr_mask = PSR_T_BIT | MODE_MASK, - .cpsr_val = SVC_MODE, - .fn = kgdb_brk_fn - }; - --static struct undef_hook kgdb_compiled_brkpt_hook = { -+static struct undef_hook kgdb_brkpt_thumb_hook = { -+ .instr_mask = 0xffff, -+ .instr_val = KGDB_BREAKINST & 0xffff, -+ .cpsr_mask = PSR_T_BIT | MODE_MASK, -+ .cpsr_val = PSR_T_BIT | SVC_MODE, -+ .fn = kgdb_brk_fn -+}; -+ -+static struct undef_hook kgdb_compiled_brkpt_arm_hook = { - .instr_mask = 0xffffffff, - .instr_val = KGDB_COMPILED_BREAK, -- .cpsr_mask = MODE_MASK, -+ .cpsr_mask = PSR_T_BIT | MODE_MASK, - .cpsr_val = SVC_MODE, - .fn = kgdb_compiled_brk_fn - }; - -+static struct undef_hook kgdb_compiled_brkpt_thumb_hook = { -+ .instr_mask = 0xffff, -+ .instr_val = KGDB_COMPILED_BREAK & 0xffff, -+ .cpsr_mask = PSR_T_BIT | MODE_MASK, -+ .cpsr_val = PSR_T_BIT | SVC_MODE, -+ .fn = kgdb_compiled_brk_fn -+}; -+ - static int __kgdb_notify(struct die_args *args, unsigned long cmd) - { - struct pt_regs *regs = args->regs; -@@ -210,8 +226,10 @@ int kgdb_arch_init(void) - if (ret != 0) - return ret; - -- register_undef_hook(&kgdb_brkpt_hook); -- register_undef_hook(&kgdb_compiled_brkpt_hook); -+ register_undef_hook(&kgdb_brkpt_arm_hook); -+ register_undef_hook(&kgdb_brkpt_thumb_hook); -+ 
register_undef_hook(&kgdb_compiled_brkpt_arm_hook); -+ register_undef_hook(&kgdb_compiled_brkpt_thumb_hook); - - return 0; - } -@@ -224,8 +242,10 @@ int kgdb_arch_init(void) - */ - void kgdb_arch_exit(void) - { -- unregister_undef_hook(&kgdb_brkpt_hook); -- unregister_undef_hook(&kgdb_compiled_brkpt_hook); -+ unregister_undef_hook(&kgdb_brkpt_arm_hook); -+ unregister_undef_hook(&kgdb_brkpt_thumb_hook); -+ unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook); -+ unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook); - unregister_die_notifier(&kgdb_notifier); - } - -diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c -index 1fc309b41f944..8d809724cde52 100644 ---- a/arch/arm/kernel/module-plts.c -+++ b/arch/arm/kernel/module-plts.c -@@ -256,7 +256,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - /* sort by type and symbol index */ - sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL); - -- if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) -+ if (!module_init_layout_section(secstrings + dstsec->sh_name)) - core_plts += count_plts(syms, dstsec->sh_addr, rels, - numrels, s->sh_info); - else -diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c -index 3b69a76d341e7..1626dfc6f6ce6 100644 ---- a/arch/arm/kernel/perf_callchain.c -+++ b/arch/arm/kernel/perf_callchain.c -@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail, - void - perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct frame_tail __user *tail; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr, - void - perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct stackframe fr; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re - - unsigned long perf_instruction_pointer(struct pt_regs *regs) - { -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) -- return perf_guest_cbs->get_guest_ip(); -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ -+ if (guest_cbs && guest_cbs->is_in_guest()) -+ return guest_cbs->get_guest_ip(); - - return instruction_pointer(regs); - } - - unsigned long perf_misc_flags(struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - int misc = 0; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -- if (perf_guest_cbs->is_user_mode()) -+ if (guest_cbs && guest_cbs->is_in_guest()) { -+ if (guest_cbs->is_user_mode()) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; -diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c -index 43b963ea4a0e2..71c98ca3a455a 100644 ---- a/arch/arm/kernel/ptrace.c -+++ b/arch/arm/kernel/ptrace.c -@@ -786,8 +786,9 @@ long arch_ptrace(struct task_struct *child, long request, - break; - - case PTRACE_SET_SYSCALL: -- task_thread_info(child)->abi_syscall = data & -- __NR_SYSCALL_MASK; -+ if (data != -1) -+ data &= __NR_SYSCALL_MASK; -+ 
task_thread_info(child)->abi_syscall = data; - ret = 0; - break; - -diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c -index a41e27ace391f..539897ac28284 100644 ---- a/arch/arm/kernel/signal.c -+++ b/arch/arm/kernel/signal.c -@@ -708,6 +708,7 @@ static_assert(offsetof(siginfo_t, si_upper) == 0x18); - static_assert(offsetof(siginfo_t, si_pkey) == 0x14); - static_assert(offsetof(siginfo_t, si_perf_data) == 0x10); - static_assert(offsetof(siginfo_t, si_perf_type) == 0x14); -+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18); - static_assert(offsetof(siginfo_t, si_band) == 0x0c); - static_assert(offsetof(siginfo_t, si_fd) == 0x10); - static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c); -diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c -index 842427ff2b3cb..23d369ab7e037 100644 ---- a/arch/arm/kernel/smp.c -+++ b/arch/arm/kernel/smp.c -@@ -622,11 +622,6 @@ static void ipi_complete(unsigned int cpu) - /* - * Main handler for inter-processor interrupts - */ --asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) --{ -- handle_IPI(ipinr, regs); --} -- - static void do_handle_IPI(int ipinr) - { - unsigned int cpu = smp_processor_id(); -diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c -new file mode 100644 -index 0000000000000..0dcefc36fb7a0 ---- /dev/null -+++ b/arch/arm/kernel/spectre.c -@@ -0,0 +1,71 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+#include -+#include -+#include -+ -+#include -+ -+static bool _unprivileged_ebpf_enabled(void) -+{ -+#ifdef CONFIG_BPF_SYSCALL -+ return !sysctl_unprivileged_bpf_disabled; -+#else -+ return false; -+#endif -+} -+ -+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ return sprintf(buf, "Mitigation: __user pointer sanitization\n"); -+} -+ -+static unsigned int spectre_v2_state; -+static unsigned int spectre_v2_methods; -+ -+void spectre_v2_update_state(unsigned int state, unsigned int method) -+{ -+ if (state > spectre_v2_state) -+ spectre_v2_state = state; -+ spectre_v2_methods |= method; -+} -+ -+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, -+ char *buf) -+{ -+ const char *method; -+ -+ if (spectre_v2_state == SPECTRE_UNAFFECTED) -+ return sprintf(buf, "%s\n", "Not affected"); -+ -+ if (spectre_v2_state != SPECTRE_MITIGATED) -+ return sprintf(buf, "%s\n", "Vulnerable"); -+ -+ if (_unprivileged_ebpf_enabled()) -+ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); -+ -+ switch (spectre_v2_methods) { -+ case SPECTRE_V2_METHOD_BPIALL: -+ method = "Branch predictor hardening"; -+ break; -+ -+ case SPECTRE_V2_METHOD_ICIALLU: -+ method = "I-cache invalidation"; -+ break; -+ -+ case SPECTRE_V2_METHOD_SMC: -+ case SPECTRE_V2_METHOD_HVC: -+ method = "Firmware call"; -+ break; -+ -+ case SPECTRE_V2_METHOD_LOOP8: -+ method = "History overwrite"; -+ break; -+ -+ default: -+ method = "Multiple mitigations"; -+ break; -+ } -+ -+ return sprintf(buf, "Mitigation: %s\n", method); -+} -diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c -index 76ea4178a55cb..8247749998259 100644 ---- a/arch/arm/kernel/stacktrace.c -+++ b/arch/arm/kernel/stacktrace.c -@@ -53,18 +53,17 @@ int notrace unwind_frame(struct stackframe *frame) - return -EINVAL; - - frame->sp = frame->fp; -- frame->fp = *(unsigned long *)(fp); -- frame->pc = frame->lr; -- frame->lr = *(unsigned long *)(fp + 4); -+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); -+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long 
*)(fp + 4)); - #else - /* check current frame pointer is within bounds */ - if (fp < low + 12 || fp > high - 4) - return -EINVAL; - - /* restore the registers from the stack frame */ -- frame->fp = *(unsigned long *)(fp - 12); -- frame->sp = *(unsigned long *)(fp - 8); -- frame->pc = *(unsigned long *)(fp - 4); -+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); -+ frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); -+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); - #endif - - return 0; -diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c -index 6166ba38bf994..b74bfcf94fb1a 100644 ---- a/arch/arm/kernel/swp_emulate.c -+++ b/arch/arm/kernel/swp_emulate.c -@@ -195,7 +195,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) - destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); - - /* Check access in reasonable access range for both SWP and SWPB */ -- if (!access_ok((address & ~3), 4)) { -+ if (!access_ok((void __user *)(address & ~3), 4)) { - pr_debug("SWP{B} emulation: access to %p not allowed!\n", - (void *)address); - res = -EFAULT; -diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c -index 195dff58bafc7..91e757bb054e6 100644 ---- a/arch/arm/kernel/traps.c -+++ b/arch/arm/kernel/traps.c -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -333,7 +334,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) - if (panic_on_oops) - panic("Fatal exception"); - if (signr) -- do_exit(signr); -+ make_task_dead(signr); - } - - /* -@@ -574,7 +575,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) - if (end < start || flags) - return -EINVAL; - -- if (!access_ok(start, end - start)) -+ if (!access_ok((void __user *)start, end - start)) - return -EFAULT; - - return __do_cache_op(start, end); -@@ -787,10 +788,59 @@ static inline void __init kuser_init(void *vectors) - } - #endif - -+#ifndef CONFIG_CPU_V7M -+static void copy_from_lma(void *vma, void *lma_start, void *lma_end) -+{ -+ memcpy(vma, lma_start, lma_end - lma_start); -+} -+ -+static void flush_vectors(void *vma, size_t offset, size_t size) -+{ -+ unsigned long start = (unsigned long)vma + offset; -+ unsigned long end = start + size; -+ -+ flush_icache_range(start, end); -+} -+ -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+int spectre_bhb_update_vectors(unsigned int method) -+{ -+ extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; -+ extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; -+ void *vec_start, *vec_end; -+ -+ if (system_state > SYSTEM_SCHEDULING) { -+ pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", -+ smp_processor_id()); -+ return SPECTRE_VULNERABLE; -+ } -+ -+ switch (method) { -+ case SPECTRE_V2_METHOD_LOOP8: -+ vec_start = __vectors_bhb_loop8_start; -+ vec_end = __vectors_bhb_loop8_end; -+ break; -+ -+ case SPECTRE_V2_METHOD_BPIALL: -+ vec_start = __vectors_bhb_bpiall_start; -+ vec_end = __vectors_bhb_bpiall_end; -+ break; -+ -+ default: -+ pr_err("CPU%u: unknown Spectre BHB state %d\n", -+ smp_processor_id(), method); -+ return SPECTRE_VULNERABLE; -+ } -+ -+ copy_from_lma(vectors_page, vec_start, vec_end); -+ flush_vectors(vectors_page, 0, vec_end - vec_start); -+ -+ return SPECTRE_MITIGATED; -+} -+#endif -+ - void __init early_trap_init(void *vectors_base) - { --#ifndef CONFIG_CPU_V7M -- unsigned long vectors = (unsigned long)vectors_base; - extern char __stubs_start[], __stubs_end[]; - extern char 
__vectors_start[], __vectors_end[]; - unsigned i; -@@ -811,17 +861,20 @@ void __init early_trap_init(void *vectors_base) - * into the vector page, mapped at 0xffff0000, and ensure these - * are visible to the instruction stream. - */ -- memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); -- memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); -+ copy_from_lma(vectors_base, __vectors_start, __vectors_end); -+ copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); - - kuser_init(vectors_base); - -- flush_icache_range(vectors, vectors + PAGE_SIZE * 2); -+ flush_vectors(vectors_base, 0, PAGE_SIZE * 2); -+} - #else /* ifndef CONFIG_CPU_V7M */ -+void __init early_trap_init(void *vectors_base) -+{ - /* - * on V7-M there is no need to copy the vector table to a dedicated - * memory area. The address is configurable and so a table in the kernel - * image can be used. - */ --#endif - } -+#endif -diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c -index 59fdf257bf8be..d91ed8e4310c2 100644 ---- a/arch/arm/kernel/unwind.c -+++ b/arch/arm/kernel/unwind.c -@@ -301,6 +301,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, - return URC_OK; - } - -+static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl) -+{ -+ unsigned long bytes = 0; -+ unsigned long insn; -+ unsigned long result = 0; -+ -+ /* -+ * unwind_get_byte() will advance `ctrl` one instruction at a time, so -+ * loop until we get an instruction byte where bit 7 is not set. -+ * -+ * Note: This decodes a maximum of 4 bytes to output 28 bits data where -+ * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence -+ * it is sufficient for unwinding the stack. -+ */ -+ do { -+ insn = unwind_get_byte(ctrl); -+ result |= (insn & 0x7f) << (bytes * 7); -+ bytes++; -+ } while (!!(insn & 0x80) && (bytes != sizeof(result))); -+ -+ return result; -+} -+ - /* - * Execute the current unwind instruction. 
- */ -@@ -354,7 +377,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) - if (ret) - goto error; - } else if (insn == 0xb2) { -- unsigned long uleb128 = unwind_get_byte(ctrl); -+ unsigned long uleb128 = unwind_decode_uleb128(ctrl); - - ctrl->vrs[SP] += 0x204 + (uleb128 << 2); - } else { -diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h -index 95bd359912889..f069d1b2318e6 100644 ---- a/arch/arm/lib/bitops.h -+++ b/arch/arm/lib/bitops.h -@@ -28,7 +28,7 @@ UNWIND( .fnend ) - ENDPROC(\name ) - .endm - -- .macro testop, name, instr, store -+ .macro __testop, name, instr, store, barrier - ENTRY( \name ) - UNWIND( .fnstart ) - ands ip, r1, #3 -@@ -38,7 +38,7 @@ UNWIND( .fnstart ) - mov r0, r0, lsr #5 - add r1, r1, r0, lsl #2 @ Get word offset - mov r3, r2, lsl r3 @ create mask -- smp_dmb -+ \barrier - #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) - .arch_extension mp - ALT_SMP(W(pldw) [r1]) -@@ -50,13 +50,21 @@ UNWIND( .fnstart ) - strex ip, r2, [r1] - cmp ip, #0 - bne 1b -- smp_dmb -+ \barrier - cmp r0, #0 - movne r0, #1 - 2: bx lr - UNWIND( .fnend ) - ENDPROC(\name ) - .endm -+ -+ .macro testop, name, instr, store -+ __testop \name, \instr, \store, smp_dmb -+ .endm -+ -+ .macro sync_testop, name, instr, store -+ __testop \name, \instr, \store, __smp_dmb -+ .endm - #else - .macro bitop, name, instr - ENTRY( \name ) -diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S -index b5e8b9ae4c7d4..7fd3600db8efd 100644 ---- a/arch/arm/lib/findbit.S -+++ b/arch/arm/lib/findbit.S -@@ -40,8 +40,8 @@ ENDPROC(_find_first_zero_bit_le) - * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset) - */ - ENTRY(_find_next_zero_bit_le) -- teq r1, #0 -- beq 3b -+ cmp r2, r1 -+ bhs 3b - ands ip, r2, #7 - beq 1b @ If new byte, goto old routine - ARM( ldrb r3, [r0, r2, lsr #3] ) -@@ -81,8 +81,8 @@ ENDPROC(_find_first_bit_le) - * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset) - */ - ENTRY(_find_next_bit_le) -- teq r1, #0 -- beq 3b -+ cmp r2, r1 -+ bhs 3b - ands ip, r2, #7 - beq 1b @ If new byte, goto old routine - ARM( ldrb r3, [r0, r2, lsr #3] ) -@@ -115,8 +115,8 @@ ENTRY(_find_first_zero_bit_be) - ENDPROC(_find_first_zero_bit_be) - - ENTRY(_find_next_zero_bit_be) -- teq r1, #0 -- beq 3b -+ cmp r2, r1 -+ bhs 3b - ands ip, r2, #7 - beq 1b @ If new byte, goto old routine - eor r3, r2, #0x18 @ big endian byte ordering -@@ -149,8 +149,8 @@ ENTRY(_find_first_bit_be) - ENDPROC(_find_first_bit_be) - - ENTRY(_find_next_bit_be) -- teq r1, #0 -- beq 3b -+ cmp r2, r1 -+ bhs 3b - ands ip, r2, #7 - beq 1b @ If new byte, goto old routine - eor r3, r2, #0x18 @ big endian byte ordering -diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S -index 4ebecc67e6e04..f13fe9bc2399a 100644 ---- a/arch/arm/lib/testchangebit.S -+++ b/arch/arm/lib/testchangebit.S -@@ -10,3 +10,7 @@ - .text - - testop _test_and_change_bit, eor, str -+ -+#if __LINUX_ARM_ARCH__ >= 6 -+sync_testop _sync_test_and_change_bit, eor, str -+#endif -diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S -index 009afa0f5b4a7..4d2c5ca620ebf 100644 ---- a/arch/arm/lib/testclearbit.S -+++ b/arch/arm/lib/testclearbit.S -@@ -10,3 +10,7 @@ - .text - - testop _test_and_clear_bit, bicne, strne -+ -+#if __LINUX_ARM_ARCH__ >= 6 -+sync_testop _sync_test_and_clear_bit, bicne, strne -+#endif -diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S -index f3192e55acc87..649dbab65d8d0 100644 ---- a/arch/arm/lib/testsetbit.S -+++ 
b/arch/arm/lib/testsetbit.S -@@ -10,3 +10,7 @@ - .text - - testop _test_and_set_bit, orreq, streq -+ -+#if __LINUX_ARM_ARCH__ >= 6 -+sync_testop _sync_test_and_set_bit, orreq, streq -+#endif -diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c -index 106f83a5ea6d2..35e03f6a62127 100644 ---- a/arch/arm/lib/uaccess_with_memcpy.c -+++ b/arch/arm/lib/uaccess_with_memcpy.c -@@ -121,7 +121,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) - tocopy = n; - - ua_flags = uaccess_save_and_enable(); -- memcpy((void *)to, from, tocopy); -+ __memcpy((void *)to, from, tocopy); - uaccess_restore(ua_flags); - to += tocopy; - from += tocopy; -@@ -188,7 +188,7 @@ __clear_user_memset(void __user *addr, unsigned long n) - tocopy = n; - - ua_flags = uaccess_save_and_enable(); -- memset((void *)addr, 0, tocopy); -+ __memset((void *)addr, 0, tocopy); - uaccess_restore(ua_flags); - addr += tocopy; - n -= tocopy; -diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c -index b99dd8e1c93f1..7ba6cf8261626 100644 ---- a/arch/arm/lib/xor-neon.c -+++ b/arch/arm/lib/xor-neon.c -@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL"); - * While older versions of GCC do not generate incorrect code, they fail to - * recognize the parallel nature of these functions, and emit plain ARM code, - * which is known to be slower than the optimized ARM code in asm-arm/xor.h. -+ * -+ * #warning This code requires at least version 4.6 of GCC - */ --#warning This code requires at least version 4.6 of GCC - #endif - - #pragma GCC diagnostic ignored "-Wunused-variable" -diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c -index 8711d6824c1fa..c8cc993ca8ca1 100644 ---- a/arch/arm/mach-at91/pm.c -+++ b/arch/arm/mach-at91/pm.c -@@ -146,7 +146,7 @@ static const struct wakeup_source_info ws_info[] = { - - static const struct of_device_id sama5d2_ws_ids[] = { - { .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] }, -- { .compatible = "atmel,at91rm9200-rtc", .data = &ws_info[1] }, -+ { .compatible = "atmel,sama5d2-rtc", .data = &ws_info[1] }, - { .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] }, - { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] }, - { .compatible = "usb-ohci", .data = &ws_info[2] }, -@@ -157,24 +157,24 @@ static const struct of_device_id sama5d2_ws_ids[] = { - }; - - static const struct of_device_id sam9x60_ws_ids[] = { -- { .compatible = "atmel,at91sam9x5-rtc", .data = &ws_info[1] }, -+ { .compatible = "microchip,sam9x60-rtc", .data = &ws_info[1] }, - { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] }, - { .compatible = "usb-ohci", .data = &ws_info[2] }, - { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] }, - { .compatible = "usb-ehci", .data = &ws_info[2] }, -- { .compatible = "atmel,at91sam9260-rtt", .data = &ws_info[4] }, -+ { .compatible = "microchip,sam9x60-rtt", .data = &ws_info[4] }, - { .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] }, - { /* sentinel */ } - }; - - static const struct of_device_id sama7g5_ws_ids[] = { -- { .compatible = "atmel,at91sam9x5-rtc", .data = &ws_info[1] }, -+ { .compatible = "microchip,sama7g5-rtc", .data = &ws_info[1] }, - { .compatible = "microchip,sama7g5-ohci", .data = &ws_info[2] }, - { .compatible = "usb-ohci", .data = &ws_info[2] }, - { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] }, - { .compatible = "usb-ehci", .data = &ws_info[2] }, - { .compatible = "microchip,sama7g5-sdhci", .data = &ws_info[3] }, -- { .compatible = "atmel,at91sam9260-rtt", 
.data = &ws_info[4] }, -+ { .compatible = "microchip,sama7g5-rtt", .data = &ws_info[4] }, - { /* sentinel */ } - }; - -@@ -350,9 +350,41 @@ extern u32 at91_pm_suspend_in_sram_sz; - - static int at91_suspend_finish(unsigned long val) - { -+ unsigned char modified_gray_code[] = { -+ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, -+ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, -+ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13, -+ 0x10, 0x11, -+ }; -+ unsigned int tmp, index; - int i; - - if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) { -+ /* -+ * Bootloader will perform DDR recalibration and will try to -+ * restore the ZQ0SR0 with the value saved here. But the -+ * calibration is buggy and restoring some values from ZQ0SR0 -+ * is forbidden and risky thus we need to provide processed -+ * values for these (modified gray code values). -+ */ -+ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0); -+ -+ /* Store pull-down output impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; -+ -+ /* Store pull-up output impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ -+ /* Store pull-down on-die termination impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ -+ /* Store pull-up on-die termination impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ - /* - * The 1st 8 words of memory might get corrupted in the process - * of DDR PHY recalibration; it is saved here in securam and it -@@ -841,10 +873,6 @@ static int __init at91_pm_backup_init(void) - of_scan_flat_dt(at91_pm_backup_scan_memcs, &located); - if (!located) - goto securam_fail; -- -- /* DDR3PHY_ZQ0SR0 */ -- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy + -- 0x188); - } - - return 0; -diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S -index fdb4f63ecde4b..2f0a370a13096 100644 ---- a/arch/arm/mach-at91/pm_suspend.S -+++ b/arch/arm/mach-at91/pm_suspend.S -@@ -169,12 +169,23 @@ sr_ena_2: - cmp tmp1, #UDDRC_STAT_SELFREF_TYPE_SW - bne sr_ena_2 - -- /* Put DDR PHY's DLL in bypass mode for non-backup modes. */ -+ /* Disable DX DLLs for non-backup modes. */ - cmp r7, #AT91_PM_BACKUP - beq sr_ena_3 -- ldr tmp1, [r3, #DDR3PHY_PIR] -- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP -- str tmp1, [r3, #DDR3PHY_PIR] -+ -+ /* Do not soft reset the AC DLL. */ -+ ldr tmp1, [r3, DDR3PHY_ACDLLCR] -+ bic tmp1, tmp1, DDR3PHY_ACDLLCR_DLLSRST -+ str tmp1, [r3, DDR3PHY_ACDLLCR] -+ -+ /* Disable DX DLLs. */ -+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ -+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR] -+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX1DLLCR] - - sr_ena_3: - /* Power down DDR PHY data receivers. */ -@@ -221,10 +232,14 @@ sr_ena_3: - bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0 - str tmp1, [r3, #DDR3PHY_DSGCR] - -- /* Take DDR PHY's DLL out of bypass mode. */ -- ldr tmp1, [r3, #DDR3PHY_PIR] -- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP -- str tmp1, [r3, #DDR3PHY_PIR] -+ /* Enable DX DLLs. 
*/ -+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ -+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR] -+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX1DLLCR] - - /* Enable quasi-dynamic programming. */ - mov tmp1, #0 -diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c -index 512943eae30a5..2e203626eda52 100644 ---- a/arch/arm/mach-axxia/platsmp.c -+++ b/arch/arm/mach-axxia/platsmp.c -@@ -39,6 +39,7 @@ static int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle) - return -ENOENT; - - syscon = of_iomap(syscon_np, 0); -+ of_node_put(syscon_np); - if (!syscon) - return -ENOMEM; - -diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c -index 43829e49ad93f..347bfb7f03e2c 100644 ---- a/arch/arm/mach-bcm/bcm_kona_smc.c -+++ b/arch/arm/mach-bcm/bcm_kona_smc.c -@@ -52,6 +52,7 @@ int __init bcm_kona_smc_init(void) - return -ENODEV; - - prop_val = of_get_address(node, 0, &prop_size, NULL); -+ of_node_put(node); - if (!prop_val) - return -EINVAL; - -diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c -index e4f4b20b83a2d..3fc4ec830e3a3 100644 ---- a/arch/arm/mach-cns3xxx/core.c -+++ b/arch/arm/mach-cns3xxx/core.c -@@ -372,6 +372,7 @@ static void __init cns3xxx_init(void) - /* De-Asscer SATA Reset */ - cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SATA)); - } -+ of_node_put(dn); - - dn = of_find_compatible_node(NULL, NULL, "cavium,cns3420-sdhci"); - if (of_device_is_available(dn)) { -@@ -385,6 +386,7 @@ static void __init cns3xxx_init(void) - cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO)); - cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO)); - } -+ of_node_put(dn); - - pm_power_off = cns3xxx_power_off; - -diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c -index 428012687a802..7f7f6bae21c2d 100644 ---- a/arch/arm/mach-davinci/board-da850-evm.c -+++ b/arch/arm/mach-davinci/board-da850-evm.c -@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void) - int ret; - u32 val; - struct davinci_soc_info *soc_info = &davinci_soc_info; -- u8 rmii_en = soc_info->emac_pdata->rmii_en; -+ u8 rmii_en; - - if (!machine_is_davinci_da850_evm()) - return 0; - -+ rmii_en = soc_info->emac_pdata->rmii_en; -+ - cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG); - - val = __raw_readl(cfg_chip3_base); -diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c -index dd4b164d18317..a9efa7bc2fa12 100644 ---- a/arch/arm/mach-ep93xx/timer-ep93xx.c -+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c -@@ -9,6 +9,7 @@ - #include - #include - #include "soc.h" -+#include "platform.h" - - /************************************************************************* - * Timer handling for EP93xx -@@ -60,7 +61,7 @@ static u64 notrace ep93xx_read_sched_clock(void) - return ret; - } - --u64 ep93xx_clocksource_read(struct clocksource *c) -+static u64 ep93xx_clocksource_read(struct clocksource *c) - { - u64 ret; - -diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig -index 5a48abac6af49..4b554cc8fa58a 100644 ---- a/arch/arm/mach-exynos/Kconfig -+++ b/arch/arm/mach-exynos/Kconfig -@@ -18,7 +18,6 @@ menuconfig ARCH_EXYNOS - select EXYNOS_PMU - select EXYNOS_SROM - select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS -- select GPIOLIB - select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 - select HAVE_ARM_SCU if SMP - select HAVE_S3C2410_I2C if I2C -diff --git a/arch/arm/mach-exynos/exynos.c 
b/arch/arm/mach-exynos/exynos.c -index 8b48326be9fd5..51a247ca4da8c 100644 ---- a/arch/arm/mach-exynos/exynos.c -+++ b/arch/arm/mach-exynos/exynos.c -@@ -149,6 +149,7 @@ static void exynos_map_pmu(void) - np = of_find_matching_node(NULL, exynos_dt_pmu_match); - if (np) - pmu_base_addr = of_iomap(np, 0); -+ of_node_put(np); - } - - static void __init exynos_init_irq(void) -diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c -index a56cc64deeb8f..9ce93e0b6cdc3 100644 ---- a/arch/arm/mach-hisi/platsmp.c -+++ b/arch/arm/mach-hisi/platsmp.c -@@ -67,14 +67,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus) - } - ctrl_base = of_iomap(np, 0); - if (!ctrl_base) { -+ of_node_put(np); - pr_err("failed to map address\n"); - return; - } - if (of_property_read_u32(np, "smp-offset", &offset) < 0) { -+ of_node_put(np); - pr_err("failed to find smp-offset property\n"); - return; - } - ctrl_base += offset; -+ of_node_put(np); - } - } - -@@ -160,6 +163,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) - if (WARN_ON(!node)) - return -1; - ctrl_base = of_iomap(node, 0); -+ of_node_put(node); - - /* set the secondary core boot from DDR */ - remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL); -diff --git a/arch/arm/mach-imx/cpu-imx25.c b/arch/arm/mach-imx/cpu-imx25.c -index b2e1963f473de..2ee2d2813d577 100644 ---- a/arch/arm/mach-imx/cpu-imx25.c -+++ b/arch/arm/mach-imx/cpu-imx25.c -@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void) - - np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim"); - iim_base = of_iomap(np, 0); -+ of_node_put(np); - BUG_ON(!iim_base); - rev = readl(iim_base + MXC_IIMSREV); - iounmap(iim_base); -diff --git a/arch/arm/mach-imx/cpu-imx27.c b/arch/arm/mach-imx/cpu-imx27.c -index bf70e13bbe9ee..1d28939083683 100644 ---- a/arch/arm/mach-imx/cpu-imx27.c -+++ b/arch/arm/mach-imx/cpu-imx27.c -@@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void) - - np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm"); - ccm_base = of_iomap(np, 0); -+ of_node_put(np); - BUG_ON(!ccm_base); - /* - * now we have access to the IO registers. 
As we need -diff --git a/arch/arm/mach-imx/cpu-imx31.c b/arch/arm/mach-imx/cpu-imx31.c -index b9c24b851d1ab..35c544924e509 100644 ---- a/arch/arm/mach-imx/cpu-imx31.c -+++ b/arch/arm/mach-imx/cpu-imx31.c -@@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void) - - np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim"); - iim_base = of_iomap(np, 0); -+ of_node_put(np); - BUG_ON(!iim_base); - - /* read SREV register from IIM module */ -diff --git a/arch/arm/mach-imx/cpu-imx35.c b/arch/arm/mach-imx/cpu-imx35.c -index 80e7d8ab9f1bb..1fe75b39c2d99 100644 ---- a/arch/arm/mach-imx/cpu-imx35.c -+++ b/arch/arm/mach-imx/cpu-imx35.c -@@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void) - - np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim"); - iim_base = of_iomap(np, 0); -+ of_node_put(np); - BUG_ON(!iim_base); - - rev = imx_readl(iim_base + MXC_IIMSREV); -diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c -index ad56263778f93..a67c89bf155dd 100644 ---- a/arch/arm/mach-imx/cpu-imx5.c -+++ b/arch/arm/mach-imx/cpu-imx5.c -@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat) - - np = of_find_compatible_node(NULL, NULL, compat); - iim_base = of_iomap(np, 0); -+ of_node_put(np); - WARN_ON(!iim_base); - - srev = readl(iim_base + IIM_SREV) & 0xff; -diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c -index af12668d0bf51..b9efe9da06e0b 100644 ---- a/arch/arm/mach-imx/mmdc.c -+++ b/arch/arm/mach-imx/mmdc.c -@@ -99,6 +99,7 @@ struct mmdc_pmu { - cpumask_t cpu; - struct hrtimer hrtimer; - unsigned int active_events; -+ int id; - struct device *dev; - struct perf_event *mmdc_events[MMDC_NUM_COUNTERS]; - struct hlist_node node; -@@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer) - static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, - void __iomem *mmdc_base, struct device *dev) - { -- int mmdc_num; -- - *pmu_mmdc = (struct mmdc_pmu) { - .pmu = (struct pmu) { - .task_ctx_nr = perf_invalid_context, -@@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, - .active_events = 0, - }; - -- mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); -+ pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL); - -- return mmdc_num; -+ return pmu_mmdc->id; - } - - static int imx_mmdc_remove(struct platform_device *pdev) - { - struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev); - -+ ida_simple_remove(&mmdc_ida, pmu_mmdc->id); - cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); - perf_pmu_unregister(&pmu_mmdc->pmu); - iounmap(pmu_mmdc->mmdc_base); -@@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b - { - struct mmdc_pmu *pmu_mmdc; - char *name; -- int mmdc_num; - int ret; - const struct of_device_id *of_id = - of_match_device(imx_mmdc_dt_ids, &pdev->dev); -@@ -497,14 +496,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b - cpuhp_mmdc_state = ret; - } - -- mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); -- pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; -- if (mmdc_num == 0) -- name = "mmdc"; -- else -- name = devm_kasprintf(&pdev->dev, -- GFP_KERNEL, "mmdc%d", mmdc_num); -+ ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev); -+ if (ret < 0) -+ goto pmu_free; - -+ name = devm_kasprintf(&pdev->dev, -+ GFP_KERNEL, "mmdc%d", ret); -+ -+ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk; - pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data; - - hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC, 
-@@ -525,6 +524,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b - - pmu_register_err: - pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret); -+ ida_simple_remove(&mmdc_ida, pmu_mmdc->id); - cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node); - hrtimer_cancel(&pmu_mmdc->hrtimer); - pmu_free: -diff --git a/arch/arm/mach-iop32x/include/mach/entry-macro.S b/arch/arm/mach-iop32x/include/mach/entry-macro.S -index 8e6766d4621eb..341e5d9a6616d 100644 ---- a/arch/arm/mach-iop32x/include/mach/entry-macro.S -+++ b/arch/arm/mach-iop32x/include/mach/entry-macro.S -@@ -20,7 +20,7 @@ - mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC - cmp \irqstat, #0 - clzne \irqnr, \irqstat -- rsbne \irqnr, \irqnr, #31 -+ rsbne \irqnr, \irqnr, #32 - .endm - - .macro arch_ret_to_user, tmp1, tmp2 -diff --git a/arch/arm/mach-iop32x/include/mach/irqs.h b/arch/arm/mach-iop32x/include/mach/irqs.h -index c4e78df428e86..e09ae5f48aec5 100644 ---- a/arch/arm/mach-iop32x/include/mach/irqs.h -+++ b/arch/arm/mach-iop32x/include/mach/irqs.h -@@ -9,6 +9,6 @@ - #ifndef __IRQS_H - #define __IRQS_H - --#define NR_IRQS 32 -+#define NR_IRQS 33 - - #endif -diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c -index 2d48bf1398c10..d1e8824cbd824 100644 ---- a/arch/arm/mach-iop32x/irq.c -+++ b/arch/arm/mach-iop32x/irq.c -@@ -32,14 +32,14 @@ static void intstr_write(u32 val) - static void - iop32x_irq_mask(struct irq_data *d) - { -- iop32x_mask &= ~(1 << d->irq); -+ iop32x_mask &= ~(1 << (d->irq - 1)); - intctl_write(iop32x_mask); - } - - static void - iop32x_irq_unmask(struct irq_data *d) - { -- iop32x_mask |= 1 << d->irq; -+ iop32x_mask |= 1 << (d->irq - 1); - intctl_write(iop32x_mask); - } - -@@ -65,7 +65,7 @@ void __init iop32x_init_irq(void) - machine_is_em7210()) - *IOP3XX_PCIIRSR = 0x0f; - -- for (i = 0; i < NR_IRQS; i++) { -+ for (i = 1; i < NR_IRQS; i++) { - irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); - irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); - } -diff --git a/arch/arm/mach-iop32x/irqs.h b/arch/arm/mach-iop32x/irqs.h -index 69858e4e905d1..e1dfc8b4e7d7e 100644 ---- a/arch/arm/mach-iop32x/irqs.h -+++ b/arch/arm/mach-iop32x/irqs.h -@@ -7,36 +7,40 @@ - #ifndef __IOP32X_IRQS_H - #define __IOP32X_IRQS_H - -+/* Interrupts in Linux start at 1, hardware starts at 0 */ -+ -+#define IOP_IRQ(x) ((x) + 1) -+ - /* - * IOP80321 chipset interrupts - */ --#define IRQ_IOP32X_DMA0_EOT 0 --#define IRQ_IOP32X_DMA0_EOC 1 --#define IRQ_IOP32X_DMA1_EOT 2 --#define IRQ_IOP32X_DMA1_EOC 3 --#define IRQ_IOP32X_AA_EOT 6 --#define IRQ_IOP32X_AA_EOC 7 --#define IRQ_IOP32X_CORE_PMON 8 --#define IRQ_IOP32X_TIMER0 9 --#define IRQ_IOP32X_TIMER1 10 --#define IRQ_IOP32X_I2C_0 11 --#define IRQ_IOP32X_I2C_1 12 --#define IRQ_IOP32X_MESSAGING 13 --#define IRQ_IOP32X_ATU_BIST 14 --#define IRQ_IOP32X_PERFMON 15 --#define IRQ_IOP32X_CORE_PMU 16 --#define IRQ_IOP32X_BIU_ERR 17 --#define IRQ_IOP32X_ATU_ERR 18 --#define IRQ_IOP32X_MCU_ERR 19 --#define IRQ_IOP32X_DMA0_ERR 20 --#define IRQ_IOP32X_DMA1_ERR 21 --#define IRQ_IOP32X_AA_ERR 23 --#define IRQ_IOP32X_MSG_ERR 24 --#define IRQ_IOP32X_SSP 25 --#define IRQ_IOP32X_XINT0 27 --#define IRQ_IOP32X_XINT1 28 --#define IRQ_IOP32X_XINT2 29 --#define IRQ_IOP32X_XINT3 30 --#define IRQ_IOP32X_HPI 31 -+#define IRQ_IOP32X_DMA0_EOT IOP_IRQ(0) -+#define IRQ_IOP32X_DMA0_EOC IOP_IRQ(1) -+#define IRQ_IOP32X_DMA1_EOT IOP_IRQ(2) -+#define IRQ_IOP32X_DMA1_EOC IOP_IRQ(3) -+#define IRQ_IOP32X_AA_EOT IOP_IRQ(6) -+#define IRQ_IOP32X_AA_EOC IOP_IRQ(7) 
-+#define IRQ_IOP32X_CORE_PMON IOP_IRQ(8) -+#define IRQ_IOP32X_TIMER0 IOP_IRQ(9) -+#define IRQ_IOP32X_TIMER1 IOP_IRQ(10) -+#define IRQ_IOP32X_I2C_0 IOP_IRQ(11) -+#define IRQ_IOP32X_I2C_1 IOP_IRQ(12) -+#define IRQ_IOP32X_MESSAGING IOP_IRQ(13) -+#define IRQ_IOP32X_ATU_BIST IOP_IRQ(14) -+#define IRQ_IOP32X_PERFMON IOP_IRQ(15) -+#define IRQ_IOP32X_CORE_PMU IOP_IRQ(16) -+#define IRQ_IOP32X_BIU_ERR IOP_IRQ(17) -+#define IRQ_IOP32X_ATU_ERR IOP_IRQ(18) -+#define IRQ_IOP32X_MCU_ERR IOP_IRQ(19) -+#define IRQ_IOP32X_DMA0_ERR IOP_IRQ(20) -+#define IRQ_IOP32X_DMA1_ERR IOP_IRQ(21) -+#define IRQ_IOP32X_AA_ERR IOP_IRQ(23) -+#define IRQ_IOP32X_MSG_ERR IOP_IRQ(24) -+#define IRQ_IOP32X_SSP IOP_IRQ(25) -+#define IRQ_IOP32X_XINT0 IOP_IRQ(27) -+#define IRQ_IOP32X_XINT1 IOP_IRQ(28) -+#define IRQ_IOP32X_XINT2 IOP_IRQ(29) -+#define IRQ_IOP32X_XINT3 IOP_IRQ(30) -+#define IRQ_IOP32X_HPI IOP_IRQ(31) - - #endif -diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig -index 9e0f592d87d8e..35a3430c7942d 100644 ---- a/arch/arm/mach-mediatek/Kconfig -+++ b/arch/arm/mach-mediatek/Kconfig -@@ -30,6 +30,7 @@ config MACH_MT7623 - config MACH_MT7629 - bool "MediaTek MT7629 SoCs support" - default ARCH_MEDIATEK -+ select HAVE_ARM_ARCH_TIMER - - config MACH_MT8127 - bool "MediaTek MT8127 SoCs support" -diff --git a/arch/arm/mach-meson/platsmp.c b/arch/arm/mach-meson/platsmp.c -index 4b8ad728bb42a..32ac60b89fdcc 100644 ---- a/arch/arm/mach-meson/platsmp.c -+++ b/arch/arm/mach-meson/platsmp.c -@@ -71,6 +71,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible, - } - - sram_base = of_iomap(node, 0); -+ of_node_put(node); - if (!sram_base) { - pr_err("Couldn't map SRAM registers\n"); - return; -@@ -91,6 +92,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible, - } - - scu_base = of_iomap(node, 0); -+ of_node_put(node); - if (!scu_base) { - pr_err("Couldn't map SCU registers\n"); - return; -diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c -index 6794e2db1ad5f..ecc46c31004f6 100644 ---- a/arch/arm/mach-mmp/sram.c -+++ b/arch/arm/mach-mmp/sram.c -@@ -72,6 +72,8 @@ static int sram_probe(struct platform_device *pdev) - if (!info) - return -ENOMEM; - -+ platform_set_drvdata(pdev, info); -+ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); -@@ -107,8 +109,6 @@ static int sram_probe(struct platform_device *pdev) - list_add(&info->node, &sram_bank_list); - mutex_unlock(&sram_lock); - -- platform_set_drvdata(pdev, info); -- - dev_info(&pdev->dev, "initialized\n"); - return 0; - -@@ -127,17 +127,19 @@ static int sram_remove(struct platform_device *pdev) - struct sram_bank_info *info; - - info = platform_get_drvdata(pdev); -- if (info == NULL) -- return -ENODEV; - -- mutex_lock(&sram_lock); -- list_del(&info->node); -- mutex_unlock(&sram_lock); -+ if (info->sram_size) { -+ mutex_lock(&sram_lock); -+ list_del(&info->node); -+ mutex_unlock(&sram_lock); -+ -+ gen_pool_destroy(info->gpool); -+ iounmap(info->sram_virt); -+ kfree(info->pool_name); -+ } - -- gen_pool_destroy(info->gpool); -- iounmap(info->sram_virt); -- kfree(info->pool_name); - kfree(info); -+ - return 0; - } - -diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c -index 41b2e8abc9e69..708816caf859c 100644 ---- a/arch/arm/mach-mmp/time.c -+++ b/arch/arm/mach-mmp/time.c -@@ -43,18 +43,21 @@ - static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE; - - /* -- * FIXME: the timer needs some delay to stablize the 
counter capture -+ * Read the timer through the CVWR register. Delay is required after requesting -+ * a read. The CR register cannot be directly read due to metastability issues -+ * documented in the PXA168 software manual. - */ - static inline uint32_t timer_read(void) - { -- int delay = 100; -+ uint32_t val; -+ int delay = 3; - - __raw_writel(1, mmp_timer_base + TMR_CVWR(1)); - - while (delay--) -- cpu_relax(); -+ val = __raw_readl(mmp_timer_base + TMR_CVWR(1)); - -- return __raw_readl(mmp_timer_base + TMR_CVWR(1)); -+ return val; - } - - static u64 notrace mmp_read_sched_clock(void) -diff --git a/arch/arm/mach-mstar/Kconfig b/arch/arm/mach-mstar/Kconfig -index cd300eeedc206..0bf4d312bcfd9 100644 ---- a/arch/arm/mach-mstar/Kconfig -+++ b/arch/arm/mach-mstar/Kconfig -@@ -3,6 +3,7 @@ menuconfig ARCH_MSTARV7 - depends on ARCH_MULTI_V7 - select ARM_GIC - select ARM_HEAVY_MB -+ select HAVE_ARM_ARCH_TIMER - select MST_IRQ - select MSTAR_MSC313_MPLL - help -diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c -index 25c9d184fa4c6..1c57ac4016493 100644 ---- a/arch/arm/mach-mxs/mach-mxs.c -+++ b/arch/arm/mach-mxs/mach-mxs.c -@@ -393,8 +393,10 @@ static void __init mxs_machine_init(void) - - root = of_find_node_by_path("/"); - ret = of_property_read_string(root, "model", &soc_dev_attr->machine); -- if (ret) -+ if (ret) { -+ kfree(soc_dev_attr); - return; -+ } - - soc_dev_attr->family = "Freescale MXS Family"; - soc_dev_attr->soc_id = mxs_get_soc_id(); -diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c -index 9d4a0ab50a468..d63d5eb8d8fdf 100644 ---- a/arch/arm/mach-omap1/clock.c -+++ b/arch/arm/mach-omap1/clock.c -@@ -41,7 +41,7 @@ static DEFINE_SPINLOCK(clockfw_lock); - unsigned long omap1_uart_recalc(struct clk *clk) - { - unsigned int val = __raw_readl(clk->enable_reg); -- return val & clk->enable_bit ? 48000000 : 12000000; -+ return val & 1 << clk->enable_bit ? 
48000000 : 12000000; - } - - unsigned long omap1_sossi_recalc(struct clk *clk) -diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c -index 0411d5508d637..7046d7fa7a0aa 100644 ---- a/arch/arm/mach-omap1/timer.c -+++ b/arch/arm/mach-omap1/timer.c -@@ -165,7 +165,7 @@ err_free_pdata: - kfree(pdata); - - err_free_pdev: -- platform_device_unregister(pdev); -+ platform_device_put(pdev); - - return ret; - } -diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c -index 1610c567a6a3a..10d2f078e4a8e 100644 ---- a/arch/arm/mach-omap2/board-generic.c -+++ b/arch/arm/mach-omap2/board-generic.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #include - #include -diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c -index 6daaa645ae5d9..8d829f3dafe76 100644 ---- a/arch/arm/mach-omap2/display.c -+++ b/arch/arm/mach-omap2/display.c -@@ -211,6 +211,7 @@ static int __init omapdss_init_fbdev(void) - node = of_find_node_by_name(NULL, "omap4_padconf_global"); - if (node) - omap4_dsi_mux_syscon = syscon_node_to_regmap(node); -+ of_node_put(node); - - return 0; - } -@@ -259,13 +260,15 @@ static int __init omapdss_init_of(void) - - if (!pdev) { - pr_err("Unable to find DSS platform device\n"); -+ of_node_put(node); - return -ENODEV; - } - - r = of_platform_populate(node, NULL, NULL, &pdev->dev); -+ put_device(&pdev->dev); -+ of_node_put(node); - if (r) { - pr_err("Unable to populate DSS submodule devices\n"); -- put_device(&pdev->dev); - return r; - } - -diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c -index 5c3845730dbf5..0b80f8bcd3047 100644 ---- a/arch/arm/mach-omap2/omap4-common.c -+++ b/arch/arm/mach-omap2/omap4-common.c -@@ -314,10 +314,12 @@ void __init omap_gic_of_init(void) - - np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic"); - gic_dist_base_addr = of_iomap(np, 0); -+ of_node_put(np); - WARN_ON(!gic_dist_base_addr); - - np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer"); - twd_base = of_iomap(np, 0); -+ of_node_put(np); - WARN_ON(!twd_base); - - skip_errata_init: -diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c -index 0c2936c7a3799..a5e9cffcac10c 100644 ---- a/arch/arm/mach-omap2/omap_hwmod.c -+++ b/arch/arm/mach-omap2/omap_hwmod.c -@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void) - - for_each_matching_node(np, ti_clkctrl_match_table) { - ret = _setup_clkctrl_provider(np); -- if (ret) -+ if (ret) { -+ of_node_put(np); - break; -+ } - } - - return ret; -diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c -index 765809b214e71..bf50acd6b8a3d 100644 ---- a/arch/arm/mach-omap2/pdata-quirks.c -+++ b/arch/arm/mach-omap2/pdata-quirks.c -@@ -587,6 +587,8 @@ pdata_quirks_init_clocks(const struct of_device_id *omap_dt_match_table) - - of_platform_populate(np, omap_dt_match_table, - omap_auxdata_lookup, NULL); -+ -+ of_node_put(np); - } - } - -diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c -index 0a5b87e2a4b07..37d23ae2e9dbe 100644 ---- a/arch/arm/mach-omap2/powerdomain.c -+++ b/arch/arm/mach-omap2/powerdomain.c -@@ -174,7 +174,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) - break; - case PWRDM_STATE_PREV: - prev = pwrdm_read_prev_pwrst(pwrdm); -- if (pwrdm->state != prev) -+ if (prev >= 0 && pwrdm->state != prev) - pwrdm->state_counter[prev]++; - if (prev == PWRDM_POWER_RET) - 
_update_logic_membank_counters(pwrdm); -diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c -index 1b442b1285693..63e73e9b82bc6 100644 ---- a/arch/arm/mach-omap2/prm3xxx.c -+++ b/arch/arm/mach-omap2/prm3xxx.c -@@ -708,6 +708,7 @@ static int omap3xxx_prm_late_init(void) - } - - irq_num = of_irq_get(np, 0); -+ of_node_put(np); - if (irq_num == -EPROBE_DEFER) - return irq_num; - -diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c -index 620ba69c8f114..5677c4a08f376 100644 ---- a/arch/arm/mach-omap2/timer.c -+++ b/arch/arm/mach-omap2/timer.c -@@ -76,6 +76,7 @@ static void __init realtime_counter_init(void) - } - - rate = clk_get_rate(sys_clk); -+ clk_put(sys_clk); - - if (soc_is_dra7xx()) { - /* -diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c -index 3d36f1d951964..3f651df3a71cf 100644 ---- a/arch/arm/mach-orion5x/board-dt.c -+++ b/arch/arm/mach-orion5x/board-dt.c -@@ -63,6 +63,9 @@ static void __init orion5x_dt_init(void) - if (of_machine_is_compatible("maxtor,shared-storage-2")) - mss2_init(); - -+ if (of_machine_is_compatible("lacie,d2-network")) -+ d2net_init(); -+ - of_platform_default_populate(NULL, orion5x_auxdata_lookup, NULL); - } - -diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h -index eb96009e21c4c..b9cfdb4564568 100644 ---- a/arch/arm/mach-orion5x/common.h -+++ b/arch/arm/mach-orion5x/common.h -@@ -75,6 +75,12 @@ extern void mss2_init(void); - static inline void mss2_init(void) {} - #endif - -+#ifdef CONFIG_MACH_D2NET_DT -+void d2net_init(void); -+#else -+static inline void d2net_init(void) {} -+#endif -+ - /***************************************************************************** - * Helpers to access Orion registers - ****************************************************************************/ -diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c -index 2e35354b61f56..167e871f059ef 100644 ---- a/arch/arm/mach-pxa/cm-x300.c -+++ b/arch/arm/mach-pxa/cm-x300.c -@@ -354,13 +354,13 @@ static struct platform_device cm_x300_spi_gpio = { - static struct gpiod_lookup_table cm_x300_spi_gpiod_table = { - .dev_id = "spi_gpio", - .table = { -- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL, -+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE, - "sck", GPIO_ACTIVE_HIGH), -- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN, -+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE, - "mosi", GPIO_ACTIVE_HIGH), -- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT, -+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE, - "miso", GPIO_ACTIVE_HIGH), -- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS, -+ GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE, - "cs", GPIO_ACTIVE_HIGH), - { }, - }, -diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c -index cd9fa465b9b2a..9aee8e0f2bb1d 100644 ---- a/arch/arm/mach-pxa/magician.c -+++ b/arch/arm/mach-pxa/magician.c -@@ -681,7 +681,7 @@ static struct platform_device bq24022 = { - static struct gpiod_lookup_table bq24022_gpiod_table = { - .dev_id = "gpio-regulator", - .table = { -- GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2, -+ GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE, - NULL, GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, - "enable", GPIO_ACTIVE_LOW), -diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c -index 83cfbb882a2d4..7f6bd7f069e49 100644 ---- a/arch/arm/mach-pxa/sharpsl_pm.c -+++ b/arch/arm/mach-pxa/sharpsl_pm.c -@@ -220,8 +220,6 @@ 
void sharpsl_battery_kick(void) - { - schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125)); - } --EXPORT_SYMBOL(sharpsl_battery_kick); -- - - static void sharpsl_battery_thread(struct work_struct *private_) - { -diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c -index 371008e9bb029..264de0bc97d68 100644 ---- a/arch/arm/mach-pxa/spitz.c -+++ b/arch/arm/mach-pxa/spitz.c -@@ -9,7 +9,6 @@ - */ - - #include --#include /* symbol_get ; symbol_put */ - #include - #include - #include -@@ -514,17 +513,6 @@ static struct pxa2xx_spi_chip spitz_ads7846_chip = { - .gpio_cs = SPITZ_GPIO_ADS7846_CS, - }; - --static void spitz_bl_kick_battery(void) --{ -- void (*kick_batt)(void); -- -- kick_batt = symbol_get(sharpsl_battery_kick); -- if (kick_batt) { -- kick_batt(); -- symbol_put(sharpsl_battery_kick); -- } --} -- - static struct gpiod_lookup_table spitz_lcdcon_gpio_table = { - .dev_id = "spi2.1", - .table = { -@@ -552,7 +540,7 @@ static struct corgi_lcd_platform_data spitz_lcdcon_info = { - .max_intensity = 0x2f, - .default_intensity = 0x1f, - .limit_mask = 0x0b, -- .kick_battery = spitz_bl_kick_battery, -+ .kick_battery = sharpsl_battery_kick, - }; - - static struct pxa2xx_spi_chip spitz_lcdcon_chip = { -diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c -index 431709725d02b..ded5e343e1984 100644 ---- a/arch/arm/mach-pxa/tosa.c -+++ b/arch/arm/mach-pxa/tosa.c -@@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = { - .table = { - GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT, - "cd", GPIO_ACTIVE_LOW), -- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP, -+ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE, - "wp", GPIO_ACTIVE_LOW), -- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON, -+ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE, - "power", GPIO_ACTIVE_HIGH), - { }, - }, -diff --git a/arch/arm/mach-s3c/irq-s3c24xx.c b/arch/arm/mach-s3c/irq-s3c24xx.c -index 3edc5f614eefc..c1c2f041ad3b1 100644 ---- a/arch/arm/mach-s3c/irq-s3c24xx.c -+++ b/arch/arm/mach-s3c/irq-s3c24xx.c -@@ -361,11 +361,25 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, - static asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs) - { - do { -- if (likely(s3c_intc[0])) -- if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) -- continue; -+ /* -+ * For platform based machines, neither ERR nor NULL can happen here. -+ * The s3c24xx_handle_irq() will be set as IRQ handler iff this succeeds: -+ * -+ * s3c_intc[0] = s3c24xx_init_intc() -+ * -+ * If this fails, the next calls to s3c24xx_init_intc() won't be executed. -+ * -+ * For DT machine, s3c_init_intc_of() could set the IRQ handler without -+ * setting s3c_intc[0] only if it was called with num_ctrl=0. There is no -+ * such code path, so again the s3c_intc[0] will have a valid pointer if -+ * set_handle_irq() is called. -+ * -+ * Therefore in s3c24xx_handle_irq(), the s3c_intc[0] is always something. 
-+ */ -+ if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) -+ continue; - -- if (s3c_intc[2]) -+ if (!IS_ERR_OR_NULL(s3c_intc[2])) - if (s3c24xx_handle_intc(s3c_intc[2], regs, 64)) - continue; - -diff --git a/arch/arm/mach-s3c/mach-jive.c b/arch/arm/mach-s3c/mach-jive.c -index 0785638a9069b..7d15b84ae217e 100644 ---- a/arch/arm/mach-s3c/mach-jive.c -+++ b/arch/arm/mach-s3c/mach-jive.c -@@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options) - unsigned long set; - - if (options == NULL || options[0] == '\0') -- return 0; -+ return 1; - - if (kstrtoul(options, 10, &set)) { - printk(KERN_ERR "failed to parse mtdset=%s\n", options); -- return 0; -+ return 1; - } - - switch (set) { -@@ -255,7 +255,7 @@ static int __init jive_mtdset(char *options) - "using default.", set); - } - -- return 0; -+ return 1; - } - - /* parse the mtdset= option given to the kernel command line */ -diff --git a/arch/arm/mach-s3c/s3c64xx.c b/arch/arm/mach-s3c/s3c64xx.c -index 4dfb648142f2a..17f0065031490 100644 ---- a/arch/arm/mach-s3c/s3c64xx.c -+++ b/arch/arm/mach-s3c/s3c64xx.c -@@ -173,7 +173,8 @@ static struct samsung_pwm_variant s3c64xx_pwm_variant = { - .tclk_mask = (1 << 7) | (1 << 6) | (1 << 5), - }; - --void __init s3c64xx_set_timer_source(unsigned int event, unsigned int source) -+void __init s3c64xx_set_timer_source(enum s3c64xx_timer_mode event, -+ enum s3c64xx_timer_mode source) - { - s3c64xx_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1; - s3c64xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source)); -diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c -index 1dbe98948ce30..9627c4cf3e41d 100644 ---- a/arch/arm/mach-sa1100/jornada720_ssp.c -+++ b/arch/arm/mach-sa1100/jornada720_ssp.c -@@ -1,5 +1,5 @@ - // SPDX-License-Identifier: GPL-2.0-only --/** -+/* - * arch/arm/mac-sa1100/jornada720_ssp.c - * - * Copyright (C) 2006/2007 Kristoffer Ericson -@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags; - - /** - * jornada_ssp_reverse - reverses input byte -+ * @byte: input byte to reverse - * - * we need to reverse all data we receive from the mcu due to its physical location - * returns : 01110111 -> 11101110 -@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse); - - /** - * jornada_ssp_byte - waits for ready ssp bus and sends byte -+ * @byte: input byte to transmit - * - * waits for fifo buffer to clear and then transmits, if it doesn't then we will - * timeout after rounds. Needs mcu running before its called. 
-@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte); - - /** - * jornada_ssp_inout - decide if input is command or trading byte -+ * @byte: input byte to send (may be %TXDUMMY) - * - * returns : (jornada_ssp_byte(byte)) on success - * : %-ETIMEDOUT on timeout failure -diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c -index ee949255ced3f..ba44cec5e59ac 100644 ---- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c -+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c -@@ -125,6 +125,7 @@ remove: - - list_for_each_entry_safe(pos, tmp, &quirk_list, list) { - list_del(&pos->list); -+ of_node_put(pos->np); - kfree(pos); - } - -@@ -154,8 +155,10 @@ static int __init rcar_gen2_regulator_quirk(void) - return -ENODEV; - - for_each_matching_node_and_match(np, rcar_gen2_quirk_match, &id) { -- if (!of_device_is_available(np)) -+ if (!of_device_is_available(np)) { -+ of_node_put(np); - break; -+ } - - ret = of_property_read_u32(np, "reg", &addr); - if (ret) /* Skip invalid entry and continue */ -@@ -164,6 +167,7 @@ static int __init rcar_gen2_regulator_quirk(void) - quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); - if (!quirk) { - ret = -ENOMEM; -+ of_node_put(np); - goto err_mem; - } - -@@ -171,11 +175,12 @@ static int __init rcar_gen2_regulator_quirk(void) - memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg)); - - quirk->id = id; -- quirk->np = np; -+ quirk->np = of_node_get(np); - quirk->i2c_msg.addr = addr; - - ret = of_irq_parse_one(np, 0, argsa); - if (ret) { /* Skip invalid entry and continue */ -+ of_node_put(np); - kfree(quirk); - continue; - } -@@ -222,6 +227,7 @@ err_free: - err_mem: - list_for_each_entry_safe(pos, tmp, &quirk_list, list) { - list_del(&pos->list); -+ of_node_put(pos->np); - kfree(pos); - } - -diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig -index 43ddec677c0b3..594edf9bbea44 100644 ---- a/arch/arm/mach-socfpga/Kconfig -+++ b/arch/arm/mach-socfpga/Kconfig -@@ -2,6 +2,7 @@ - menuconfig ARCH_INTEL_SOCFPGA - bool "Altera SOCFPGA family" - depends on ARCH_MULTI_V7 -+ select ARCH_HAS_RESET_CONTROLLER - select ARCH_SUPPORTS_BIG_ENDIAN - select ARM_AMBA - select ARM_GIC -@@ -18,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA - select PL310_ERRATA_727915 - select PL310_ERRATA_753970 if PL310 - select PL310_ERRATA_769419 -+ select RESET_CONTROLLER - - if ARCH_INTEL_SOCFPGA - config SOCFPGA_SUSPEND -diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h -index fc2608b18a0d0..18f01190dcfd4 100644 ---- a/arch/arm/mach-socfpga/core.h -+++ b/arch/arm/mach-socfpga/core.h -@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr; - u32 socfpga_sdram_self_refresh(u32 sdr_base); - extern unsigned int socfpga_sdram_self_refresh_sz; - --extern char secondary_trampoline, secondary_trampoline_end; -+extern char secondary_trampoline[], secondary_trampoline_end[]; - - extern unsigned long socfpga_cpu1start_addr; - -diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c -index fbb80b883e5dd..201191cf68f32 100644 ---- a/arch/arm/mach-socfpga/platsmp.c -+++ b/arch/arm/mach-socfpga/platsmp.c -@@ -20,14 +20,14 @@ - - static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) - { -- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; -+ int trampoline_size = secondary_trampoline_end - secondary_trampoline; - - if (socfpga_cpu1start_addr) { - /* This will put CPU #1 into reset. 
*/ - writel(RSTMGR_MPUMODRST_CPU1, - rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST); - -- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); -+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); - - writel(__pa_symbol(secondary_startup), - sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)); -@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) - - static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle) - { -- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; -+ int trampoline_size = secondary_trampoline_end - secondary_trampoline; - - if (socfpga_cpu1start_addr) { - writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + - SOCFPGA_A10_RSTMGR_MODMPURST); -- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); -+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); - - writel(__pa_symbol(secondary_startup), - sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff)); -diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c -index a0554d7d04f7c..e1adc098f89ac 100644 ---- a/arch/arm/mach-vexpress/dcscb.c -+++ b/arch/arm/mach-vexpress/dcscb.c -@@ -144,6 +144,7 @@ static int __init dcscb_init(void) - if (!node) - return -ENODEV; - dcscb_base = of_iomap(node, 0); -+ of_node_put(node); - if (!dcscb_base) - return -EADDRNOTAVAIL; - cfg = readl_relaxed(dcscb_base + DCS_CFG_R); -diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c -index 1da11bdb1dfbd..1c6500c4e6a17 100644 ---- a/arch/arm/mach-vexpress/spc.c -+++ b/arch/arm/mach-vexpress/spc.c -@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void) - } - - cluster = topology_physical_package_id(cpu_dev->id); -- if (init_opp_table[cluster]) -+ if (cluster < 0 || init_opp_table[cluster]) - continue; - - if (ve_init_opp_table(cpu_dev)) -diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c -index e1ca6a5732d27..15e8a321a713b 100644 ---- a/arch/arm/mach-zynq/common.c -+++ b/arch/arm/mach-zynq/common.c -@@ -77,6 +77,7 @@ static int __init zynq_get_revision(void) - } - - zynq_devcfg_base = of_iomap(np, 0); -+ of_node_put(np); - if (!zynq_devcfg_base) { - pr_err("%s: Unable to map I/O memory\n", __func__); - return -1; -diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c -index 37707614885a5..9765b3f4c2fc5 100644 ---- a/arch/arm/mach-zynq/slcr.c -+++ b/arch/arm/mach-zynq/slcr.c -@@ -213,6 +213,7 @@ int __init zynq_early_slcr_init(void) - zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr"); - if (IS_ERR(zynq_slcr_regmap)) { - pr_err("%s: failed to find zynq-slcr\n", __func__); -+ of_node_put(np); - return -ENODEV; - } - -diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig -index 8355c38958942..f43cdc1cfbaed 100644 ---- a/arch/arm/mm/Kconfig -+++ b/arch/arm/mm/Kconfig -@@ -750,7 +750,7 @@ config CPU_BIG_ENDIAN - config CPU_ENDIAN_BE8 - bool - depends on CPU_BIG_ENDIAN -- default CPU_V6 || CPU_V6K || CPU_V7 -+ default CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M - help - Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. - -@@ -830,6 +830,7 @@ config CPU_BPREDICT_DISABLE - - config CPU_SPECTRE - bool -+ select GENERIC_CPU_VULNERABILITIES - - config HARDEN_BRANCH_PREDICTOR - bool "Harden the branch predictor against aliasing attacks" if EXPERT -@@ -850,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR - - If unsure, say Y. 
- -+config HARDEN_BRANCH_HISTORY -+ bool "Harden Spectre style attacks against branch history" if EXPERT -+ depends on CPU_SPECTRE -+ default y -+ help -+ Speculation attacks against some high-performance processors can -+ make use of branch history to influence future speculation. When -+ taking an exception, a sequence of branches overwrites the branch -+ history, or branch history is invalidated. -+ - config TLS_REG_EMUL - bool - select NEED_KUSER_HELPERS -diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c -index ea81e89e77400..bcefe3f51744c 100644 ---- a/arch/arm/mm/alignment.c -+++ b/arch/arm/mm/alignment.c -@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - if (type == TYPE_LDST) - do_alignment_finish_ldst(addr, instr, regs, offset); - -+ if (thumb_mode(regs)) -+ regs->ARM_cpsr = it_advance(regs->ARM_cpsr); -+ - return 0; - - bad_or_fault: -diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c -index fb688003d156e..712da6a81b23f 100644 ---- a/arch/arm/mm/dump.c -+++ b/arch/arm/mm/dump.c -@@ -346,7 +346,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) - addr = start + i * PMD_SIZE; - domain = get_domain_name(pmd); - if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) -- note_page(st, addr, 3, pmd_val(*pmd), domain); -+ note_page(st, addr, 4, pmd_val(*pmd), domain); - else - walk_pte(st, pmd, addr, domain); - -diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index efa4020250315..af5177801fb10 100644 ---- a/arch/arm/mm/fault.c -+++ b/arch/arm/mm/fault.c -@@ -125,7 +125,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, - show_pte(KERN_ALERT, mm, addr); - die("Oops", regs, fsr); - bust_spinlocks(0); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - /* -diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c -index 80fb5a4a5c050..2660bdfcad4d0 100644 ---- a/arch/arm/mm/ioremap.c -+++ b/arch/arm/mm/ioremap.c -@@ -479,3 +479,11 @@ void __init early_ioremap_init(void) - { - early_ioremap_setup(); - } -+ -+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, -+ unsigned long flags) -+{ -+ unsigned long pfn = PHYS_PFN(offset); -+ -+ return memblock_is_map_memory(pfn); -+} -diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c -index 9c348042a7244..948ada4a2938c 100644 ---- a/arch/arm/mm/kasan_init.c -+++ b/arch/arm/mm/kasan_init.c -@@ -226,7 +226,7 @@ void __init kasan_init(void) - BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) != - pgd_index(KASAN_SHADOW_END)); - memcpy(tmp_pmd_table, -- pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), -+ (void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), - sizeof(tmp_pmd_table)); - set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)], - __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); -@@ -264,12 +264,17 @@ void __init kasan_init(void) - - /* - * 1. The module global variables are in MODULES_VADDR ~ MODULES_END, -- * so we need to map this area. -+ * so we need to map this area if CONFIG_KASAN_VMALLOC=n. With -+ * VMALLOC support KASAN will manage this region dynamically, -+ * refer to kasan_populate_vmalloc() and ARM's implementation of -+ * module_alloc(). - * 2. PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR - * ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't - * use kasan_populate_zero_shadow. 
- */ -- create_mapping((void *)MODULES_VADDR, (void *)(PKMAP_BASE + PMD_SIZE)); -+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && IS_ENABLED(CONFIG_MODULES)) -+ create_mapping((void *)MODULES_VADDR, (void *)(MODULES_END)); -+ create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE)); - - /* - * KAsan may reuse the contents of kasan_early_shadow_pte directly, so -diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c -index a4e0060051070..83a91e0ab8480 100644 ---- a/arch/arm/mm/mmu.c -+++ b/arch/arm/mm/mmu.c -@@ -212,12 +212,14 @@ early_param("ecc", early_ecc); - static int __init early_cachepolicy(char *p) - { - pr_warn("cachepolicy kernel parameter not supported without cp15\n"); -+ return 0; - } - early_param("cachepolicy", early_cachepolicy); - - static int __init noalign_setup(char *__unused) - { - pr_warn("noalign kernel parameter not supported without cp15\n"); -+ return 1; - } - __setup("noalign", noalign_setup); - -@@ -294,6 +296,17 @@ static struct mem_type mem_types[] __ro_after_init = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, - .domain = DOMAIN_KERNEL, - }, -+ [MT_MEMORY_RO] = { -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | -+ L_PTE_XN | L_PTE_RDONLY, -+ .prot_l1 = PMD_TYPE_TABLE, -+#ifdef CONFIG_ARM_LPAE -+ .prot_sect = PMD_TYPE_SECT | L_PMD_SECT_RDONLY | PMD_SECT_AP2, -+#else -+ .prot_sect = PMD_TYPE_SECT, -+#endif -+ .domain = DOMAIN_KERNEL, -+ }, - [MT_ROM] = { - .prot_sect = PMD_TYPE_SECT, - .domain = DOMAIN_KERNEL, -@@ -390,9 +403,9 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) - BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START); - BUG_ON(idx >= __end_of_fixed_addresses); - -- /* we only support device mappings until pgprot_kernel has been set */ -+ /* We support only device mappings before pgprot_kernel is set. 
*/ - if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) && -- pgprot_val(pgprot_kernel) == 0)) -+ pgprot_val(prot) && pgprot_val(pgprot_kernel) == 0)) - return; - - if (pgprot_val(prot)) -@@ -487,6 +500,7 @@ static void __init build_mem_type_table(void) - - /* Also setup NX memory mapping */ - mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; -+ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN; - } - if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { - /* -@@ -566,6 +580,7 @@ static void __init build_mem_type_table(void) - mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - #endif - - /* -@@ -585,6 +600,8 @@ static void __init build_mem_type_table(void) - mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; -+ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S; -+ mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; -@@ -645,6 +662,8 @@ static void __init build_mem_type_table(void) - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; - mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; -+ mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; -+ mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; - mem_types[MT_ROM].prot_sect |= cp->pmd; -@@ -1358,7 +1377,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) - map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); - map.virtual = FDT_FIXED_BASE; - map.length = FDT_FIXED_SIZE; -- map.type = MT_ROM; -+ map.type = MT_MEMORY_RO; - create_mapping(&map); - } - -diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c -index 2658f52903da6..80613674deb5b 100644 ---- a/arch/arm/mm/nommu.c -+++ b/arch/arm/mm/nommu.c -@@ -26,6 +26,13 @@ - - unsigned long vectors_base; - -+/* -+ * empty_zero_page is a special page that is used for -+ * zero-initialized data and COW. -+ */ -+struct page *empty_zero_page; -+EXPORT_SYMBOL(empty_zero_page); -+ - #ifdef CONFIG_ARM_MPU - struct mpu_rgn_info mpu_rgn_info; - #endif -@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void) - */ - void __init paging_init(const struct machine_desc *mdesc) - { -+ void *zero_page; -+ - early_trap_init((void *)vectors_base); - mpu_setup(); -+ -+ /* allocate the zero page. 
*/ -+ zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); -+ if (!zero_page) -+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n", -+ __func__, PAGE_SIZE, PAGE_SIZE); -+ - bootmem_init(); -+ -+ empty_zero_page = virt_to_page(zero_page); -+ flush_dcache_page(empty_zero_page); - } - - /* -diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c -index 114c05ab4dd91..8bc7a2d6d6c7f 100644 ---- a/arch/arm/mm/proc-v7-bugs.c -+++ b/arch/arm/mm/proc-v7-bugs.c -@@ -6,8 +6,35 @@ - #include - #include - #include -+#include - #include - -+#ifdef CONFIG_ARM_PSCI -+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) -+{ -+ struct arm_smccc_res res; -+ -+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, -+ ARM_SMCCC_ARCH_WORKAROUND_1, &res); -+ -+ switch ((int)res.a0) { -+ case SMCCC_RET_SUCCESS: -+ return SPECTRE_MITIGATED; -+ -+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: -+ return SPECTRE_UNAFFECTED; -+ -+ default: -+ return SPECTRE_VULNERABLE; -+ } -+} -+#else -+static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) -+{ -+ return SPECTRE_VULNERABLE; -+} -+#endif -+ - #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR - DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); - -@@ -36,13 +63,60 @@ static void __maybe_unused call_hvc_arch_workaround_1(void) - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); - } - --static void cpu_v7_spectre_init(void) -+static unsigned int spectre_v2_install_workaround(unsigned int method) - { - const char *spectre_v2_method = NULL; - int cpu = smp_processor_id(); - - if (per_cpu(harden_branch_predictor_fn, cpu)) -- return; -+ return SPECTRE_MITIGATED; -+ -+ switch (method) { -+ case SPECTRE_V2_METHOD_BPIALL: -+ per_cpu(harden_branch_predictor_fn, cpu) = -+ harden_branch_predictor_bpiall; -+ spectre_v2_method = "BPIALL"; -+ break; -+ -+ case SPECTRE_V2_METHOD_ICIALLU: -+ per_cpu(harden_branch_predictor_fn, cpu) = -+ harden_branch_predictor_iciallu; -+ spectre_v2_method = "ICIALLU"; -+ break; -+ -+ case SPECTRE_V2_METHOD_HVC: -+ per_cpu(harden_branch_predictor_fn, cpu) = -+ call_hvc_arch_workaround_1; -+ cpu_do_switch_mm = cpu_v7_hvc_switch_mm; -+ spectre_v2_method = "hypervisor"; -+ break; -+ -+ case SPECTRE_V2_METHOD_SMC: -+ per_cpu(harden_branch_predictor_fn, cpu) = -+ call_smc_arch_workaround_1; -+ cpu_do_switch_mm = cpu_v7_smc_switch_mm; -+ spectre_v2_method = "firmware"; -+ break; -+ } -+ -+ if (spectre_v2_method) -+ pr_info("CPU%u: Spectre v2: using %s workaround\n", -+ smp_processor_id(), spectre_v2_method); -+ -+ return SPECTRE_MITIGATED; -+} -+#else -+static unsigned int spectre_v2_install_workaround(unsigned int method) -+{ -+ pr_info_once("Spectre V2: workarounds disabled by configuration\n"); -+ -+ return SPECTRE_VULNERABLE; -+} -+#endif -+ -+static void cpu_v7_spectre_v2_init(void) -+{ -+ unsigned int state, method = 0; - - switch (read_cpuid_part()) { - case ARM_CPU_PART_CORTEX_A8: -@@ -51,69 +125,133 @@ static void cpu_v7_spectre_init(void) - case ARM_CPU_PART_CORTEX_A17: - case ARM_CPU_PART_CORTEX_A73: - case ARM_CPU_PART_CORTEX_A75: -- per_cpu(harden_branch_predictor_fn, cpu) = -- harden_branch_predictor_bpiall; -- spectre_v2_method = "BPIALL"; -+ state = SPECTRE_MITIGATED; -+ method = SPECTRE_V2_METHOD_BPIALL; - break; - - case ARM_CPU_PART_CORTEX_A15: - case ARM_CPU_PART_BRAHMA_B15: -- per_cpu(harden_branch_predictor_fn, cpu) = -- harden_branch_predictor_iciallu; -- spectre_v2_method = "ICIALLU"; -+ state = SPECTRE_MITIGATED; -+ method = SPECTRE_V2_METHOD_ICIALLU; - break; - 
--#ifdef CONFIG_ARM_PSCI - case ARM_CPU_PART_BRAHMA_B53: - /* Requires no workaround */ -+ state = SPECTRE_UNAFFECTED; - break; -+ - default: - /* Other ARM CPUs require no workaround */ -- if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) -+ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { -+ state = SPECTRE_UNAFFECTED; - break; -+ } -+ - fallthrough; -- /* Cortex A57/A72 require firmware workaround */ -- case ARM_CPU_PART_CORTEX_A57: -- case ARM_CPU_PART_CORTEX_A72: { -- struct arm_smccc_res res; - -- arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, -- ARM_SMCCC_ARCH_WORKAROUND_1, &res); -- if ((int)res.a0 != 0) -- return; -+ /* Cortex A57/A72 require firmware workaround */ -+ case ARM_CPU_PART_CORTEX_A57: -+ case ARM_CPU_PART_CORTEX_A72: -+ state = spectre_v2_get_cpu_fw_mitigation_state(); -+ if (state != SPECTRE_MITIGATED) -+ break; - - switch (arm_smccc_1_1_get_conduit()) { - case SMCCC_CONDUIT_HVC: -- per_cpu(harden_branch_predictor_fn, cpu) = -- call_hvc_arch_workaround_1; -- cpu_do_switch_mm = cpu_v7_hvc_switch_mm; -- spectre_v2_method = "hypervisor"; -+ method = SPECTRE_V2_METHOD_HVC; - break; - - case SMCCC_CONDUIT_SMC: -- per_cpu(harden_branch_predictor_fn, cpu) = -- call_smc_arch_workaround_1; -- cpu_do_switch_mm = cpu_v7_smc_switch_mm; -- spectre_v2_method = "firmware"; -+ method = SPECTRE_V2_METHOD_SMC; - break; - - default: -+ state = SPECTRE_VULNERABLE; - break; - } - } --#endif -+ -+ if (state == SPECTRE_MITIGATED) -+ state = spectre_v2_install_workaround(method); -+ -+ spectre_v2_update_state(state, method); -+} -+ -+#ifdef CONFIG_HARDEN_BRANCH_HISTORY -+static int spectre_bhb_method; -+ -+static const char *spectre_bhb_method_name(int method) -+{ -+ switch (method) { -+ case SPECTRE_V2_METHOD_LOOP8: -+ return "loop"; -+ -+ case SPECTRE_V2_METHOD_BPIALL: -+ return "BPIALL"; -+ -+ default: -+ return "unknown"; - } -+} - -- if (spectre_v2_method) -- pr_info("CPU%u: Spectre v2: using %s workaround\n", -- smp_processor_id(), spectre_v2_method); -+static int spectre_bhb_install_workaround(int method) -+{ -+ if (spectre_bhb_method != method) { -+ if (spectre_bhb_method) { -+ pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n", -+ smp_processor_id()); -+ -+ return SPECTRE_VULNERABLE; -+ } -+ -+ if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE) -+ return SPECTRE_VULNERABLE; -+ -+ spectre_bhb_method = method; -+ -+ pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n", -+ smp_processor_id(), spectre_bhb_method_name(method)); -+ } -+ -+ return SPECTRE_MITIGATED; - } - #else --static void cpu_v7_spectre_init(void) -+static int spectre_bhb_install_workaround(int method) - { -+ return SPECTRE_VULNERABLE; - } - #endif - -+static void cpu_v7_spectre_bhb_init(void) -+{ -+ unsigned int state, method = 0; -+ -+ switch (read_cpuid_part()) { -+ case ARM_CPU_PART_CORTEX_A15: -+ case ARM_CPU_PART_BRAHMA_B15: -+ case ARM_CPU_PART_CORTEX_A57: -+ case ARM_CPU_PART_CORTEX_A72: -+ state = SPECTRE_MITIGATED; -+ method = SPECTRE_V2_METHOD_LOOP8; -+ break; -+ -+ case ARM_CPU_PART_CORTEX_A73: -+ case ARM_CPU_PART_CORTEX_A75: -+ state = SPECTRE_MITIGATED; -+ method = SPECTRE_V2_METHOD_BPIALL; -+ break; -+ -+ default: -+ state = SPECTRE_UNAFFECTED; -+ break; -+ } -+ -+ if (state == SPECTRE_MITIGATED) -+ state = spectre_bhb_install_workaround(method); -+ -+ spectre_v2_update_state(state, method); -+} -+ - static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, - u32 mask, const char *msg) - { -@@ -142,16 +280,18 @@ static bool 
check_spectre_auxcr(bool *warned, u32 bit) - void cpu_v7_ca8_ibe(void) - { - if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) -- cpu_v7_spectre_init(); -+ cpu_v7_spectre_v2_init(); - } - - void cpu_v7_ca15_ibe(void) - { - if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) -- cpu_v7_spectre_init(); -+ cpu_v7_spectre_v2_init(); -+ cpu_v7_spectre_bhb_init(); - } - - void cpu_v7_bugs_init(void) - { -- cpu_v7_spectre_init(); -+ cpu_v7_spectre_v2_init(); -+ cpu_v7_spectre_bhb_init(); - } -diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile -index 303400fa2cdf7..2aec85ab1e8b9 100644 ---- a/arch/arm/nwfpe/Makefile -+++ b/arch/arm/nwfpe/Makefile -@@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \ - entry.o - - nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o -+ -+# Try really hard to avoid generating calls to __aeabi_uldivmod() from -+# float64_rem() due to loop elision. -+ifdef CONFIG_CC_IS_CLANG -+CFLAGS_softfloat.o += -mllvm -replexitval=never -+endif -diff --git a/arch/arm/probes/decode.h b/arch/arm/probes/decode.h -index 9731735989921..facc889d05eee 100644 ---- a/arch/arm/probes/decode.h -+++ b/arch/arm/probes/decode.h -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - - void __init arm_probes_decode_init(void); -@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void); - #endif - - --/* -- * Update ITSTATE after normal execution of an IT block instruction. -- * -- * The 8 IT state bits are split into two parts in CPSR: -- * ITSTATE<1:0> are in CPSR<26:25> -- * ITSTATE<7:2> are in CPSR<15:10> -- */ --static inline unsigned long it_advance(unsigned long cpsr) -- { -- if ((cpsr & 0x06000400) == 0) { -- /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ -- cpsr &= ~PSR_IT_MASK; -- } else { -- /* We need to shift left ITSTATE<4:0> */ -- const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ -- unsigned long it = cpsr & mask; -- it <<= 1; -- it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ -- it &= mask; -- cpsr &= ~mask; -- cpsr |= it; -- } -- return cpsr; --} -- - static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) - { - long cpsr = regs->ARM_cpsr; -diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile -index 14db56f49f0a3..6159010dac4a6 100644 ---- a/arch/arm/probes/kprobes/Makefile -+++ b/arch/arm/probes/kprobes/Makefile -@@ -1,4 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 -+KASAN_SANITIZE_actions-common.o := n -+KASAN_SANITIZE_actions-arm.o := n -+KASAN_SANITIZE_actions-thumb.o := n - obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o - obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o - test-kprobes-objs := test-core.o -diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c -index 4d720990cf2a3..eba7ac4725c02 100644 ---- a/arch/arm/probes/kprobes/checkers-common.c -+++ b/arch/arm/probes/kprobes/checkers-common.c -@@ -40,7 +40,7 @@ enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn, - * Different from other insn uses imm8, the real addressing offset of - * STRD in T32 encoding should be imm8 * 4. See ARMARM description. 
- */ --enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn, -+static enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn, - struct arch_probes_insn *asi, - const struct decode_header *h) - { -diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c -index 9d8634e2f12f7..3bd017f6e256a 100644 ---- a/arch/arm/probes/kprobes/core.c -+++ b/arch/arm/probes/kprobes/core.c -@@ -11,6 +11,8 @@ - * Copyright (C) 2007 Marvell Ltd. - */ - -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -231,7 +233,7 @@ singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) - * kprobe, and that level is reserved for user kprobe handlers, so we can't - * risk encountering a new kprobe in an interrupt handler. - */ --void __kprobes kprobe_handler(struct pt_regs *regs) -+static void __kprobes kprobe_handler(struct pt_regs *regs) - { - struct kprobe *p, *cur; - struct kprobe_ctlblk *kcb; -@@ -278,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) - break; - case KPROBE_REENTER: - /* A nested probe was hit in FIQ, it is a BUG */ -- pr_warn("Unrecoverable kprobe detected.\n"); -+ pr_warn("Failed to recover from reentered kprobes.\n"); - dump_kprobe(p); - fallthrough; - default: -diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c -index c78180172120f..e20304f1d8bc9 100644 ---- a/arch/arm/probes/kprobes/opt-arm.c -+++ b/arch/arm/probes/kprobes/opt-arm.c -@@ -145,8 +145,6 @@ __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) - } - } - --extern void kprobe_handler(struct pt_regs *regs); -- - static void - optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) - { -diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c -index c562832b86272..171c7076b89f4 100644 ---- a/arch/arm/probes/kprobes/test-core.c -+++ b/arch/arm/probes/kprobes/test-core.c -@@ -720,7 +720,7 @@ static const char coverage_register_lookup[16] = { - [REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP, - }; - --unsigned coverage_start_registers(const struct decode_header *h) -+static unsigned coverage_start_registers(const struct decode_header *h) - { - unsigned regs = 0; - int i; -diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h -index f1d5583e7bbbb..7054d9fae2ea0 100644 ---- a/arch/arm/probes/kprobes/test-core.h -+++ b/arch/arm/probes/kprobes/test-core.h -@@ -454,3 +454,7 @@ void kprobe_thumb32_test_cases(void); - #else - void kprobe_arm_test_cases(void); - #endif -+ -+void __kprobes_test_case_start(void); -+void __kprobes_test_case_end_16(void); -+void __kprobes_test_case_end_32(void); -diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c -index 84a1cea1f43b9..309648c17f486 100644 ---- a/arch/arm/xen/p2m.c -+++ b/arch/arm/xen/p2m.c -@@ -63,11 +63,12 @@ out: - - unsigned long __pfn_to_mfn(unsigned long pfn) - { -- struct rb_node *n = phys_to_mach.rb_node; -+ struct rb_node *n; - struct xen_p2m_entry *entry; - unsigned long irqflags; - - read_lock_irqsave(&p2m_lock, irqflags); -+ n = phys_to_mach.rb_node; - while (n) { - entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); - if (entry->pfn <= pfn && -@@ -152,10 +153,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, - int rc; - unsigned long irqflags; - struct xen_p2m_entry *p2m_entry; -- struct rb_node *n = phys_to_mach.rb_node; -+ struct rb_node *n; - - if (mfn == INVALID_P2M_ENTRY) { - write_lock_irqsave(&p2m_lock, irqflags); -+ n = phys_to_mach.rb_node; - 
while (n) { - p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); - if (p2m_entry->pfn <= pfn && -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index fee914c716aa2..5ab4b0520eabb 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -154,7 +154,6 @@ config ARM64 - select HAVE_ARCH_KGDB - select HAVE_ARCH_MMAP_RND_BITS - select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT -- select HAVE_ARCH_PFN_VALID - select HAVE_ARCH_PREL32_RELOCATIONS - select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET - select HAVE_ARCH_SECCOMP_FILTER -@@ -221,6 +220,7 @@ config ARM64 - select THREAD_INFO_IN_TASK - select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD - select TRACE_IRQFLAGS_SUPPORT -+ select TRACE_IRQFLAGS_NMI_SUPPORT - help - ARM 64-bit (AArch64) Linux support. - -@@ -487,6 +487,22 @@ config ARM64_ERRATUM_834220 - - If unsure, say Y. - -+config ARM64_ERRATUM_1742098 -+ bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence" -+ depends on COMPAT -+ default y -+ help -+ This option removes the AES hwcap for aarch32 user-space to -+ workaround erratum 1742098 on Cortex-A57 and Cortex-A72. -+ -+ Affected parts may corrupt the AES state if an interrupt is -+ taken between a pair of AES instructions. These instructions -+ are only present if the cryptography extensions are present. -+ All software should have a fallback implementation for CPUs -+ that don't implement the cryptography extensions. -+ -+ If unsure, say Y. -+ - config ARM64_ERRATUM_845719 - bool "Cortex-A53: 845719: a load might read incorrect data" - depends on COMPAT -@@ -596,6 +612,23 @@ config ARM64_ERRATUM_1530923 - config ARM64_WORKAROUND_REPEAT_TLBI - bool - -+config ARM64_ERRATUM_2441007 -+ bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI" -+ default y -+ select ARM64_WORKAROUND_REPEAT_TLBI -+ help -+ This option adds a workaround for ARM Cortex-A55 erratum #2441007. -+ -+ Under very rare circumstances, affected Cortex-A55 CPUs -+ may not handle a race between a break-before-make sequence on one -+ CPU, and another CPU accessing the same page. This could allow a -+ store to a page that has been unmapped. -+ -+ Work around this by adding the affected CPUs to the list that needs -+ TLB sequences to be done twice. -+ -+ If unsure, say Y. -+ - config ARM64_ERRATUM_1286807 - bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation" - default y -@@ -666,6 +699,155 @@ config ARM64_ERRATUM_1508412 - - If unsure, say Y. - -+config ARM64_ERRATUM_2441009 -+ bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI" -+ default y -+ select ARM64_WORKAROUND_REPEAT_TLBI -+ help -+ This option adds a workaround for ARM Cortex-A510 erratum #2441009. -+ -+ Under very rare circumstances, affected Cortex-A510 CPUs -+ may not handle a race between a break-before-make sequence on one -+ CPU, and another CPU accessing the same page. This could allow a -+ store to a page that has been unmapped. -+ -+ Work around this by adding the affected CPUs to the list that needs -+ TLB sequences to be done twice. -+ -+ If unsure, say Y. -+ -+config ARM64_ERRATUM_2457168 -+ bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly" -+ depends on ARM64_AMU_EXTN -+ default y -+ help -+ This option adds the workaround for ARM Cortex-A510 erratum 2457168. 
-+ -+ The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate -+ as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments -+ incorrectly giving a significantly higher output value. -+ -+ Work around this problem by returning 0 when reading the affected counter in -+ key locations that results in disabling all users of this counter. This effect -+ is the same to firmware disabling affected counters. -+ -+ If unsure, say Y. -+ -+config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+ bool -+ -+config ARM64_ERRATUM_2119858 -+ bool "Cortex-A710: 2119858: workaround TRBE overwriting trace data in FILL mode" -+ default y -+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in -+ depends on CORESIGHT_TRBE -+ select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+ help -+ This option adds the workaround for ARM Cortex-A710 erratum 2119858. -+ -+ Affected Cortex-A710 cores could overwrite up to 3 cache lines of trace -+ data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in -+ the event of a WRAP event. -+ -+ Work around the issue by always making sure we move the TRBPTR_EL1 by -+ 256 bytes before enabling the buffer and filling the first 256 bytes of -+ the buffer with ETM ignore packets upon disabling. -+ -+ If unsure, say Y. -+ -+config ARM64_ERRATUM_2139208 -+ bool "Neoverse-N2: 2139208: workaround TRBE overwriting trace data in FILL mode" -+ default y -+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in -+ depends on CORESIGHT_TRBE -+ select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+ help -+ This option adds the workaround for ARM Neoverse-N2 erratum 2139208. -+ -+ Affected Neoverse-N2 cores could overwrite up to 3 cache lines of trace -+ data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in -+ the event of a WRAP event. -+ -+ Work around the issue by always making sure we move the TRBPTR_EL1 by -+ 256 bytes before enabling the buffer and filling the first 256 bytes of -+ the buffer with ETM ignore packets upon disabling. -+ -+ If unsure, say Y. -+ -+config ARM64_WORKAROUND_TSB_FLUSH_FAILURE -+ bool -+ -+config ARM64_ERRATUM_2054223 -+ bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace" -+ default y -+ select ARM64_WORKAROUND_TSB_FLUSH_FAILURE -+ help -+ Enable workaround for ARM Cortex-A710 erratum 2054223 -+ -+ Affected cores may fail to flush the trace data on a TSB instruction, when -+ the PE is in trace prohibited state. This will cause losing a few bytes -+ of the trace cached. -+ -+ Workaround is to issue two TSB consecutively on affected cores. -+ -+ If unsure, say Y. -+ -+config ARM64_ERRATUM_2067961 -+ bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace" -+ default y -+ select ARM64_WORKAROUND_TSB_FLUSH_FAILURE -+ help -+ Enable workaround for ARM Neoverse-N2 erratum 2067961 -+ -+ Affected cores may fail to flush the trace data on a TSB instruction, when -+ the PE is in trace prohibited state. This will cause losing a few bytes -+ of the trace cached. -+ -+ Workaround is to issue two TSB consecutively on affected cores. -+ -+ If unsure, say Y. 
-+ -+config ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -+ bool -+ -+config ARM64_ERRATUM_2253138 -+ bool "Neoverse-N2: 2253138: workaround TRBE writing to address out-of-range" -+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in -+ depends on CORESIGHT_TRBE -+ default y -+ select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -+ help -+ This option adds the workaround for ARM Neoverse-N2 erratum 2253138. -+ -+ Affected Neoverse-N2 cores might write to an out-of-range address, not reserved -+ for TRBE. Under some conditions, the TRBE might generate a write to the next -+ virtually addressed page following the last page of the TRBE address space -+ (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base. -+ -+ Work around this in the driver by always making sure that there is a -+ page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE. -+ -+ If unsure, say Y. -+ -+config ARM64_ERRATUM_2224489 -+ bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range" -+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in -+ depends on CORESIGHT_TRBE -+ default y -+ select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -+ help -+ This option adds the workaround for ARM Cortex-A710 erratum 2224489. -+ -+ Affected Cortex-A710 cores might write to an out-of-range address, not reserved -+ for TRBE. Under some conditions, the TRBE might generate a write to the next -+ virtually addressed page following the last page of the TRBE address space -+ (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base. -+ -+ Work around this in the driver by always making sure that there is a -+ page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE. -+ -+ If unsure, say Y. -+ - config CAVIUM_ERRATUM_22375 - bool "Cavium erratum 22375, 24313" - default y -@@ -1053,9 +1235,6 @@ config HW_PERF_EVENTS - def_bool y - depends on ARM_PMU - --config ARCH_HAS_FILTER_PGPROT -- def_bool y -- - # Supported by clang >= 7.0 - config CC_HAVE_SHADOW_CALL_STACK - def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) -@@ -1184,6 +1363,15 @@ config UNMAP_KERNEL_AT_EL0 - - If unsure, say Y. - -+config MITIGATE_SPECTRE_BRANCH_HISTORY -+ bool "Mitigate Spectre style attacks against branch history" if EXPERT -+ default y -+ help -+ Speculation attacks against some high-performance processors can -+ make use of branch history to influence future speculation. -+ When taking an exception from user-space, a sequence of branches -+ or a firmware call overwrites the branch history. 
-+ - config RODATA_FULL_DEFAULT_ENABLED - bool "Apply r/o permissions of VM areas also to their linear aliases" - default y -@@ -1264,7 +1452,8 @@ config KUSER_HELPERS - - config COMPAT_VDSO - bool "Enable vDSO for 32-bit applications" -- depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != "" -+ depends on !CPU_BIG_ENDIAN -+ depends on (CC_IS_CLANG && LD_IS_LLD) || "$(CROSS_COMPILE_COMPAT)" != "" - select GENERIC_COMPAT_VDSO - default y - help -@@ -1602,6 +1791,8 @@ config ARM64_BTI_KERNEL - depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 - depends on !CC_IS_GCC || GCC_VERSION >= 100100 -+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 -+ depends on !CC_IS_GCC - # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 - depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 - depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) -diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms -index b0ce18d4cc98c..d7772a4c34fe7 100644 ---- a/arch/arm64/Kconfig.platforms -+++ b/arch/arm64/Kconfig.platforms -@@ -259,6 +259,7 @@ config ARCH_INTEL_SOCFPGA - - config ARCH_SYNQUACER - bool "Socionext SynQuacer SoC Family" -+ select IRQ_FASTEOI_HIERARCHY_HANDLERS - - config ARCH_TEGRA - bool "NVIDIA Tegra SoC Family" -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi -index cc321c04f1219..f6d7d7f7fdabe 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi -@@ -343,19 +343,19 @@ - }; - - thermal-zones { -- cpu-thermal-zone { -+ cpu-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&ths 0>; - }; - -- ddr-thermal-zone { -+ ddr-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&ths 2>; - }; - -- gpu-thermal-zone { -+ gpu-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&ths 1>; -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi -index 578c37490d901..e39db51eb4489 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-cpu-opp.dtsi -@@ -4,7 +4,7 @@ - */ - - / { -- cpu0_opp_table: opp_table0 { -+ cpu0_opp_table: opp-table-cpu { - compatible = "operating-points-v2"; - opp-shared; - -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts -index 097a5511523ad..09eee653d5caa 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts -+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts -@@ -40,7 +40,7 @@ - leds { - compatible = "gpio-leds"; - -- status { -+ led-0 { - label = "orangepi:green:status"; - gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */ - }; -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi -index b2657201957eb..1afad8b437d72 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-cpu-opp.dtsi -@@ -2,7 +2,7 @@ - // Copyright (C) 2020 Chen-Yu Tsai - - / { -- cpu_opp_table: cpu-opp-table { -+ cpu_opp_table: opp-table-cpu { - compatible = "operating-points-v2"; - opp-shared; - -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts 
b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts -index d13980ed7a79a..7ec5ac850a0dc 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts -+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts -@@ -69,7 +69,7 @@ - pinctrl-0 = <&emac_rgmii_pins>; - phy-supply = <®_gmac_3v3>; - phy-handle = <&ext_rgmii_phy>; -- phy-mode = "rgmii"; -+ phy-mode = "rgmii-id"; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi -index 578a63dedf466..9988e87ea7b3d 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi -@@ -217,7 +217,7 @@ - }; - }; - -- gpu_thermal { -+ gpu-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&ths 1>; -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi -index 8c6e8536b69fa..0baf0f8e4d272 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi -@@ -3,7 +3,7 @@ - // Copyright (C) 2020 Clément Péron - - / { -- cpu_opp_table: cpu-opp-table { -+ cpu_opp_table: opp-table-cpu { - compatible = "allwinner,sun50i-h6-operating-points"; - nvmem-cells = <&cpu_speed_grade>; - opp-shared; -diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi -index d301ac0d406bf..3ec301bd08a91 100644 ---- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi -+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi -@@ -594,7 +594,7 @@ - }; - - qspi: spi@ff8d2000 { -- compatible = "cdns,qspi-nor"; -+ compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0xff8d2000 0x100>, -diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts -index 46e558ab7729b..f0e8af12442a4 100644 ---- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts -+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts -@@ -129,7 +129,7 @@ - status = "okay"; - clock-frequency = <100000>; - i2c-sda-falling-time-ns = <890>; /* hcnt */ -- i2c-sdl-falling-time-ns = <890>; /* lcnt */ -+ i2c-scl-falling-time-ns = <890>; /* lcnt */ - - adc@14 { - compatible = "lltc,ltc2497"; -diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts -index f9b4a39683cf4..92ac3c86ebd56 100644 ---- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts -+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts -@@ -162,7 +162,7 @@ - status = "okay"; - clock-frequency = <100000>; - i2c-sda-falling-time-ns = <890>; /* hcnt */ -- i2c-sdl-falling-time-ns = <890>; /* lcnt */ -+ i2c-scl-falling-time-ns = <890>; /* lcnt */ - - adc@14 { - compatible = "lltc,ltc2497"; -diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi -index 3f5254eeb47b1..db5a1f4653135 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi -@@ -152,7 +152,7 @@ - scpi_clocks: clocks { - compatible = "arm,scpi-clocks"; - -- scpi_dvfs: clock-controller { -+ scpi_dvfs: clocks-0 { - compatible = "arm,scpi-dvfs-clocks"; - #clock-cells = <1>; - clock-indices = <0>; -@@ -161,7 +161,7 @@ - }; - - scpi_sensors: sensors { -- compatible = "amlogic,meson-gxbb-scpi-sensors"; -+ compatible = 
"amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors"; - #thermal-sensor-cells = <1>; - }; - }; -@@ -1885,7 +1885,7 @@ - sd_emmc_b: sd@5000 { - compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x5000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - clocks = <&clkc CLKID_SD_EMMC_B>, - <&clkc CLKID_SD_EMMC_B_CLK0>, -@@ -1897,7 +1897,7 @@ - sd_emmc_c: mmc@7000 { - compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0x7000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - clocks = <&clkc CLKID_SD_EMMC_C>, - <&clkc CLKID_SD_EMMC_C_CLK0>, -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi -index 00c6f53290d43..369334076467a 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi -@@ -58,7 +58,7 @@ - secure-monitor = <&sm>; - }; - -- gpu_opp_table: gpu-opp-table { -+ gpu_opp_table: opp-table-gpu { - compatible = "operating-points-v2"; - - opp-124999998 { -@@ -107,6 +107,12 @@ - no-map; - }; - -+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ -+ secmon_reserved_bl32: secmon@5300000 { -+ reg = <0x0 0x05300000 0x0 0x2000000>; -+ no-map; -+ }; -+ - linux,cma { - compatible = "shared-dma-pool"; - reusable; -@@ -1604,10 +1610,9 @@ - - dmc: bus@38000 { - compatible = "simple-bus"; -- reg = <0x0 0x38000 0x0 0x400>; - #address-cells = <2>; - #size-cells = <2>; -- ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>; -+ ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>; - - canvas: video-lut@48 { - compatible = "amlogic,canvas"; -@@ -1727,7 +1732,7 @@ - #address-cells = <1>; - #size-cells = <0>; - -- internal_ephy: ethernet_phy@8 { -+ internal_ephy: ethernet-phy@8 { - compatible = "ethernet-phy-id0180.3301", - "ethernet-phy-ieee802.3-c22"; - interrupts = ; -@@ -2324,7 +2329,7 @@ - sd_emmc_a: sd@ffe03000 { - compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0xffe03000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - clocks = <&clkc CLKID_SD_EMMC_A>, - <&clkc CLKID_SD_EMMC_A_CLK0>, -@@ -2336,7 +2341,7 @@ - sd_emmc_b: sd@ffe05000 { - compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0xffe05000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - clocks = <&clkc CLKID_SD_EMMC_B>, - <&clkc CLKID_SD_EMMC_B_CLK0>, -@@ -2348,7 +2353,7 @@ - sd_emmc_c: mmc@ffe07000 { - compatible = "amlogic,meson-axg-mmc"; - reg = <0x0 0xffe07000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - clocks = <&clkc CLKID_SD_EMMC_C>, - <&clkc CLKID_SD_EMMC_C_CLK0>, -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts -index 81269ccc24968..4fb31c2ba31c4 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts -@@ -139,7 +139,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&dc_in>; -+ pwm-supply = <&dc_in>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -157,14 +157,6 @@ - regulator-always-on; - }; - -- reserved-memory { -- /* TEE Reserved Memory */ -- bl32_reserved: bl32@5000000 { -- reg = <0x0 0x05300000 0x0 0x2000000>; -- no-map; -- }; -- }; -- - sdio_pwrseq: sdio-pwrseq { - compatible = "mmc-pwrseq-simple"; - reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts -index a26bfe72550fe..4b5d11e56364d 
100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts -@@ -139,7 +139,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ pwm-supply = <&main_12v>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts -index 579f3d02d613e..b4e86196e3468 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts -@@ -139,7 +139,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&dc_in>; -+ pwm-supply = <&dc_in>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi -index fb0ab27d1f642..6eaceb717d617 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi -@@ -57,26 +57,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <666666666>; -- opp-microvolt = <731000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <731000>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi -index d61f43052a344..8e9ad1e51d665 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi -@@ -11,26 +11,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <667000000>; -- opp-microvolt = <731000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <761000>; -@@ -71,26 +51,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <667000000>; -- opp-microvolt = <731000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <731000>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi -index f42cf4b8af2d4..16dd409051b40 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi -@@ -18,7 +18,7 @@ - regulator-min-microvolt = <690000>; - regulator-max-microvolt = <1050000>; - -- vin-supply = <&dc_in>; -+ pwm-supply = <&dc_in>; - - pwms = <&pwm_ab 0 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -37,7 +37,7 @@ - regulator-min-microvolt = <690000>; - regulator-max-microvolt = <1050000>; - -- vin-supply 
= <&vsys_3v3>; -+ pwm-supply = <&vsys_3v3>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi -index 344573e157a7b..d33e54b5e1969 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi -@@ -17,7 +17,7 @@ - rtc1 = &vrtc; - }; - -- dioo2133: audio-amplifier-0 { -+ dio2133: audio-amplifier-0 { - compatible = "simple-audio-amplifier"; - enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; - VCC-supply = <&vcc_5v>; -@@ -130,7 +130,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ pwm-supply = <&main_12v>; - - pwms = <&pwm_ab 0 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -149,7 +149,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ pwm-supply = <&main_12v>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -217,7 +217,7 @@ - audio-widgets = "Line", "Lineout"; - audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>, - <&tdmin_b>, <&tdmin_c>, <&tdmin_lb>, -- <&dioo2133>; -+ <&dio2133>; - audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1", - "TDMOUT_B IN 1", "FRDDR_B OUT 1", - "TDMOUT_B IN 2", "FRDDR_C OUT 1", -@@ -607,7 +607,7 @@ - pinctrl-0 = <&nor_pins>; - pinctrl-names = "default"; - -- mx25u64: spi-flash@0 { -+ mx25u64: flash@0 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "mxicy,mx25u6435f", "jedec,spi-nor"; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi -index 1e5d0ee5d541b..44c23c984034c 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi -@@ -11,26 +11,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <731000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <667000000>; -- opp-microvolt = <731000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <731000>; -@@ -76,26 +56,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <751000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <751000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <751000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <667000000>; -- opp-microvolt = <751000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <771000>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi -index feb0885047400..b40d2c1002c92 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi -@@ -96,7 +96,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ pwm-supply = <&main_12v>; - - pwms = <&pwm_ab 0 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -115,7 +115,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ 
pwm-supply = <&main_12v>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi -index 2d7032f41e4b5..772c220c8f496 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi -@@ -17,7 +17,7 @@ - io-channel-names = "buttons"; - keyup-threshold-microvolt = <1800000>; - -- update-button { -+ button-update { - label = "update"; - linux,code = ; - press-threshold-microvolt = <1300000>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi -index 6b457b2c30a4b..32cc9fab4490f 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi -@@ -49,6 +49,12 @@ - no-map; - }; - -+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ -+ secmon_reserved_bl32: secmon@5300000 { -+ reg = <0x0 0x05300000 0x0 0x2000000>; -+ no-map; -+ }; -+ - linux,cma { - compatible = "shared-dma-pool"; - reusable; -@@ -226,7 +232,7 @@ - reg = <0x14 0x10>; - }; - -- eth_mac: eth_mac@34 { -+ eth_mac: eth-mac@34 { - reg = <0x34 0x10>; - }; - -@@ -243,7 +249,7 @@ - scpi_clocks: clocks { - compatible = "arm,scpi-clocks"; - -- scpi_dvfs: scpi_clocks@0 { -+ scpi_dvfs: clocks-0 { - compatible = "arm,scpi-dvfs-clocks"; - #clock-cells = <1>; - clock-indices = <0>; -@@ -525,7 +531,7 @@ - #size-cells = <2>; - ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>; - -- hwrng: rng { -+ hwrng: rng@0 { - compatible = "amlogic,meson-rng"; - reg = <0x0 0x0 0x0 0x4>; - }; -@@ -596,21 +602,21 @@ - sd_emmc_a: mmc@70000 { - compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x70000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - }; - - sd_emmc_b: mmc@72000 { - compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x72000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - }; - - sd_emmc_c: mmc@74000 { - compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc"; - reg = <0x0 0x74000 0x0 0x800>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - }; - }; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts -index e8394a8269ee1..802faf7e4e3cb 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts -@@ -16,7 +16,7 @@ - - leds { - compatible = "gpio-leds"; -- status { -+ led { - gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>; - default-state = "off"; - color = ; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi -index a350fee1264d7..a4d34398da358 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi -@@ -6,6 +6,7 @@ - */ - - #include "meson-gxbb.dtsi" -+#include - - / { - aliases { -@@ -64,6 +65,7 @@ - regulator-name = "VDDIO_AO18"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; -+ regulator-always-on; - }; - - vcc_3v3: regulator-vcc_3v3 { -@@ -161,6 +163,7 @@ - status = "okay"; - pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; - pinctrl-names = "default"; -+ hdmi-supply = <&vddio_ao18>; - }; - - &hdmi_tx_tmds_port { -diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts -index 
9ef210f17b4aa..393d3cb33b9ee 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts -@@ -18,7 +18,7 @@ - leds { - compatible = "gpio-leds"; - -- status { -+ led { - label = "n1:white:status"; - gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>; - default-state = "on"; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts -index b331a013572f3..c490dbbf063bf 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts -@@ -79,6 +79,5 @@ - enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>; - max-speed = <2000000>; - clocks = <&wifi32k>; -- clock-names = "lpo"; - }; - }; -diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi -index c3ac531c4f84a..3500229350522 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi -@@ -759,7 +759,7 @@ - }; - }; - -- eth-phy-mux { -+ eth-phy-mux@55c { - compatible = "mdio-mux-mmioreg", "mdio-mux"; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts -index effaa138b5f98..38ebe98ba9c6b 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts -@@ -17,13 +17,13 @@ - compatible = "bananapi,bpi-m5", "amlogic,sm1"; - model = "Banana Pi BPI-M5"; - -- adc_keys { -+ adc-keys { - compatible = "adc-keys"; - io-channels = <&saradc 2>; - io-channel-names = "buttons"; - keyup-threshold-microvolt = <1800000>; - -- key { -+ button-sw3 { - label = "SW3"; - linux,code = ; - press-threshold-microvolt = <1700000>; -@@ -123,7 +123,7 @@ - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - -- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>; -+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>; - enable-active-high; - regulator-always-on; - -@@ -173,7 +173,7 @@ - regulator-min-microvolt = <690000>; - regulator-max-microvolt = <1050000>; - -- vin-supply = <&dc_in>; -+ pwm-supply = <&dc_in>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -437,6 +437,7 @@ - "", - "eMMC_RST#", /* BOOT_12 */ - "eMMC_DS", /* BOOT_13 */ -+ "", "", - /* GPIOC */ - "SD_D0_B", /* GPIOC_0 */ - "SD_D1_B", /* GPIOC_1 */ -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts -index f2c0981435944..9c0b544e22098 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts -@@ -24,7 +24,7 @@ - regulator-min-microvolt = <690000>; - regulator-max-microvolt = <1050000>; - -- vin-supply = <&vsys_3v3>; -+ pwm-supply = <&vsys_3v3>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts -index f3f953225bf5b..15fece2e63205 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts -@@ -76,9 +76,17 @@ - }; - - &cpu_thermal { -+ trips { -+ cpu_active: cpu-active { -+ temperature = <60000>; /* millicelsius */ -+ hysteresis = <2000>; /* millicelsius */ -+ type = "active"; -+ }; -+ }; -+ - cooling-maps { - map { -- trip = <&cpu_passive>; 
-+ trip = <&cpu_active>; - cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; - }; - }; -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi -index fd0ad85c165ba..76ad052fbf0c9 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi -@@ -48,7 +48,7 @@ - regulator-max-microvolt = <3300000>; - vin-supply = <&vcc_5v>; - -- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>; -+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>; - enable-active-high; - regulator-always-on; - -@@ -116,7 +116,7 @@ - regulator-min-microvolt = <721000>; - regulator-max-microvolt = <1022000>; - -- vin-supply = <&main_12v>; -+ pwm-supply = <&main_12v>; - - pwms = <&pwm_AO_cd 1 1250 0>; - pwm-dutycycle-range = <100 0>; -@@ -263,6 +263,10 @@ - reg = <0>; - max-speed = <1000>; - -+ reset-assert-us = <10000>; -+ reset-deassert-us = <80000>; -+ reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; -+ - interrupt-parent = <&gpio_intc>; - /* MAC_INTR on GPIOZ_14 */ - interrupts = <26 IRQ_TYPE_LEVEL_LOW>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts -index 2194a778973f1..a5d79f2f7c196 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts -@@ -185,7 +185,7 @@ - regulator-min-microvolt = <690000>; - regulator-max-microvolt = <1050000>; - -- vin-supply = <&dc_in>; -+ pwm-supply = <&dc_in>; - - pwms = <&pwm_AO_cd 1 1500 0>; - pwm-dutycycle-range = <100 0>; -@@ -203,14 +203,6 @@ - regulator-always-on; - }; - -- reserved-memory { -- /* TEE Reserved Memory */ -- bl32_reserved: bl32@5000000 { -- reg = <0x0 0x05300000 0x0 0x2000000>; -- no-map; -- }; -- }; -- - sdio_pwrseq: sdio-pwrseq { - compatible = "mmc-pwrseq-simple"; - reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; -diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi -index 3d8b1f4f2001b..78bdbd2ccc9de 100644 ---- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi -+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi -@@ -95,26 +95,6 @@ - compatible = "operating-points-v2"; - opp-shared; - -- opp-100000000 { -- opp-hz = /bits/ 64 <100000000>; -- opp-microvolt = <730000>; -- }; -- -- opp-250000000 { -- opp-hz = /bits/ 64 <250000000>; -- opp-microvolt = <730000>; -- }; -- -- opp-500000000 { -- opp-hz = /bits/ 64 <500000000>; -- opp-microvolt = <730000>; -- }; -- -- opp-667000000 { -- opp-hz = /bits/ 64 <666666666>; -- opp-microvolt = <750000>; -- }; -- - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; - opp-microvolt = <770000>; -diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi -index 6288e104a0893..a00b0f14c222f 100644 ---- a/arch/arm64/boot/dts/arm/juno-base.dtsi -+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi -@@ -26,7 +26,8 @@ - compatible = "arm,mhu", "arm,primecell"; - reg = <0x0 0x2b1f0000 0x0 0x1000>; - interrupts = , -- ; -+ , -+ ; - #mbox-cells = <1>; - clocks = <&soc_refclk100mhz>; - clock-names = "apb_pclk"; -@@ -543,8 +544,7 @@ - <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>, - <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; - /* Standard AXI Translation entries as programmed by EDK2 */ -- dma-ranges = <0x02000000 0x0 0x2c1c0000 0x0 0x2c1c0000 0x0 0x00040000>, -- <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>, -+ dma-ranges = <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 
0x80000000>, - <0x43000000 0x8 0x00000000 0x8 0x00000000 0x2 0x00000000>; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 7>; -@@ -597,12 +597,26 @@ - polling-delay = <1000>; - polling-delay-passive = <100>; - thermal-sensors = <&scpi_sensors0 0>; -+ trips { -+ pmic_crit0: trip0 { -+ temperature = <90000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; - }; - - soc { - polling-delay = <1000>; - polling-delay-passive = <100>; - thermal-sensors = <&scpi_sensors0 3>; -+ trips { -+ soc_crit0: trip0 { -+ temperature = <80000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; - }; - - big_cluster_thermal_zone: big-cluster { -diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile -index 11eae3e3a9447..bce0a12554539 100644 ---- a/arch/arm64/boot/dts/broadcom/Makefile -+++ b/arch/arm64/boot/dts/broadcom/Makefile -@@ -6,6 +6,6 @@ dtb-$(CONFIG_ARCH_BCM2835) += bcm2711-rpi-400.dtb \ - bcm2837-rpi-3-b-plus.dtb \ - bcm2837-rpi-cm3-io3.dtb - --subdir-y += bcm4908 -+subdir-y += bcmbca - subdir-y += northstar2 - subdir-y += stingray -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile -deleted file mode 100644 -index cc75854519ac3..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile -+++ /dev/null -@@ -1,4 +0,0 @@ --# SPDX-License-Identifier: GPL-2.0 --dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-netgear-r8000p.dtb --dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-tplink-archer-c2300-v1.dtb --dtb-$(CONFIG_ARCH_BCM4908) += bcm4908-asus-gt-ac5300.dtb -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts -deleted file mode 100644 -index 2dd028438c22c..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts -+++ /dev/null -@@ -1,157 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -- --#include --#include --#include -- --#include "bcm4906.dtsi" -- --/ { -- compatible = "netgear,r8000p", "brcm,bcm4906", "brcm,bcm4908"; -- model = "Netgear R8000P"; -- -- memory@0 { -- device_type = "memory"; -- reg = <0x00 0x00 0x00 0x20000000>; -- }; -- -- leds { -- compatible = "gpio-leds"; -- -- led-power-white { -- function = LED_FUNCTION_POWER; -- color = ; -- gpios = <&gpio0 8 GPIO_ACTIVE_LOW>; -- }; -- -- led-power-amber { -- function = LED_FUNCTION_POWER; -- color = ; -- gpios = <&gpio0 9 GPIO_ACTIVE_LOW>; -- }; -- -- led-wps { -- function = LED_FUNCTION_WPS; -- color = ; -- gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; -- }; -- -- led-2ghz { -- function = "2ghz"; -- color = ; -- gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; -- }; -- -- led-5ghz-1 { -- function = "5ghz-1"; -- color = ; -- gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; -- }; -- -- led-5ghz-2 { -- function = "5ghz-2"; -- color = ; -- gpios = <&gpio0 16 GPIO_ACTIVE_LOW>; -- }; -- -- led-usb2 { -- function = "usb2"; -- color = ; -- gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; -- }; -- -- led-usb3 { -- function = "usb3"; -- color = ; -- gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; -- }; -- -- led-wifi { -- function = "wifi"; -- color = ; -- gpios = <&gpio0 56 GPIO_ACTIVE_LOW>; -- }; -- }; --}; -- --&enet { -- nvmem-cells = <&base_mac_addr>; -- nvmem-cell-names = "mac-address"; --}; -- --&usb_phy { -- brcm,ioc = <1>; -- status = "okay"; --}; -- --&ehci { -- status = "okay"; --}; -- --&ohci { -- status = "okay"; --}; -- --&xhci { -- status = "okay"; --}; -- --&ports { -- port@0 { -- label = "lan4"; -- }; -- -- port@1 { -- label = "lan3"; -- }; -- -- port@2 { 
-- label = "lan2"; -- }; -- -- port@3 { -- label = "lan1"; -- }; -- -- port@7 { -- reg = <7>; -- phy-mode = "internal"; -- phy-handle = <&phy12>; -- label = "wan"; -- }; --}; -- --&nandcs { -- nand-ecc-strength = <4>; -- nand-ecc-step-size = <512>; -- nand-on-flash-bbt; -- -- #address-cells = <1>; -- #size-cells = <0>; -- -- partitions { -- compatible = "fixed-partitions"; -- #address-cells = <1>; -- #size-cells = <1>; -- -- partition@0 { -- compatible = "nvmem-cells"; -- label = "cferom"; -- reg = <0x0 0x100000>; -- -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0 0x0 0x100000>; -- -- base_mac_addr: mac@106a0 { -- reg = <0x106a0 0x6>; -- }; -- }; -- -- partition@100000 { -- compatible = "brcm,bcm4908-firmware"; -- label = "firmware"; -- reg = <0x100000 0x4400000>; -- }; -- }; --}; -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts -deleted file mode 100644 -index b63eefab48bd5..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts -+++ /dev/null -@@ -1,182 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -- --#include --#include --#include -- --#include "bcm4906.dtsi" -- --/ { -- compatible = "tplink,archer-c2300-v1", "brcm,bcm4906", "brcm,bcm4908"; -- model = "TP-Link Archer C2300 V1"; -- -- memory@0 { -- device_type = "memory"; -- reg = <0x00 0x00 0x00 0x20000000>; -- }; -- -- leds { -- compatible = "gpio-leds"; -- -- led-power { -- function = LED_FUNCTION_POWER; -- color = ; -- gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; -- }; -- -- led-2ghz { -- function = "2ghz"; -- color = ; -- gpios = <&gpio0 2 GPIO_ACTIVE_LOW>; -- }; -- -- led-5ghz { -- function = "5ghz"; -- color = ; -- gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; -- }; -- -- led-wan-amber { -- function = LED_FUNCTION_WAN; -- color = ; -- gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; -- }; -- -- led-wan-blue { -- function = LED_FUNCTION_WAN; -- color = ; -- gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; -- }; -- -- led-lan { -- function = LED_FUNCTION_LAN; -- color = ; -- gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; -- }; -- -- led-wps { -- function = LED_FUNCTION_WPS; -- color = ; -- gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; -- }; -- -- led-usb2 { -- function = "usb2"; -- color = ; -- gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; -- }; -- -- led-usb3 { -- function = "usbd3"; -- color = ; -- gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; -- }; -- -- led-brightness { -- function = LED_FUNCTION_BACKLIGHT; -- color = ; -- gpios = <&gpio0 19 GPIO_ACTIVE_LOW>; -- }; -- }; -- -- gpio-keys-polled { -- compatible = "gpio-keys-polled"; -- poll-interval = <100>; -- -- brightness { -- label = "LEDs"; -- linux,code = ; -- gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; -- }; -- -- wps { -- label = "WPS"; -- linux,code = ; -- gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; -- }; -- -- wifi { -- label = "WiFi"; -- linux,code = ; -- gpios = <&gpio0 22 GPIO_ACTIVE_LOW>; -- }; -- -- restart { -- label = "Reset"; -- linux,code = ; -- gpios = <&gpio0 23 GPIO_ACTIVE_LOW>; -- }; -- }; --}; -- --&usb_phy { -- brcm,ioc = <1>; -- status = "okay"; --}; -- --&ehci { -- status = "okay"; --}; -- --&ohci { -- status = "okay"; --}; -- --&xhci { -- status = "okay"; --}; -- --&ports { -- port@0 { -- label = "lan4"; -- }; -- -- port@1 { -- label = "lan3"; -- }; -- -- port@2 { -- label = "lan2"; -- }; -- -- port@3 { -- label = "lan1"; -- }; -- -- port@7 { -- reg = <7>; -- phy-mode = "internal"; -- phy-handle = <&phy12>; -- label = "wan"; -- }; --}; -- --&nandcs { -- 
nand-ecc-strength = <4>; -- nand-ecc-step-size = <512>; -- nand-on-flash-bbt; -- -- #address-cells = <1>; -- #size-cells = <0>; -- -- partitions { -- compatible = "brcm,bcm4908-partitions"; -- #address-cells = <1>; -- #size-cells = <1>; -- -- partition@0 { -- label = "cferom"; -- reg = <0x0 0x100000>; -- }; -- -- partition@100000 { -- compatible = "brcm,bcm4908-firmware"; -- reg = <0x100000 0x3900000>; -- }; -- -- partition@5800000 { -- compatible = "brcm,bcm4908-firmware"; -- reg = <0x3a00000 0x3900000>; -- }; -- }; --}; -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi -deleted file mode 100644 -index 66023d5535247..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi -+++ /dev/null -@@ -1,18 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -- --#include "bcm4908.dtsi" -- --/ { -- cpus { -- /delete-node/ cpu@2; -- -- /delete-node/ cpu@3; -- }; -- -- pmu { -- compatible = "arm,cortex-a53-pmu"; -- interrupts = , -- ; -- interrupt-affinity = <&cpu0>, <&cpu1>; -- }; --}; -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts -deleted file mode 100644 -index 169fbb7cfd342..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts -+++ /dev/null -@@ -1,159 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -- --#include --#include -- --#include "bcm4908.dtsi" -- --/ { -- compatible = "asus,gt-ac5300", "brcm,bcm4908"; -- model = "Asus GT-AC5300"; -- -- memory@0 { -- device_type = "memory"; -- reg = <0x00 0x00 0x00 0x40000000>; -- }; -- -- gpio-keys-polled { -- compatible = "gpio-keys-polled"; -- poll-interval = <100>; -- -- wifi { -- label = "WiFi"; -- linux,code = ; -- gpios = <&gpio0 28 GPIO_ACTIVE_LOW>; -- }; -- -- wps { -- label = "WPS"; -- linux,code = ; -- gpios = <&gpio0 29 GPIO_ACTIVE_LOW>; -- }; -- -- restart { -- label = "Reset"; -- linux,code = ; -- gpios = <&gpio0 30 GPIO_ACTIVE_LOW>; -- }; -- -- brightness { -- label = "LEDs"; -- linux,code = ; -- gpios = <&gpio0 31 GPIO_ACTIVE_LOW>; -- }; -- }; --}; -- --&enet { -- nvmem-cells = <&base_mac_addr>; -- nvmem-cell-names = "mac-address"; --}; -- --&usb_phy { -- brcm,ioc = <1>; -- status = "okay"; --}; -- --&ehci { -- status = "okay"; --}; -- --&ohci { -- status = "okay"; --}; -- --&xhci { -- status = "okay"; --}; -- --&ports { -- port@0 { -- label = "lan2"; -- }; -- -- port@1 { -- label = "lan1"; -- }; -- -- port@2 { -- label = "lan6"; -- }; -- -- port@3 { -- label = "lan5"; -- }; -- -- /* External BCM53134S switch */ -- port@7 { -- label = "sw"; -- reg = <7>; -- phy-mode = "rgmii"; -- -- fixed-link { -- speed = <1000>; -- full-duplex; -- }; -- }; --}; -- --&mdio { -- /* lan8 */ -- ethernet-phy@0 { -- reg = <0>; -- }; -- -- /* lan7 */ -- ethernet-phy@1 { -- reg = <1>; -- }; -- -- /* lan4 */ -- ethernet-phy@2 { -- reg = <2>; -- }; -- -- /* lan3 */ -- ethernet-phy@3 { -- reg = <3>; -- }; --}; -- --&nandcs { -- nand-ecc-strength = <4>; -- nand-ecc-step-size = <512>; -- nand-on-flash-bbt; -- brcm,nand-has-wp; -- -- #address-cells = <1>; -- #size-cells = <0>; -- -- partitions { -- compatible = "brcm,bcm4908-partitions"; -- #address-cells = <1>; -- #size-cells = <1>; -- -- partition@0 { -- compatible = "nvmem-cells"; -- label = "cferom"; -- reg = <0x0 0x100000>; -- -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0 0x0 0x100000>; -- -- base_mac_addr: mac@106a0 { -- reg = <0x106a0 0x6>; -- }; -- }; -- -- 
partition@100000 { -- compatible = "brcm,bcm4908-firmware"; -- reg = <0x100000 0x5700000>; -- }; -- -- partition@5800000 { -- compatible = "brcm,bcm4908-firmware"; -- reg = <0x5800000 0x5700000>; -- }; -- }; --}; -diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi -deleted file mode 100644 -index a5a64d17d9ea6..0000000000000 ---- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi -+++ /dev/null -@@ -1,337 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -- --#include --#include --#include --#include -- --/dts-v1/; -- --/ { -- interrupt-parent = <&gic>; -- -- #address-cells = <2>; -- #size-cells = <2>; -- -- aliases { -- serial0 = &uart0; -- }; -- -- chosen { -- stdout-path = "serial0:115200n8"; -- }; -- -- cpus { -- #address-cells = <1>; -- #size-cells = <0>; -- -- cpu0: cpu@0 { -- device_type = "cpu"; -- compatible = "brcm,brahma-b53"; -- reg = <0x0>; -- next-level-cache = <&l2>; -- }; -- -- cpu1: cpu@1 { -- device_type = "cpu"; -- compatible = "brcm,brahma-b53"; -- reg = <0x1>; -- enable-method = "spin-table"; -- cpu-release-addr = <0x0 0xfff8>; -- next-level-cache = <&l2>; -- }; -- -- cpu2: cpu@2 { -- device_type = "cpu"; -- compatible = "brcm,brahma-b53"; -- reg = <0x2>; -- enable-method = "spin-table"; -- cpu-release-addr = <0x0 0xfff8>; -- next-level-cache = <&l2>; -- }; -- -- cpu3: cpu@3 { -- device_type = "cpu"; -- compatible = "brcm,brahma-b53"; -- reg = <0x3>; -- enable-method = "spin-table"; -- cpu-release-addr = <0x0 0xfff8>; -- next-level-cache = <&l2>; -- }; -- -- l2: l2-cache0 { -- compatible = "cache"; -- }; -- }; -- -- axi@81000000 { -- compatible = "simple-bus"; -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0x00 0x00 0x81000000 0x4000>; -- -- gic: interrupt-controller@1000 { -- compatible = "arm,gic-400"; -- #interrupt-cells = <3>; -- #address-cells = <0>; -- interrupt-controller; -- reg = <0x1000 0x1000>, -- <0x2000 0x2000>; -- }; -- }; -- -- timer { -- compatible = "arm,armv8-timer"; -- interrupts = , -- , -- , -- ; -- }; -- -- pmu { -- compatible = "arm,cortex-a53-pmu"; -- interrupts = , -- , -- , -- ; -- interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; -- }; -- -- clocks { -- periph_clk: periph_clk { -- compatible = "fixed-clock"; -- #clock-cells = <0>; -- clock-frequency = <50000000>; -- clock-output-names = "periph"; -- }; -- }; -- -- soc { -- compatible = "simple-bus"; -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0x00 0x00 0x80000000 0x281000>; -- -- enet: ethernet@2000 { -- compatible = "brcm,bcm4908-enet"; -- reg = <0x2000 0x1000>; -- -- interrupts = , -- ; -- interrupt-names = "rx", "tx"; -- }; -- -- usb_phy: usb-phy@c200 { -- compatible = "brcm,bcm4908-usb-phy"; -- reg = <0xc200 0x100>; -- reg-names = "ctrl"; -- power-domains = <&pmb BCM_PMB_HOST_USB>; -- dr_mode = "host"; -- brcm,has-xhci; -- brcm,has-eohci; -- #phy-cells = <1>; -- status = "disabled"; -- }; -- -- ehci: usb@c300 { -- compatible = "generic-ehci"; -- reg = <0xc300 0x100>; -- interrupts = ; -- phys = <&usb_phy PHY_TYPE_USB2>; -- status = "disabled"; -- }; -- -- ohci: usb@c400 { -- compatible = "generic-ohci"; -- reg = <0xc400 0x100>; -- interrupts = ; -- phys = <&usb_phy PHY_TYPE_USB2>; -- status = "disabled"; -- }; -- -- xhci: usb@d000 { -- compatible = "generic-xhci"; -- reg = <0xd000 0x8c8>; -- interrupts = ; -- phys = <&usb_phy PHY_TYPE_USB3>; -- status = "disabled"; -- }; -- -- bus@80000 { -- compatible = "simple-bus"; -- #size-cells = <1>; -- #address-cells = <1>; -- ranges = 
<0 0x80000 0x50000>; -- -- ethernet-switch@0 { -- compatible = "brcm,bcm4908-switch"; -- reg = <0x0 0x40000>, -- <0x40000 0x110>, -- <0x40340 0x30>, -- <0x40380 0x30>, -- <0x40600 0x34>, -- <0x40800 0x208>; -- reg-names = "core", "reg", "intrl2_0", -- "intrl2_1", "fcb", "acb"; -- interrupts = , -- ; -- brcm,num-gphy = <5>; -- brcm,num-rgmii-ports = <2>; -- -- #address-cells = <1>; -- #size-cells = <0>; -- -- ports: ports { -- #address-cells = <1>; -- #size-cells = <0>; -- -- port@0 { -- reg = <0>; -- phy-mode = "internal"; -- phy-handle = <&phy8>; -- }; -- -- port@1 { -- reg = <1>; -- phy-mode = "internal"; -- phy-handle = <&phy9>; -- }; -- -- port@2 { -- reg = <2>; -- phy-mode = "internal"; -- phy-handle = <&phy10>; -- }; -- -- port@3 { -- reg = <3>; -- phy-mode = "internal"; -- phy-handle = <&phy11>; -- }; -- -- port@8 { -- reg = <8>; -- phy-mode = "internal"; -- ethernet = <&enet>; -- -- fixed-link { -- speed = <1000>; -- full-duplex; -- }; -- }; -- }; -- }; -- -- mdio: mdio@405c0 { -- compatible = "brcm,unimac-mdio"; -- reg = <0x405c0 0x8>; -- reg-names = "mdio"; -- #size-cells = <0>; -- #address-cells = <1>; -- -- phy8: ethernet-phy@8 { -- reg = <8>; -- }; -- -- phy9: ethernet-phy@9 { -- reg = <9>; -- }; -- -- phy10: ethernet-phy@a { -- reg = <10>; -- }; -- -- phy11: ethernet-phy@b { -- reg = <11>; -- }; -- -- phy12: ethernet-phy@c { -- reg = <12>; -- }; -- }; -- }; -- -- procmon: syscon@280000 { -- compatible = "simple-bus"; -- reg = <0x280000 0x1000>; -- ranges; -- -- #address-cells = <1>; -- #size-cells = <1>; -- -- pmb: power-controller@2800c0 { -- compatible = "brcm,bcm4908-pmb"; -- reg = <0x2800c0 0x40>; -- #power-domain-cells = <1>; -- }; -- }; -- }; -- -- bus@ff800000 { -- compatible = "simple-bus"; -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0x00 0x00 0xff800000 0x3000>; -- -- timer: timer@400 { -- compatible = "brcm,bcm6328-timer", "syscon"; -- reg = <0x400 0x3c>; -- }; -- -- gpio0: gpio-controller@500 { -- compatible = "brcm,bcm6345-gpio"; -- reg-names = "dirout", "dat"; -- reg = <0x500 0x28>, <0x528 0x28>; -- -- #gpio-cells = <2>; -- gpio-controller; -- }; -- -- uart0: serial@640 { -- compatible = "brcm,bcm6345-uart"; -- reg = <0x640 0x18>; -- interrupts = ; -- clocks = <&periph_clk>; -- clock-names = "periph"; -- status = "okay"; -- }; -- -- nand@1800 { -- #address-cells = <1>; -- #size-cells = <0>; -- compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand"; -- reg = <0x1800 0x600>, <0x2000 0x10>; -- reg-names = "nand", "nand-int-base"; -- interrupts = ; -- interrupt-names = "nand"; -- status = "okay"; -- -- nandcs: nand@0 { -- compatible = "brcm,nandcs"; -- reg = <0>; -- }; -- }; -- -- misc@2600 { -- compatible = "brcm,misc", "simple-mfd"; -- reg = <0x2600 0xe4>; -- -- #address-cells = <1>; -- #size-cells = <1>; -- ranges = <0x00 0x2600 0xe4>; -- -- reset-controller@2644 { -- compatible = "brcm,bcm4908-misc-pcie-reset"; -- reg = <0x44 0x04>; -- #reset-cells = <1>; -- }; -- }; -- -- reboot { -- compatible = "syscon-reboot"; -- regmap = <&timer>; -- offset = <0x34>; -- mask = <1>; -- }; -- }; --}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/Makefile b/arch/arm64/boot/dts/broadcom/bcmbca/Makefile -new file mode 100644 -index 0000000000000..dc68357849a9b ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/Makefile -@@ -0,0 +1,10 @@ -+# SPDX-License-Identifier: GPL-2.0 -+dtb-$(CONFIG_ARCH_BCMBCA) += \ -+ bcm4906-netgear-r8000p.dtb \ -+ bcm4906-tplink-archer-c2300-v1.dtb \ -+ bcm4908-asus-gt-ac5300.dtb \ -+ 
bcm4908-netgear-raxe500.dtb \ -+ bcm4912-asus-gt-ax6000.dtb \ -+ bcm94912.dtb \ -+ bcm963158.dtb \ -+ bcm96858.dtb -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts -new file mode 100644 -index 0000000000000..2dd028438c22c ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-netgear-r8000p.dts -@@ -0,0 +1,157 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include -+#include -+#include -+ -+#include "bcm4906.dtsi" -+ -+/ { -+ compatible = "netgear,r8000p", "brcm,bcm4906", "brcm,bcm4908"; -+ model = "Netgear R8000P"; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x00 0x00 0x00 0x20000000>; -+ }; -+ -+ leds { -+ compatible = "gpio-leds"; -+ -+ led-power-white { -+ function = LED_FUNCTION_POWER; -+ color = ; -+ gpios = <&gpio0 8 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-power-amber { -+ function = LED_FUNCTION_POWER; -+ color = ; -+ gpios = <&gpio0 9 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-wps { -+ function = LED_FUNCTION_WPS; -+ color = ; -+ gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-2ghz { -+ function = "2ghz"; -+ color = ; -+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-5ghz-1 { -+ function = "5ghz-1"; -+ color = ; -+ gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-5ghz-2 { -+ function = "5ghz-2"; -+ color = ; -+ gpios = <&gpio0 16 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-usb2 { -+ function = "usb2"; -+ color = ; -+ gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-usb3 { -+ function = "usb3"; -+ color = ; -+ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-wifi { -+ function = "wifi"; -+ color = ; -+ gpios = <&gpio0 56 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+}; -+ -+&enet { -+ nvmem-cells = <&base_mac_addr>; -+ nvmem-cell-names = "mac-address"; -+}; -+ -+&usb_phy { -+ brcm,ioc = <1>; -+ status = "okay"; -+}; -+ -+&ehci { -+ status = "okay"; -+}; -+ -+&ohci { -+ status = "okay"; -+}; -+ -+&xhci { -+ status = "okay"; -+}; -+ -+&ports { -+ port@0 { -+ label = "lan4"; -+ }; -+ -+ port@1 { -+ label = "lan3"; -+ }; -+ -+ port@2 { -+ label = "lan2"; -+ }; -+ -+ port@3 { -+ label = "lan1"; -+ }; -+ -+ port@7 { -+ reg = <7>; -+ phy-mode = "internal"; -+ phy-handle = <&phy12>; -+ label = "wan"; -+ }; -+}; -+ -+&nandcs { -+ nand-ecc-strength = <4>; -+ nand-ecc-step-size = <512>; -+ nand-on-flash-bbt; -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ partitions { -+ compatible = "fixed-partitions"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ partition@0 { -+ compatible = "nvmem-cells"; -+ label = "cferom"; -+ reg = <0x0 0x100000>; -+ -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0 0x0 0x100000>; -+ -+ base_mac_addr: mac@106a0 { -+ reg = <0x106a0 0x6>; -+ }; -+ }; -+ -+ partition@100000 { -+ compatible = "brcm,bcm4908-firmware"; -+ label = "firmware"; -+ reg = <0x100000 0x4400000>; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts -new file mode 100644 -index 0000000000000..b63eefab48bd5 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906-tplink-archer-c2300-v1.dts -@@ -0,0 +1,182 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include -+#include -+#include -+ -+#include "bcm4906.dtsi" -+ -+/ { -+ compatible = "tplink,archer-c2300-v1", "brcm,bcm4906", "brcm,bcm4908"; -+ model = "TP-Link Archer C2300 V1"; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x00 0x00 0x00 0x20000000>; -+ }; -+ 
-+ leds { -+ compatible = "gpio-leds"; -+ -+ led-power { -+ function = LED_FUNCTION_POWER; -+ color = ; -+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-2ghz { -+ function = "2ghz"; -+ color = ; -+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-5ghz { -+ function = "5ghz"; -+ color = ; -+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-wan-amber { -+ function = LED_FUNCTION_WAN; -+ color = ; -+ gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; -+ }; -+ -+ led-wan-blue { -+ function = LED_FUNCTION_WAN; -+ color = ; -+ gpios = <&gpio0 10 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-lan { -+ function = LED_FUNCTION_LAN; -+ color = ; -+ gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-wps { -+ function = LED_FUNCTION_WPS; -+ color = ; -+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-usb2 { -+ function = "usb2"; -+ color = ; -+ gpios = <&gpio0 15 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-usb3 { -+ function = "usbd3"; -+ color = ; -+ gpios = <&gpio0 17 GPIO_ACTIVE_LOW>; -+ }; -+ -+ led-brightness { -+ function = LED_FUNCTION_BACKLIGHT; -+ color = ; -+ gpios = <&gpio0 19 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+ -+ gpio-keys-polled { -+ compatible = "gpio-keys-polled"; -+ poll-interval = <100>; -+ -+ brightness { -+ label = "LEDs"; -+ linux,code = ; -+ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>; -+ }; -+ -+ wps { -+ label = "WPS"; -+ linux,code = ; -+ gpios = <&gpio0 21 GPIO_ACTIVE_LOW>; -+ }; -+ -+ wifi { -+ label = "WiFi"; -+ linux,code = ; -+ gpios = <&gpio0 22 GPIO_ACTIVE_LOW>; -+ }; -+ -+ restart { -+ label = "Reset"; -+ linux,code = ; -+ gpios = <&gpio0 23 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+}; -+ -+&usb_phy { -+ brcm,ioc = <1>; -+ status = "okay"; -+}; -+ -+&ehci { -+ status = "okay"; -+}; -+ -+&ohci { -+ status = "okay"; -+}; -+ -+&xhci { -+ status = "okay"; -+}; -+ -+&ports { -+ port@0 { -+ label = "lan4"; -+ }; -+ -+ port@1 { -+ label = "lan3"; -+ }; -+ -+ port@2 { -+ label = "lan2"; -+ }; -+ -+ port@3 { -+ label = "lan1"; -+ }; -+ -+ port@7 { -+ reg = <7>; -+ phy-mode = "internal"; -+ phy-handle = <&phy12>; -+ label = "wan"; -+ }; -+}; -+ -+&nandcs { -+ nand-ecc-strength = <4>; -+ nand-ecc-step-size = <512>; -+ nand-on-flash-bbt; -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ partitions { -+ compatible = "brcm,bcm4908-partitions"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ partition@0 { -+ label = "cferom"; -+ reg = <0x0 0x100000>; -+ }; -+ -+ partition@100000 { -+ compatible = "brcm,bcm4908-firmware"; -+ reg = <0x100000 0x3900000>; -+ }; -+ -+ partition@5800000 { -+ compatible = "brcm,bcm4908-firmware"; -+ reg = <0x3a00000 0x3900000>; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906.dtsi -new file mode 100644 -index 0000000000000..d084c33d5ca82 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4906.dtsi -@@ -0,0 +1,26 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include "bcm4908.dtsi" -+ -+/ { -+ cpus { -+ /delete-node/ cpu@2; -+ -+ /delete-node/ cpu@3; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = , -+ , -+ , -+ ; -+ }; -+ -+ pmu { -+ compatible = "arm,cortex-a53-pmu"; -+ interrupts = , -+ ; -+ interrupt-affinity = <&cpu0>, <&cpu1>; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts -new file mode 100644 -index 0000000000000..169fbb7cfd342 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts -@@ -0,0 +1,159 @@ -+// 
SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include -+#include -+ -+#include "bcm4908.dtsi" -+ -+/ { -+ compatible = "asus,gt-ac5300", "brcm,bcm4908"; -+ model = "Asus GT-AC5300"; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x00 0x00 0x00 0x40000000>; -+ }; -+ -+ gpio-keys-polled { -+ compatible = "gpio-keys-polled"; -+ poll-interval = <100>; -+ -+ wifi { -+ label = "WiFi"; -+ linux,code = ; -+ gpios = <&gpio0 28 GPIO_ACTIVE_LOW>; -+ }; -+ -+ wps { -+ label = "WPS"; -+ linux,code = ; -+ gpios = <&gpio0 29 GPIO_ACTIVE_LOW>; -+ }; -+ -+ restart { -+ label = "Reset"; -+ linux,code = ; -+ gpios = <&gpio0 30 GPIO_ACTIVE_LOW>; -+ }; -+ -+ brightness { -+ label = "LEDs"; -+ linux,code = ; -+ gpios = <&gpio0 31 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+}; -+ -+&enet { -+ nvmem-cells = <&base_mac_addr>; -+ nvmem-cell-names = "mac-address"; -+}; -+ -+&usb_phy { -+ brcm,ioc = <1>; -+ status = "okay"; -+}; -+ -+&ehci { -+ status = "okay"; -+}; -+ -+&ohci { -+ status = "okay"; -+}; -+ -+&xhci { -+ status = "okay"; -+}; -+ -+&ports { -+ port@0 { -+ label = "lan2"; -+ }; -+ -+ port@1 { -+ label = "lan1"; -+ }; -+ -+ port@2 { -+ label = "lan6"; -+ }; -+ -+ port@3 { -+ label = "lan5"; -+ }; -+ -+ /* External BCM53134S switch */ -+ port@7 { -+ label = "sw"; -+ reg = <7>; -+ phy-mode = "rgmii"; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+}; -+ -+&mdio { -+ /* lan8 */ -+ ethernet-phy@0 { -+ reg = <0>; -+ }; -+ -+ /* lan7 */ -+ ethernet-phy@1 { -+ reg = <1>; -+ }; -+ -+ /* lan4 */ -+ ethernet-phy@2 { -+ reg = <2>; -+ }; -+ -+ /* lan3 */ -+ ethernet-phy@3 { -+ reg = <3>; -+ }; -+}; -+ -+&nandcs { -+ nand-ecc-strength = <4>; -+ nand-ecc-step-size = <512>; -+ nand-on-flash-bbt; -+ brcm,nand-has-wp; -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ partitions { -+ compatible = "brcm,bcm4908-partitions"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ partition@0 { -+ compatible = "nvmem-cells"; -+ label = "cferom"; -+ reg = <0x0 0x100000>; -+ -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0 0x0 0x100000>; -+ -+ base_mac_addr: mac@106a0 { -+ reg = <0x106a0 0x6>; -+ }; -+ }; -+ -+ partition@100000 { -+ compatible = "brcm,bcm4908-firmware"; -+ reg = <0x100000 0x5700000>; -+ }; -+ -+ partition@5800000 { -+ compatible = "brcm,bcm4908-firmware"; -+ reg = <0x5800000 0x5700000>; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-netgear-raxe500.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-netgear-raxe500.dts -new file mode 100644 -index 0000000000000..3c2cf2d238b6f ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-netgear-raxe500.dts -@@ -0,0 +1,50 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include "bcm4908.dtsi" -+ -+/ { -+ compatible = "netgear,raxe500", "brcm,bcm4908"; -+ model = "Netgear RAXE500"; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x00 0x00 0x00 0x40000000>; -+ }; -+}; -+ -+&ehci { -+ status = "okay"; -+}; -+ -+&ohci { -+ status = "okay"; -+}; -+ -+&xhci { -+ status = "okay"; -+}; -+ -+&ports { -+ port@0 { -+ label = "lan4"; -+ }; -+ -+ port@1 { -+ label = "lan3"; -+ }; -+ -+ port@2 { -+ label = "lan2"; -+ }; -+ -+ port@3 { -+ label = "lan1"; -+ }; -+ -+ port@7 { -+ reg = <7>; -+ phy-mode = "internal"; -+ phy-handle = <&phy12>; -+ label = "wan"; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi -new file mode 100644 -index 0000000000000..b7db95ce0bbf2 ---- /dev/null -+++ 
b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi -@@ -0,0 +1,339 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+#include -+#include -+#include -+#include -+ -+/dts-v1/; -+ -+/ { -+ interrupt-parent = <&gic>; -+ -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ aliases { -+ serial0 = &uart0; -+ }; -+ -+ chosen { -+ stdout-path = "serial0:115200n8"; -+ }; -+ -+ cpus { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "brcm,brahma-b53"; -+ reg = <0x0>; -+ enable-method = "spin-table"; -+ cpu-release-addr = <0x0 0xfff8>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "brcm,brahma-b53"; -+ reg = <0x1>; -+ enable-method = "spin-table"; -+ cpu-release-addr = <0x0 0xfff8>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu2: cpu@2 { -+ device_type = "cpu"; -+ compatible = "brcm,brahma-b53"; -+ reg = <0x2>; -+ enable-method = "spin-table"; -+ cpu-release-addr = <0x0 0xfff8>; -+ next-level-cache = <&l2>; -+ }; -+ -+ cpu3: cpu@3 { -+ device_type = "cpu"; -+ compatible = "brcm,brahma-b53"; -+ reg = <0x3>; -+ enable-method = "spin-table"; -+ cpu-release-addr = <0x0 0xfff8>; -+ next-level-cache = <&l2>; -+ }; -+ -+ l2: l2-cache0 { -+ compatible = "cache"; -+ }; -+ }; -+ -+ axi@81000000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x00 0x00 0x81000000 0x4000>; -+ -+ gic: interrupt-controller@1000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ #address-cells = <0>; -+ interrupt-controller; -+ reg = <0x1000 0x1000>, -+ <0x2000 0x2000>; -+ }; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = , -+ , -+ , -+ ; -+ }; -+ -+ pmu { -+ compatible = "arm,cortex-a53-pmu"; -+ interrupts = , -+ , -+ , -+ ; -+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; -+ }; -+ -+ clocks { -+ periph_clk: periph_clk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <50000000>; -+ clock-output-names = "periph"; -+ }; -+ }; -+ -+ soc { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x00 0x00 0x80000000 0x281000>; -+ -+ enet: ethernet@2000 { -+ compatible = "brcm,bcm4908-enet"; -+ reg = <0x2000 0x1000>; -+ -+ interrupts = , -+ ; -+ interrupt-names = "rx", "tx"; -+ }; -+ -+ usb_phy: usb-phy@c200 { -+ compatible = "brcm,bcm4908-usb-phy"; -+ reg = <0xc200 0x100>; -+ reg-names = "ctrl"; -+ power-domains = <&pmb BCM_PMB_HOST_USB>; -+ dr_mode = "host"; -+ brcm,has-xhci; -+ brcm,has-eohci; -+ #phy-cells = <1>; -+ status = "disabled"; -+ }; -+ -+ ehci: usb@c300 { -+ compatible = "generic-ehci"; -+ reg = <0xc300 0x100>; -+ interrupts = ; -+ phys = <&usb_phy PHY_TYPE_USB2>; -+ status = "disabled"; -+ }; -+ -+ ohci: usb@c400 { -+ compatible = "generic-ohci"; -+ reg = <0xc400 0x100>; -+ interrupts = ; -+ phys = <&usb_phy PHY_TYPE_USB2>; -+ status = "disabled"; -+ }; -+ -+ xhci: usb@d000 { -+ compatible = "generic-xhci"; -+ reg = <0xd000 0x8c8>; -+ interrupts = ; -+ phys = <&usb_phy PHY_TYPE_USB3>; -+ status = "disabled"; -+ }; -+ -+ bus@80000 { -+ compatible = "simple-bus"; -+ #size-cells = <1>; -+ #address-cells = <1>; -+ ranges = <0 0x80000 0x50000>; -+ -+ ethernet-switch@0 { -+ compatible = "brcm,bcm4908-switch"; -+ reg = <0x0 0x40000>, -+ <0x40000 0x110>, -+ <0x40340 0x30>, -+ <0x40380 0x30>, -+ <0x40600 0x34>, -+ <0x40800 0x208>; -+ reg-names = "core", "reg", "intrl2_0", -+ "intrl2_1", "fcb", "acb"; -+ interrupts = , -+ ; -+ brcm,num-gphy = <5>; -+ 
brcm,num-rgmii-ports = <2>; -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ ports: ports { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ port@0 { -+ reg = <0>; -+ phy-mode = "internal"; -+ phy-handle = <&phy8>; -+ }; -+ -+ port@1 { -+ reg = <1>; -+ phy-mode = "internal"; -+ phy-handle = <&phy9>; -+ }; -+ -+ port@2 { -+ reg = <2>; -+ phy-mode = "internal"; -+ phy-handle = <&phy10>; -+ }; -+ -+ port@3 { -+ reg = <3>; -+ phy-mode = "internal"; -+ phy-handle = <&phy11>; -+ }; -+ -+ port@8 { -+ reg = <8>; -+ phy-mode = "internal"; -+ ethernet = <&enet>; -+ -+ fixed-link { -+ speed = <1000>; -+ full-duplex; -+ }; -+ }; -+ }; -+ }; -+ -+ mdio: mdio@405c0 { -+ compatible = "brcm,unimac-mdio"; -+ reg = <0x405c0 0x8>; -+ reg-names = "mdio"; -+ #size-cells = <0>; -+ #address-cells = <1>; -+ -+ phy8: ethernet-phy@8 { -+ reg = <8>; -+ }; -+ -+ phy9: ethernet-phy@9 { -+ reg = <9>; -+ }; -+ -+ phy10: ethernet-phy@a { -+ reg = <10>; -+ }; -+ -+ phy11: ethernet-phy@b { -+ reg = <11>; -+ }; -+ -+ phy12: ethernet-phy@c { -+ reg = <12>; -+ }; -+ }; -+ }; -+ -+ procmon: bus@280000 { -+ compatible = "simple-bus"; -+ reg = <0x280000 0x1000>; -+ ranges; -+ -+ #address-cells = <1>; -+ #size-cells = <1>; -+ -+ pmb: power-controller@2800c0 { -+ compatible = "brcm,bcm4908-pmb"; -+ reg = <0x2800c0 0x40>; -+ #power-domain-cells = <1>; -+ }; -+ }; -+ }; -+ -+ bus@ff800000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x00 0x00 0xff800000 0x3000>; -+ -+ twd: timer-mfd@400 { -+ compatible = "brcm,bcm4908-twd", "simple-mfd", "syscon"; -+ reg = <0x400 0x4c>; -+ }; -+ -+ gpio0: gpio-controller@500 { -+ compatible = "brcm,bcm6345-gpio"; -+ reg-names = "dirout", "dat"; -+ reg = <0x500 0x28>, <0x528 0x28>; -+ -+ #gpio-cells = <2>; -+ gpio-controller; -+ }; -+ -+ uart0: serial@640 { -+ compatible = "brcm,bcm6345-uart"; -+ reg = <0x640 0x18>; -+ interrupts = ; -+ clocks = <&periph_clk>; -+ clock-names = "refclk"; -+ status = "okay"; -+ }; -+ -+ nand@1800 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand"; -+ reg = <0x1800 0x600>, <0x2000 0x10>; -+ reg-names = "nand", "nand-int-base"; -+ interrupts = ; -+ interrupt-names = "nand_ctlrdy"; -+ status = "okay"; -+ -+ nandcs: nand@0 { -+ compatible = "brcm,nandcs"; -+ reg = <0>; -+ }; -+ }; -+ -+ misc@2600 { -+ compatible = "brcm,misc", "simple-mfd"; -+ reg = <0x2600 0xe4>; -+ -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x00 0x2600 0xe4>; -+ -+ reset-controller@2644 { -+ compatible = "brcm,bcm4908-misc-pcie-reset"; -+ reg = <0x44 0x04>; -+ #reset-cells = <1>; -+ }; -+ }; -+ }; -+ -+ reboot { -+ compatible = "syscon-reboot"; -+ regmap = <&twd>; -+ offset = <0x34>; -+ mask = <1>; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912-asus-gt-ax6000.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912-asus-gt-ax6000.dts -new file mode 100644 -index 0000000000000..ed554666e95ea ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912-asus-gt-ax6000.dts -@@ -0,0 +1,19 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT -+ -+/dts-v1/; -+ -+#include "bcm4912.dtsi" -+ -+/ { -+ compatible = "asus,gt-ax6000", "brcm,bcm4912", "brcm,bcmbca"; -+ model = "Asus GT-AX6000"; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x00 0x00 0x00 0x40000000>; -+ }; -+}; -+ -+&uart0 { -+ status = "okay"; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi -new file 
mode 100644 -index 0000000000000..3d016c2ce6759 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4912.dtsi -@@ -0,0 +1,128 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. -+ */ -+ -+#include -+#include -+ -+/ { -+ compatible = "brcm,bcm4912", "brcm,bcmbca"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ interrupt-parent = <&gic>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ B53_0: cpu@0 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x0>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_1: cpu@1 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x1>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_2: cpu@2 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x2>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_3: cpu@3 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x3>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ L2_0: l2-cache0 { -+ compatible = "cache"; -+ }; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = , -+ , -+ , -+ ; -+ }; -+ -+ pmu: pmu { -+ compatible = "arm,cortex-a53-pmu"; -+ interrupts = , -+ , -+ , -+ ; -+ interrupt-affinity = <&B53_0>, <&B53_1>, -+ <&B53_2>, <&B53_3>; -+ }; -+ -+ clocks: clocks { -+ periph_clk: periph-clk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <200000000>; -+ }; -+ uart_clk: uart-clk { -+ compatible = "fixed-factor-clock"; -+ #clock-cells = <0>; -+ clocks = <&periph_clk>; -+ clock-div = <4>; -+ clock-mult = <1>; -+ }; -+ }; -+ -+ psci { -+ compatible = "arm,psci-0.2"; -+ method = "smc"; -+ }; -+ -+ axi@81000000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x81000000 0x8000>; -+ -+ gic: interrupt-controller@1000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ interrupts = ; -+ reg = <0x1000 0x1000>, -+ <0x2000 0x2000>, -+ <0x4000 0x2000>, -+ <0x6000 0x2000>; -+ }; -+ }; -+ -+ bus@ff800000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0xff800000 0x800000>; -+ -+ uart0: serial@12000 { -+ compatible = "arm,pl011", "arm,primecell"; -+ reg = <0x12000 0x1000>; -+ interrupts = ; -+ clocks = <&uart_clk>, <&uart_clk>; -+ clock-names = "uartclk", "apb_pclk"; -+ status = "disabled"; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi -new file mode 100644 -index 0000000000000..13629702f70b8 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm63158.dtsi -@@ -0,0 +1,128 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. 
-+ */ -+ -+#include -+#include -+ -+/ { -+ compatible = "brcm,bcm63158", "brcm,bcmbca"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ interrupt-parent = <&gic>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ B53_0: cpu@0 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x0>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_1: cpu@1 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x1>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_2: cpu@2 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x2>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_3: cpu@3 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x3>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ L2_0: l2-cache0 { -+ compatible = "cache"; -+ }; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = , -+ , -+ , -+ ; -+ }; -+ -+ pmu: pmu { -+ compatible = "arm,cortex-a53-pmu"; -+ interrupts = , -+ , -+ , -+ ; -+ interrupt-affinity = <&B53_0>, <&B53_1>, -+ <&B53_2>, <&B53_3>; -+ }; -+ -+ clocks: clocks { -+ periph_clk: periph-clk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <200000000>; -+ }; -+ uart_clk: uart-clk { -+ compatible = "fixed-factor-clock"; -+ #clock-cells = <0>; -+ clocks = <&periph_clk>; -+ clock-div = <4>; -+ clock-mult = <1>; -+ }; -+ }; -+ -+ psci { -+ compatible = "arm,psci-0.2"; -+ method = "smc"; -+ }; -+ -+ axi@81000000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x81000000 0x8000>; -+ -+ gic: interrupt-controller@1000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ interrupts = ; -+ reg = <0x1000 0x1000>, -+ <0x2000 0x2000>, -+ <0x4000 0x2000>, -+ <0x6000 0x2000>; -+ }; -+ }; -+ -+ bus@ff800000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0xff800000 0x800000>; -+ -+ uart0: serial@12000 { -+ compatible = "arm,pl011", "arm,primecell"; -+ reg = <0x12000 0x1000>; -+ interrupts = ; -+ clocks = <&uart_clk>, <&uart_clk>; -+ clock-names = "uartclk", "apb_pclk"; -+ status = "disabled"; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi -new file mode 100644 -index 0000000000000..29a880c6c8588 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm6858.dtsi -@@ -0,0 +1,121 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. 
-+ */ -+ -+#include -+#include -+ -+/ { -+ compatible = "brcm,bcm6858", "brcm,bcmbca"; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ interrupt-parent = <&gic>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ B53_0: cpu@0 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x0>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_1: cpu@1 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x1>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_2: cpu@2 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x2>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ -+ B53_3: cpu@3 { -+ compatible = "brcm,brahma-b53"; -+ device_type = "cpu"; -+ reg = <0x0 0x3>; -+ next-level-cache = <&L2_0>; -+ enable-method = "psci"; -+ }; -+ L2_0: l2-cache0 { -+ compatible = "cache"; -+ }; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = , -+ , -+ , -+ ; -+ }; -+ -+ pmu: pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = , -+ , -+ , -+ ; -+ interrupt-affinity = <&B53_0>, <&B53_1>, -+ <&B53_2>, <&B53_3>; -+ }; -+ -+ clocks: clocks { -+ periph_clk:periph-clk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <200000000>; -+ }; -+ }; -+ -+ psci { -+ compatible = "arm,psci-0.2"; -+ method = "smc"; -+ }; -+ -+ axi@81000000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x81000000 0x8000>; -+ -+ gic: interrupt-controller@1000 { -+ compatible = "arm,gic-400"; -+ #interrupt-cells = <3>; -+ interrupt-controller; -+ reg = <0x1000 0x1000>, /* GICD */ -+ <0x2000 0x2000>, /* GICC */ -+ <0x4000 0x2000>, /* GICH */ -+ <0x6000 0x2000>; /* GICV */ -+ interrupts = ; -+ }; -+ }; -+ -+ bus@ff800000 { -+ compatible = "simple-bus"; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0xff800000 0x62000>; -+ -+ uart0: serial@640 { -+ compatible = "brcm,bcm6345-uart"; -+ reg = <0x640 0x18>; -+ interrupts = ; -+ clocks = <&periph_clk>; -+ clock-names = "refclk"; -+ status = "disabled"; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts -new file mode 100644 -index 0000000000000..a3623e6f6919c ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm94912.dts -@@ -0,0 +1,30 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. -+ */ -+ -+/dts-v1/; -+ -+#include "bcm4912.dtsi" -+ -+/ { -+ model = "Broadcom BCM94912 Reference Board"; -+ compatible = "brcm,bcm94912", "brcm,bcm4912", "brcm,bcmbca"; -+ -+ aliases { -+ serial0 = &uart0; -+ }; -+ -+ chosen { -+ stdout-path = "serial0:115200n8"; -+ }; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x0 0x0 0x0 0x08000000>; -+ }; -+}; -+ -+&uart0 { -+ status = "okay"; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts -new file mode 100644 -index 0000000000000..eba07e0b1ca6f ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm963158.dts -@@ -0,0 +1,30 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. 
-+ */ -+ -+/dts-v1/; -+ -+#include "bcm63158.dtsi" -+ -+/ { -+ model = "Broadcom BCM963158 Reference Board"; -+ compatible = "brcm,bcm963158", "brcm,bcm63158", "brcm,bcmbca"; -+ -+ aliases { -+ serial0 = &uart0; -+ }; -+ -+ chosen { -+ stdout-path = "serial0:115200n8"; -+ }; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x0 0x0 0x0 0x08000000>; -+ }; -+}; -+ -+&uart0 { -+ status = "okay"; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts -new file mode 100644 -index 0000000000000..0cbf582f5d545 ---- /dev/null -+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm96858.dts -@@ -0,0 +1,30 @@ -+// SPDX-License-Identifier: (GPL-2.0+ OR MIT) -+/* -+ * Copyright 2022 Broadcom Ltd. -+ */ -+ -+/dts-v1/; -+ -+#include "bcm6858.dtsi" -+ -+/ { -+ model = "Broadcom BCM96858 Reference Board"; -+ compatible = "brcm,bcm96858", "brcm,bcm6858", "brcm,bcmbca"; -+ -+ aliases { -+ serial0 = &uart0; -+ }; -+ -+ chosen { -+ stdout-path = "serial0:115200n8"; -+ }; -+ -+ memory@0 { -+ device_type = "memory"; -+ reg = <0x0 0x0 0x0 0x08000000>; -+ }; -+}; -+ -+&uart0 { -+ status = "okay"; -+}; -diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts -index ec19fbf928a14..12a4b1c03390c 100644 ---- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts -+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts -@@ -111,8 +111,8 @@ - compatible = "silabs,si3226x"; - reg = <0>; - spi-max-frequency = <5000000>; -- spi-cpha = <1>; -- spi-cpol = <1>; -+ spi-cpha; -+ spi-cpol; - pl022,hierarchy = <0>; - pl022,interface = <0>; - pl022,slave-tx-disable = <0>; -@@ -135,8 +135,8 @@ - at25,byte-len = <0x8000>; - at25,addr-mode = <2>; - at25,page-size = <64>; -- spi-cpha = <1>; -- spi-cpol = <1>; -+ spi-cpha; -+ spi-cpol; - pl022,hierarchy = <0>; - pl022,interface = <0>; - pl022,slave-tx-disable = <0>; -diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi -index 2cfeaf3b0a876..8c218689fef70 100644 ---- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi -+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi -@@ -687,7 +687,7 @@ - }; - }; - -- sata: ahci@663f2000 { -+ sata: sata@663f2000 { - compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x663f2000 0x1000>; - dma-coherent; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts -index e22c5e77fecdc..9615f3b9ee608 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts -@@ -110,7 +110,7 @@ - &i2c0 { - status = "okay"; - -- pca9547@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts -index 79f155dedb2d0..e662677a6e28f 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts -@@ -15,6 +15,7 @@ - compatible = "fsl,ls1012a-rdb", "fsl,ls1012a"; - - aliases { -+ serial0 = &duart0; - mmc0 = &esdhc0; - mmc1 = &esdhc1; - }; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts -index bfd14b64567e4..2f92e62ecafe9 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts -@@ -272,11 +272,6 @@ - vcc-supply = <&sb_3v3>; - }; - -- rtc@51 
{ -- compatible = "nxp,pcf2129"; -- reg = <0x51>; -- }; -- - eeprom@56 { - compatible = "atmel,24c512"; - reg = <0x56>; -@@ -318,6 +313,15 @@ - - }; - -+&i2c1 { -+ status = "okay"; -+ -+ rtc@51 { -+ compatible = "nxp,pcf2129"; -+ reg = <0x51>; -+ }; -+}; -+ - &enetc_port1 { - phy-handle = <&qds_phy1>; - phy-connection-type = "rgmii-id"; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts -index fea167d222cfe..14856bc79b221 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts -@@ -70,7 +70,7 @@ - &i2c0 { - status = "okay"; - -- pca9547@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -index 01b01e3204118..35d1939e690b0 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi -@@ -536,9 +536,9 @@ - clock-names = "i2c"; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(1)>; -- dmas = <&edma0 1 39>, -- <&edma0 1 38>; -- dma-names = "tx", "rx"; -+ dmas = <&edma0 1 38>, -+ <&edma0 1 39>; -+ dma-names = "rx", "tx"; - status = "disabled"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts -index eec62c63dafe2..9ee9928f71b49 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts -@@ -76,7 +76,7 @@ - &i2c0 { - status = "okay"; - -- pca9547@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -index 687fea6d8afa4..4e7bd04d97984 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -@@ -499,9 +499,9 @@ - interrupts = ; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(2)>; -- dmas = <&edma0 1 39>, -- <&edma0 1 38>; -- dma-names = "tx", "rx"; -+ dmas = <&edma0 1 38>, -+ <&edma0 1 39>; -+ dma-names = "rx", "tx"; - status = "disabled"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts -index 41d8b15f25a54..aa52ff73ff9e0 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts -@@ -53,7 +53,7 @@ - &i2c0 { - status = "okay"; - -- i2c-switch@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -index 1bfbce69cc8b7..ee8e932628d17 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts -@@ -136,7 +136,7 @@ - &i2c0 { - status = "okay"; - -- i2c-switch@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts -index 3063851c2fb91..a9c6682a3955e 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts -@@ -38,7 +38,6 @@ - powerdn { - label = "External Power Down"; - gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; -- interrupts = <&gpio1 17 
IRQ_TYPE_EDGE_FALLING>; - linux,code = ; - }; - -@@ -46,7 +45,6 @@ - admin { - label = "ADMIN button"; - gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>; -- interrupts = <&gpio3 8 IRQ_TYPE_EDGE_RISING>; - linux,code = ; - }; - }; -@@ -247,7 +245,7 @@ - &i2c3 { - status = "okay"; - -- i2c-switch@70 { -+ i2c-mux@70 { - compatible = "nxp,pca9540"; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -index f85e437f80b73..63441028622a6 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -@@ -758,6 +758,9 @@ - little-endian; - #address-cells = <1>; - #size-cells = <0>; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(1)>; - status = "disabled"; - }; - -@@ -767,6 +770,9 @@ - little-endian; - #address-cells = <1>; - #size-cells = <0>; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(1)>; - status = "disabled"; - }; - -@@ -847,7 +853,7 @@ - }; - - cluster1_core0_watchdog: wdt@c000000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc000000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -857,7 +863,7 @@ - }; - - cluster1_core1_watchdog: wdt@c010000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc010000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -867,7 +873,7 @@ - }; - - cluster1_core2_watchdog: wdt@c020000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc020000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -877,7 +883,7 @@ - }; - - cluster1_core3_watchdog: wdt@c030000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc030000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -887,7 +893,7 @@ - }; - - cluster2_core0_watchdog: wdt@c100000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc100000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -897,7 +903,7 @@ - }; - - cluster2_core1_watchdog: wdt@c110000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc110000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -907,7 +913,7 @@ - }; - - cluster2_core2_watchdog: wdt@c120000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc120000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -@@ -917,7 +923,7 @@ - }; - - cluster2_core3_watchdog: wdt@c130000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc130000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>, -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi -index 10d2fe0919651..8d96d18c3697a 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi -@@ -44,7 +44,7 @@ - - 
&i2c0 { - status = "okay"; -- pca9547@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - reg = <0x77>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi -index 4b71c4fcb35f6..787e408da0024 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi -@@ -44,7 +44,7 @@ - - &i2c0 { - status = "okay"; -- pca9547@75 { -+ i2c-mux@75 { - compatible = "nxp,pca9547"; - reg = <0x75>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi -index 801ba9612d361..12e59777363fe 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi -@@ -387,7 +387,7 @@ - }; - - cluster1_core0_watchdog: wdt@c000000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc000000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -397,7 +397,7 @@ - }; - - cluster1_core1_watchdog: wdt@c010000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc010000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -407,7 +407,7 @@ - }; - - cluster2_core0_watchdog: wdt@c100000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc100000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -417,7 +417,7 @@ - }; - - cluster2_core1_watchdog: wdt@c110000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc110000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -427,7 +427,7 @@ - }; - - cluster3_core0_watchdog: wdt@c200000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc200000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -437,7 +437,7 @@ - }; - - cluster3_core1_watchdog: wdt@c210000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc210000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -447,7 +447,7 @@ - }; - - cluster4_core0_watchdog: wdt@c300000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc300000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -457,7 +457,7 @@ - }; - - cluster4_core1_watchdog: wdt@c310000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xc310000 0x0 0x1000>; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(4)>, -@@ -525,6 +525,9 @@ - little-endian; - #address-cells = <1>; - #size-cells = <0>; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(2)>; - status = "disabled"; - }; - -@@ -534,6 +537,9 @@ - little-endian; - #address-cells = <1>; - #size-cells = <0>; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(2)>; - status = "disabled"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi -index 
afb455210bd07..d32a52ab00a42 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi -@@ -54,7 +54,7 @@ - &i2c0 { - status = "okay"; - -- i2c-switch@77 { -+ i2c-mux@77 { - compatible = "nxp,pca9547"; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi -index c4b1a59ba424b..1bc7f538f6905 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi -+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi -@@ -719,7 +719,7 @@ - clock-names = "i2c"; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>; -- scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>; -+ scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; - status = "disabled"; - }; - -@@ -768,7 +768,7 @@ - clock-names = "i2c"; - clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL - QORIQ_CLK_PLL_DIV(16)>; -- scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; -+ scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>; - status = "disabled"; - }; - -@@ -1369,6 +1369,9 @@ - #address-cells = <1>; - #size-cells = <0>; - little-endian; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(2)>; - status = "disabled"; - }; - -@@ -1379,6 +1382,9 @@ - little-endian; - #address-cells = <1>; - #size-cells = <0>; -+ clock-frequency = <2500000>; -+ clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL -+ QORIQ_CLK_PLL_DIV(2)>; - status = "disabled"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi -index a79f42a9618ec..639220dbff008 100644 ---- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi -@@ -38,9 +38,9 @@ conn_subsys: bus@5b000000 { - interrupts = ; - reg = <0x5b010000 0x10000>; - clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>, -- <&sdhc0_lpcg IMX_LPCG_CLK_5>, -- <&sdhc0_lpcg IMX_LPCG_CLK_0>; -- clock-names = "ipg", "per", "ahb"; -+ <&sdhc0_lpcg IMX_LPCG_CLK_0>, -+ <&sdhc0_lpcg IMX_LPCG_CLK_5>; -+ clock-names = "ipg", "ahb", "per"; - power-domains = <&pd IMX_SC_R_SDHC_0>; - status = "disabled"; - }; -@@ -49,9 +49,9 @@ conn_subsys: bus@5b000000 { - interrupts = ; - reg = <0x5b020000 0x10000>; - clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>, -- <&sdhc1_lpcg IMX_LPCG_CLK_5>, -- <&sdhc1_lpcg IMX_LPCG_CLK_0>; -- clock-names = "ipg", "per", "ahb"; -+ <&sdhc1_lpcg IMX_LPCG_CLK_0>, -+ <&sdhc1_lpcg IMX_LPCG_CLK_5>; -+ clock-names = "ipg", "ahb", "per"; - power-domains = <&pd IMX_SC_R_SDHC_1>; - fsl,tuning-start-tap = <20>; - fsl,tuning-step= <2>; -@@ -62,9 +62,9 @@ conn_subsys: bus@5b000000 { - interrupts = ; - reg = <0x5b030000 0x10000>; - clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>, -- <&sdhc2_lpcg IMX_LPCG_CLK_5>, -- <&sdhc2_lpcg IMX_LPCG_CLK_0>; -- clock-names = "ipg", "per", "ahb"; -+ <&sdhc2_lpcg IMX_LPCG_CLK_0>, -+ <&sdhc2_lpcg IMX_LPCG_CLK_5>; -+ clock-names = "ipg", "ahb", "per"; - power-domains = <&pd IMX_SC_R_SDHC_2>; - status = "disabled"; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi -index 960a802b8b6ee..c33892711138f 100644 ---- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi -@@ -26,6 +26,8 @@ dma_subsys: bus@5a000000 { - clocks = <&uart0_lpcg IMX_LPCG_CLK_4>, - <&uart0_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "baud"; -+ assigned-clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>; -+ assigned-clock-rates = <80000000>; - power-domains = <&pd 
IMX_SC_R_UART_0>; - status = "disabled"; - }; -@@ -36,6 +38,8 @@ dma_subsys: bus@5a000000 { - clocks = <&uart1_lpcg IMX_LPCG_CLK_4>, - <&uart1_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "baud"; -+ assigned-clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>; -+ assigned-clock-rates = <80000000>; - power-domains = <&pd IMX_SC_R_UART_1>; - status = "disabled"; - }; -@@ -46,6 +50,8 @@ dma_subsys: bus@5a000000 { - clocks = <&uart2_lpcg IMX_LPCG_CLK_4>, - <&uart2_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "baud"; -+ assigned-clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>; -+ assigned-clock-rates = <80000000>; - power-domains = <&pd IMX_SC_R_UART_2>; - status = "disabled"; - }; -@@ -56,6 +62,8 @@ dma_subsys: bus@5a000000 { - clocks = <&uart3_lpcg IMX_LPCG_CLK_4>, - <&uart3_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "baud"; -+ assigned-clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>; -+ assigned-clock-rates = <80000000>; - power-domains = <&pd IMX_SC_R_UART_3>; - status = "disabled"; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi -index 6f5e63696ec0a..bb18354c10f08 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi -@@ -70,7 +70,7 @@ - &ecspi2 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_espi2>; -- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; -+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; - status = "okay"; - - eeprom@0 { -@@ -166,6 +166,7 @@ - pinctrl-0 = <&pinctrl_uart3>; - assigned-clocks = <&clk IMX8MM_CLK_UART3>; - assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>; -+ uart-has-rtscts; - status = "okay"; - }; - -@@ -185,7 +186,7 @@ - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 - MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 - MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 -- MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 -+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 - >; - }; - -@@ -236,6 +237,8 @@ - fsl,pins = < - MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40 - MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40 -+ MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x40 -+ MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x40 - >; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi -index e033d0257b5a1..ff5324e94ee82 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi -@@ -136,7 +136,7 @@ - rohm,reset-snvs-powered; - - #clock-cells = <0>; -- clocks = <&osc_32k 0>; -+ clocks = <&osc_32k>; - clock-output-names = "clk-32k-out"; - - regulators { -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts -index e99e7644ff392..49d7470812eef 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts -@@ -123,8 +123,8 @@ - - ethphy: ethernet-phy@0 { - reg = <0>; -- reset-assert-us = <100>; -- reset-deassert-us = <100>; -+ reset-assert-us = <1>; -+ reset-deassert-us = <15000>; - reset-gpios = <&gpio4 27 GPIO_ACTIVE_LOW>; - }; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts -index 74c09891600f2..0e8f0d7161ad0 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts -@@ -214,7 +214,7 @@ - pinctrl-0 = <&pinctrl_i2c3>; - status = "okay"; - -- i2cmux@70 { -+ 
i2c-mux@70 { - compatible = "nxp,pca9540"; - reg = <0x70>; - #address-cells = <1>; -@@ -247,7 +247,7 @@ - compatible = "wlf,wm8960"; - reg = <0x1a>; - clocks = <&clk IMX8MM_CLK_SAI1_ROOT>; -- clock-names = "mclk1"; -+ clock-names = "mclk"; - wlf,shared-lrclk; - #sound-dai-cells = <0>; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h -index a003e6af33533..56271abfb7e09 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h -+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h -@@ -601,7 +601,7 @@ - #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 - #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 - #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 --#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 -+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1 - #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 - #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 - #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi -index 1dc9d187601c5..a0bd540f27d3d 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi -@@ -89,12 +89,12 @@ - pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <125>; -- touchscreen-size-x = /bits/ 16 <4008>; -+ touchscreen-size-x = <4008>; - ti,y-min = /bits/ 16 <282>; -- touchscreen-size-y = /bits/ 16 <3864>; -+ touchscreen-size-y = <3864>; - ti,x-plate-ohms = /bits/ 16 <180>; -- touchscreen-max-pressure = /bits/ 16 <255>; -- touchscreen-average-samples = /bits/ 16 <10>; -+ touchscreen-max-pressure = <255>; -+ touchscreen-average-samples = <10>; - ti,debounce-tol = /bits/ 16 <3>; - ti,debounce-rep = /bits/ 16 <1>; - ti,settle-delay-usec = /bits/ 16 <150>; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi -index 8e4a0ce99790b..7ea909a4c1d5e 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi -@@ -103,12 +103,14 @@ - - &usbotg1 { - dr_mode = "otg"; -+ over-current-active-low; - vbus-supply = <®_usb_otg1_vbus>; - status = "okay"; - }; - - &usbotg2 { - dr_mode = "host"; -+ disable-over-current; - status = "okay"; - }; - -@@ -166,7 +168,7 @@ - fsl,pins = < - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 - MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 -- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 -+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 - MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 - >; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi -index b7c91bdc21dd9..806ee21651d1f 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi -@@ -139,12 +139,14 @@ - - &usbotg1 { - dr_mode = "otg"; -+ over-current-active-low; - vbus-supply = <®_usb_otg1_vbus>; - status = "okay"; - }; - - &usbotg2 { - dr_mode = "host"; -+ disable-over-current; - vbus-supply = <®_usb_otg2_vbus>; - status = "okay"; - }; -@@ -231,7 +233,7 @@ - fsl,pins = < - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 - MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 -- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 -+ 
MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 - MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 - >; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi -index d2ffd62a3bd46..942fed2eed643 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi -@@ -166,12 +166,14 @@ - - &usbotg1 { - dr_mode = "otg"; -+ over-current-active-low; - vbus-supply = <®_usb_otg1_vbus>; - status = "okay"; - }; - - &usbotg2 { - dr_mode = "host"; -+ disable-over-current; - vbus-supply = <®_usb_otg2_vbus>; - status = "okay"; - }; -@@ -280,7 +282,7 @@ - fsl,pins = < - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 - MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 -- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 -+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 - MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 - >; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -index bafd5c8ea4e28..f7e41e5c2c7bc 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -@@ -675,6 +675,7 @@ - &usbotg2 { - dr_mode = "host"; - vbus-supply = <®_usb2_vbus>; -+ over-current-active-low; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi -index 2f632e8ca3880..2a67122c5624c 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi -@@ -530,7 +530,7 @@ - #address-cells = <1>; - #size-cells = <1>; - -- imx8mm_uid: unique-id@410 { -+ imx8mm_uid: unique-id@4 { - reg = <0x4 0x8>; - }; - -@@ -1014,10 +1014,10 @@ - clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>; - }; - -- gpmi: nand-controller@33002000{ -+ gpmi: nand-controller@33002000 { - compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand"; - #address-cells = <1>; -- #size-cells = <1>; -+ #size-cells = <0>; - reg = <0x33002000 0x2000>, <0x33004000 0x4000>; - reg-names = "gpmi-nand", "bch"; - interrupts = ; -diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi -index 376ca8ff72133..4fc22448e411f 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi -@@ -81,7 +81,7 @@ - &ecspi2 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_espi2>; -- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; -+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; - status = "okay"; - - eeprom@0 { -@@ -176,6 +176,7 @@ - pinctrl-0 = <&pinctrl_uart3>; - assigned-clocks = <&clk IMX8MN_CLK_UART3>; - assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_80M>; -+ uart-has-rtscts; - status = "okay"; - }; - -@@ -202,7 +203,7 @@ - MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 - MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 - MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 -- MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 -+ MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 - >; - }; - -@@ -259,6 +260,8 @@ - fsl,pins = < - MX8MN_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40 - MX8MN_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40 -+ MX8MN_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x40 -+ MX8MN_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x40 - >; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts -index 7dfee715a2c4d..d8ce217c60166 100644 ---- 
a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts -@@ -59,6 +59,10 @@ - interrupts = <3 IRQ_TYPE_LEVEL_LOW>; - rohm,reset-snvs-powered; - -+ #clock-cells = <0>; -+ clocks = <&osc_32k 0>; -+ clock-output-names = "clk-32k-out"; -+ - regulators { - buck1_reg: BUCK1 { - regulator-name = "buck1"; -diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi -index b16c7caf34c11..faafefe562e4b 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi -@@ -70,12 +70,12 @@ - pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>; - - ti,x-min = /bits/ 16 <125>; -- touchscreen-size-x = /bits/ 16 <4008>; -+ touchscreen-size-x = <4008>; - ti,y-min = /bits/ 16 <282>; -- touchscreen-size-y = /bits/ 16 <3864>; -+ touchscreen-size-y = <3864>; - ti,x-plate-ohms = /bits/ 16 <180>; -- touchscreen-max-pressure = /bits/ 16 <255>; -- touchscreen-average-samples = /bits/ 16 <10>; -+ touchscreen-max-pressure = <255>; -+ touchscreen-average-samples = <10>; - ti,debounce-tol = /bits/ 16 <3>; - ti,debounce-rep = /bits/ 16 <1>; - ti,settle-delay-usec = /bits/ 16 <150>; -@@ -98,11 +98,17 @@ - #address-cells = <1>; - #size-cells = <0>; - -- ethphy: ethernet-phy@4 { -+ ethphy: ethernet-phy@4 { /* AR8033 or ADIN1300 */ - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <4>; - reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; - reset-assert-us = <10000>; -+ /* -+ * Deassert delay: -+ * ADIN1300 requires 5ms. -+ * AR8033 requires 1ms. -+ */ -+ reset-deassert-us = <20000>; - }; - }; - }; -@@ -345,7 +351,7 @@ - MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 - MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 - MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f -- MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 -+ MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159 - >; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi -index da6c942fb7f9d..16a5efba17f39 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi -@@ -263,8 +263,9 @@ - ranges; - - sai2: sai@30020000 { -- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; -+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; - reg = <0x30020000 0x10000>; -+ #sound-dai-cells = <0>; - interrupts = ; - clocks = <&clk IMX8MN_CLK_SAI2_IPG>, - <&clk IMX8MN_CLK_DUMMY>, -@@ -277,8 +278,9 @@ - }; - - sai3: sai@30030000 { -- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; -+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; - reg = <0x30030000 0x10000>; -+ #sound-dai-cells = <0>; - interrupts = ; - clocks = <&clk IMX8MN_CLK_SAI3_IPG>, - <&clk IMX8MN_CLK_DUMMY>, -@@ -291,8 +293,9 @@ - }; - - sai5: sai@30050000 { -- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; -+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; - reg = <0x30050000 0x10000>; -+ #sound-dai-cells = <0>; - interrupts = ; - clocks = <&clk IMX8MN_CLK_SAI5_IPG>, - <&clk IMX8MN_CLK_DUMMY>, -@@ -307,8 +310,9 @@ - }; - - sai6: sai@30060000 { -- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; -+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; - reg = <0x30060000 0x10000>; -+ #sound-dai-cells = <0>; - interrupts = ; - clocks = <&clk IMX8MN_CLK_SAI6_IPG>, - <&clk IMX8MN_CLK_DUMMY>, -@@ -364,8 +368,9 @@ - }; - - sai7: sai@300b0000 { -- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; -+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; - reg = <0x300b0000 0x10000>; -+ #sound-dai-cells = <0>; - interrupts = 
; - clocks = <&clk IMX8MN_CLK_SAI7_IPG>, - <&clk IMX8MN_CLK_DUMMY>, -@@ -533,7 +538,7 @@ - #address-cells = <1>; - #size-cells = <1>; - -- imx8mn_uid: unique-id@410 { -+ imx8mn_uid: unique-id@4 { - reg = <0x4 0x8>; - }; - -@@ -998,7 +1003,7 @@ - gpmi: nand-controller@33002000 { - compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand"; - #address-cells = <1>; -- #size-cells = <1>; -+ #size-cells = <0>; - reg = <0x33002000 0x2000>, <0x33004000 0x4000>; - reg-names = "gpmi-nand", "bch"; - interrupts = ; -diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts -index 7b99fad6e4d6e..5c9fb39dd99e5 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts -@@ -285,21 +285,21 @@ - &iomuxc { - pinctrl_eqos: eqosgrp { - fsl,pins = < -- MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3 -- MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3 -- MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91 -- MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91 -- MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91 -- MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91 -- MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91 -- MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91 -- MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f -- MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f -- MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f -- MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f -- MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f -- MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f -- MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x19 -+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2 -+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x2 -+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90 -+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90 -+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90 -+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90 -+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90 -+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90 -+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x16 -+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x16 -+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x16 -+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x16 -+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x16 -+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x16 -+ MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22 0x10 - >; - }; - -@@ -351,21 +351,21 @@ - - pinctrl_gpio_led: gpioledgrp { - fsl,pins = < -- MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x19 -+ MX8MP_IOMUXC_NAND_READY_B__GPIO3_IO16 0x140 - >; - }; - - pinctrl_i2c1: i2c1grp { - fsl,pins = < -- MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c3 -- MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c3 -+ MX8MP_IOMUXC_I2C1_SCL__I2C1_SCL 0x400001c2 -+ MX8MP_IOMUXC_I2C1_SDA__I2C1_SDA 0x400001c2 - >; - }; - - pinctrl_i2c3: i2c3grp { - fsl,pins = < -- MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c3 -- MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c3 -+ MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2 -+ MX8MP_IOMUXC_I2C3_SDA__I2C3_SDA 0x400001c2 - >; - }; - -@@ -377,20 +377,20 @@ - - pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { - fsl,pins = < -- MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41 -+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40 - >; - }; - - pinctrl_uart2: uart2grp { - fsl,pins = < -- MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49 -- MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49 -+ MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x140 -+ MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x140 - >; - }; - - pinctrl_usb1_vbus: usb1grp { - fsl,pins = < -- 
MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x19 -+ MX8MP_IOMUXC_GPIO1_IO14__USB2_OTG_PWR 0x10 - >; - }; - -@@ -402,7 +402,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - -@@ -414,7 +414,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - -@@ -426,7 +426,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts -index 984a6b9ded8d7..6aa720bafe289 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts -@@ -116,48 +116,48 @@ - &iomuxc { - pinctrl_eqos: eqosgrp { - fsl,pins = < -- MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3 -- MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3 -- MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91 -- MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91 -- MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91 -- MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91 -- MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91 -- MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91 -- MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f -- MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f -- MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f -- MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f -- MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f -- MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f -+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x2 -+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x2 -+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x90 -+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x90 -+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x90 -+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x90 -+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x90 -+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x90 -+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x16 -+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x16 -+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x16 -+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x16 -+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x16 -+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x16 - MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x10 - >; - }; - - pinctrl_i2c2: i2c2grp { - fsl,pins = < -- MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3 -- MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c3 -+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2 -+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2 - >; - }; - - pinctrl_i2c2_gpio: i2c2gpiogrp { - fsl,pins = < -- MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1e3 -- MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1e3 -+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x1e2 -+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x1e2 - >; - }; - - pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp { - fsl,pins = < -- MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x41 -+ MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40 - >; - }; - - pinctrl_uart1: uart1grp { - fsl,pins = < -- MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x49 -- 
MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x49 -+ MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x40 -+ MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x40 - >; - }; - -@@ -175,7 +175,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - -@@ -187,7 +187,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - -@@ -199,7 +199,7 @@ - MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 - MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 - MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 -- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc1 -+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0 - >; - }; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi -index fc178eebf8aa4..8e189d8997941 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi -@@ -98,7 +98,6 @@ - - regulators { - buck1: BUCK1 { -- regulator-compatible = "BUCK1"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <2187500>; - regulator-boot-on; -@@ -107,7 +106,6 @@ - }; - - buck2: BUCK2 { -- regulator-compatible = "BUCK2"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <2187500>; - regulator-boot-on; -@@ -116,7 +114,6 @@ - }; - - buck4: BUCK4 { -- regulator-compatible = "BUCK4"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; - regulator-boot-on; -@@ -124,7 +121,6 @@ - }; - - buck5: BUCK5 { -- regulator-compatible = "BUCK5"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; - regulator-boot-on; -@@ -132,7 +128,6 @@ - }; - - buck6: BUCK6 { -- regulator-compatible = "BUCK6"; - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <3400000>; - regulator-boot-on; -@@ -140,7 +135,6 @@ - }; - - ldo1: LDO1 { -- regulator-compatible = "LDO1"; - regulator-min-microvolt = <1600000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; -@@ -148,7 +142,6 @@ - }; - - ldo2: LDO2 { -- regulator-compatible = "LDO2"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1150000>; - regulator-boot-on; -@@ -156,7 +149,6 @@ - }; - - ldo3: LDO3 { -- regulator-compatible = "LDO3"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; -@@ -164,7 +156,6 @@ - }; - - ldo4: LDO4 { -- regulator-compatible = "LDO4"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; -@@ -172,7 +163,6 @@ - }; - - ldo5: LDO5 { -- regulator-compatible = "LDO5"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi -index 9b07b26230a11..ab670b5d641b1 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi -@@ -358,7 +358,7 @@ - #address-cells = <1>; - #size-cells = <1>; - -- imx8mp_uid: unique-id@420 { -+ imx8mp_uid: unique-id@8 { - reg = <0x8 0x8>; - }; - -@@ -912,7 +912,7 @@ - interrupts = ; - phys = <&usb3_phy0>, <&usb3_phy0>; - phy-names = "usb2-phy", "usb3-phy"; -- snps,dis-u2-freeclk-exists-quirk; -+ 
snps,gfladj-refclk-lpm-sel-quirk; - }; - - }; -@@ -953,7 +953,7 @@ - interrupts = ; - phys = <&usb3_phy1>, <&usb3_phy1>; - phy-names = "usb2-phy", "usb3-phy"; -- snps,dis-u2-freeclk-exists-quirk; -+ snps,gfladj-refclk-lpm-sel-quirk; - }; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi -index 460ef0d86540a..c86cd20d4e709 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi -@@ -967,6 +967,7 @@ - interrupts = <20 IRQ_TYPE_LEVEL_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_gauge>; -+ power-supplies = <&bq25895>; - maxim,over-heat-temp = <700>; - maxim,over-volt = <4500>; - maxim,rsns-microohm = <5000>; -diff --git a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts -index f70fb32b96b0c..cf14ab5f7404c 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts -@@ -133,7 +133,7 @@ - pinctrl-0 = <&pinctrl_i2c1>; - status = "okay"; - -- i2cmux@70 { -+ i2c-mux@70 { - compatible = "nxp,pca9546"; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_i2c1_pca9546>; -@@ -216,7 +216,7 @@ - pinctrl-0 = <&pinctrl_i2c4>; - status = "okay"; - -- pca9546: i2cmux@70 { -+ pca9546: i2c-mux@70 { - compatible = "nxp,pca9546"; - reg = <0x70>; - #address-cells = <1>; -diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts -index 5d5aa6537225f..6e6182709d220 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts -@@ -339,7 +339,7 @@ - bus-width = <4>; - non-removable; - no-sd; -- no-emmc; -+ no-mmc; - status = "okay"; - - brcmf: wifi@1 { -@@ -359,7 +359,7 @@ - cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - bus-width = <4>; - no-sdio; -- no-emmc; -+ no-mmc; - disable-wp; - status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi -index 4066b16126552..2a698c5b87bcd 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi -@@ -524,11 +524,9 @@ - <&clk IMX8MQ_VIDEO_PLL1>, - <&clk IMX8MQ_VIDEO_PLL1_OUT>; - assigned-clock-rates = <0>, <0>, <0>, <594000000>; -- interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>; -- interconnect-names = "dram"; - status = "disabled"; - -- port@0 { -+ port { - lcdif_mipi_dsi: endpoint { - remote-endpoint = <&mipi_dsi_lcdif_in>; - }; -@@ -559,7 +557,7 @@ - #address-cells = <1>; - #size-cells = <1>; - -- imx8mq_uid: soc-uid@410 { -+ imx8mq_uid: soc-uid@4 { - reg = <0x4 0x8>; - }; - -@@ -1125,8 +1123,8 @@ - #address-cells = <1>; - #size-cells = <0>; - -- port@0 { -- reg = <0>; -+ port@1 { -+ reg = <1>; - - csi1_mipi_ep: endpoint { - remote-endpoint = <&csi1_ep>; -@@ -1177,8 +1175,8 @@ - #address-cells = <1>; - #size-cells = <0>; - -- port@0 { -- reg = <0>; -+ port@1 { -+ reg = <1>; - - csi2_mipi_ep: endpoint { - remote-endpoint = <&csi2_ep>; -diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts -index ce9d3f0b98fc0..607cd6b4e9721 100644 ---- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts -+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts -@@ -82,8 +82,8 @@ - pinctrl-0 = <&pinctrl_usdhc2>; - bus-width = <4>; - vmmc-supply = <®_usdhc2_vmmc>; -- cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>; -- wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>; -+ 
cd-gpios = <&lsio_gpio5 22 GPIO_ACTIVE_LOW>; -+ wp-gpios = <&lsio_gpio5 21 GPIO_ACTIVE_HIGH>; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi -index aebbe2b84aa13..a143f38bc78bd 100644 ---- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi -@@ -155,7 +155,7 @@ - }; - - clk: clock-controller { -- compatible = "fsl,imx8qxp-clk", "fsl,scu-clk"; -+ compatible = "fsl,imx8qm-clk", "fsl,scu-clk"; - #clock-cells = <2>; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts -index 863232a47004c..4497763d57ccf 100644 ---- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts -+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts -@@ -61,7 +61,7 @@ - pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>; - status = "okay"; - -- i2c-switch@71 { -+ i2c-mux@71 { - compatible = "nxp,pca9646", "nxp,pca9546"; - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi -index 2d5c1a348716a..6eabec2602e23 100644 ---- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi -+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi -@@ -1087,7 +1087,7 @@ - }; - - watchdog0: watchdog@e8a06000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xe8a06000 0x0 0x1000>; - interrupts = ; - clocks = <&crg_ctrl HI3660_OSC32K>, -@@ -1096,7 +1096,7 @@ - }; - - watchdog1: watchdog@e8a07000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xe8a07000 0x0 0x1000>; - interrupts = ; - clocks = <&crg_ctrl HI3660_OSC32K>, -diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi -index dde9371dc5451..e4860b8a638ec 100644 ---- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi -+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi -@@ -840,7 +840,7 @@ - }; - - watchdog0: watchdog@f8005000 { -- compatible = "arm,sp805-wdt", "arm,primecell"; -+ compatible = "arm,sp805", "arm,primecell"; - reg = <0x0 0xf8005000 0x0 0x1000>; - interrupts = ; - clocks = <&ao_ctrl HI6220_WDT0_PCLK>, -diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi -index 163f33b46e4f7..f4270cf189962 100644 ---- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi -+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi -@@ -502,7 +502,7 @@ - }; - - usb0: usb@ffb00000 { -- compatible = "snps,dwc2"; -+ compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; - reg = <0xffb00000 0x40000>; - interrupts = ; - phys = <&usbphy0>; -@@ -515,7 +515,7 @@ - }; - - usb1: usb@ffb40000 { -- compatible = "snps,dwc2"; -+ compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; - reg = <0xffb40000 0x40000>; - interrupts = ; - phys = <&usbphy0>; -@@ -628,7 +628,7 @@ - }; - - qspi: spi@ff8d2000 { -- compatible = "cdns,qspi-nor"; -+ compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; - #address-cells = <1>; - #size-cells = <0>; - reg = <0xff8d2000 0x100>, -diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts -index c5eb3604dd5b7..119db6b541b7b 100644 ---- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts -+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts -@@ -71,10 +71,6 @@ - - &spi0 { - flash@0 { -- spi-max-frequency = <108000000>; -- 
spi-rx-bus-width = <4>; -- spi-tx-bus-width = <4>; -- - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; -@@ -112,7 +108,6 @@ - - &usb3 { - usb-phy = <&usb3_phy>; -- status = "disabled"; - }; - - &mdio { -diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts -index 04da07ae44208..b276dd77df83c 100644 ---- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts -+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts -@@ -18,6 +18,7 @@ - - aliases { - spi0 = &spi0; -+ ethernet0 = ð0; - ethernet1 = ð1; - mmc0 = &sdhci0; - mmc1 = &sdhci1; -@@ -124,9 +125,12 @@ - /delete-property/ mrvl,i2c-fast-mode; - status = "okay"; - -+ /* MCP7940MT-I/MNY RTC */ - rtc@6f { - compatible = "microchip,mcp7940x"; - reg = <0x6f>; -+ interrupt-parent = <&gpiosb>; -+ interrupts = <5 0>; /* GPIO2_5 */ - }; - }; - -@@ -138,7 +142,9 @@ - /* - * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property - * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and -- * 2 size cells and also expects that the second range starts at 16 MB offset. If these -+ * 2 size cells and also expects that the second range starts at 16 MB offset. Also it -+ * expects that first range uses same address for PCI (child) and CPU (parent) cells (so -+ * no remapping) and that this address is the lowest from all specified ranges. If these - * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address - * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window - * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB. -@@ -147,6 +153,9 @@ - * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7 - * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf - * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33 -+ * Bug related to requirement of same child and parent addresses for first range is fixed -+ * in U-Boot version 2022.04 by following commit: -+ * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17 - */ - #address-cells = <3>; - #size-cells = <2>; -diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi -index 9acc5d2b5a002..0adc194e46d15 100644 ---- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi -+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi -@@ -497,7 +497,7 @@ - * (totaling 127 MiB) for MEM. 
- */ - ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */ -- 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ -+ 0x81000000 0 0x00000000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ - interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &pcie_intc 0>, - <0 0 0 2 &pcie_intc 1>, -diff --git a/arch/arm64/boot/dts/marvell/cn9130.dtsi b/arch/arm64/boot/dts/marvell/cn9130.dtsi -index a2b7e5ec979d3..327b04134134f 100644 ---- a/arch/arm64/boot/dts/marvell/cn9130.dtsi -+++ b/arch/arm64/boot/dts/marvell/cn9130.dtsi -@@ -11,6 +11,13 @@ - model = "Marvell Armada CN9130 SoC"; - compatible = "marvell,cn9130", "marvell,armada-ap807-quad", - "marvell,armada-ap807"; -+ -+ aliases { -+ gpio1 = &cp0_gpio1; -+ gpio2 = &cp0_gpio2; -+ spi1 = &cp0_spi0; -+ spi2 = &cp0_spi1; -+ }; - }; - - /* -@@ -35,3 +42,11 @@ - #undef CP11X_PCIE0_BASE - #undef CP11X_PCIE1_BASE - #undef CP11X_PCIE2_BASE -+ -+&cp0_gpio1 { -+ status = "okay"; -+}; -+ -+&cp0_gpio2 { -+ status = "okay"; -+}; -diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts -index 7d369fdd3117f..9d20cabf4f699 100644 ---- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts -+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts -@@ -26,14 +26,14 @@ - stdout-path = "serial0:921600n8"; - }; - -- cpus_fixed_vproc0: fixedregulator@0 { -+ cpus_fixed_vproc0: regulator-vproc-buck0 { - compatible = "regulator-fixed"; - regulator-name = "vproc_buck0"; - regulator-min-microvolt = <1000000>; - regulator-max-microvolt = <1000000>; - }; - -- cpus_fixed_vproc1: fixedregulator@1 { -+ cpus_fixed_vproc1: regulator-vproc-buck1 { - compatible = "regulator-fixed"; - regulator-name = "vproc_buck1"; - regulator-min-microvolt = <1000000>; -@@ -50,7 +50,7 @@ - id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; - }; - -- usb_p0_vbus: regulator@2 { -+ usb_p0_vbus: regulator-usb-p0-vbus { - compatible = "regulator-fixed"; - regulator-name = "p0_vbus"; - regulator-min-microvolt = <5000000>; -@@ -59,7 +59,7 @@ - enable-active-high; - }; - -- usb_p1_vbus: regulator@3 { -+ usb_p1_vbus: regulator-usb-p1-vbus { - compatible = "regulator-fixed"; - regulator-name = "p1_vbus"; - regulator-min-microvolt = <5000000>; -@@ -68,7 +68,7 @@ - enable-active-high; - }; - -- usb_p2_vbus: regulator@4 { -+ usb_p2_vbus: regulator-usb-p2-vbus { - compatible = "regulator-fixed"; - regulator-name = "p2_vbus"; - regulator-min-microvolt = <5000000>; -@@ -77,7 +77,7 @@ - enable-active-high; - }; - -- usb_p3_vbus: regulator@5 { -+ usb_p3_vbus: regulator-usb-p3-vbus { - compatible = "regulator-fixed"; - regulator-name = "p3_vbus"; - regulator-min-microvolt = <5000000>; -diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi -index a9cca9c146fdc..993a03d7fff14 100644 ---- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi -@@ -160,70 +160,70 @@ - #clock-cells = <0>; - }; - -- clk26m: oscillator@0 { -+ clk26m: oscillator-26m { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <26000000>; - clock-output-names = "clk26m"; - }; - -- clk32k: oscillator@1 { -+ clk32k: oscillator-32k { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <32768>; - clock-output-names = "clk32k"; - }; - -- clkfpc: oscillator@2 { -+ clkfpc: oscillator-50m { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <50000000>; - clock-output-names = "clkfpc"; - }; - -- clkaud_ext_i_0: oscillator@3 { -+ clkaud_ext_i_0: 
oscillator-aud0 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <6500000>; - clock-output-names = "clkaud_ext_i_0"; - }; - -- clkaud_ext_i_1: oscillator@4 { -+ clkaud_ext_i_1: oscillator-aud1 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <196608000>; - clock-output-names = "clkaud_ext_i_1"; - }; - -- clkaud_ext_i_2: oscillator@5 { -+ clkaud_ext_i_2: oscillator-aud2 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <180633600>; - clock-output-names = "clkaud_ext_i_2"; - }; - -- clki2si0_mck_i: oscillator@6 { -+ clki2si0_mck_i: oscillator-i2s0 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; - clock-output-names = "clki2si0_mck_i"; - }; - -- clki2si1_mck_i: oscillator@7 { -+ clki2si1_mck_i: oscillator-i2s1 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; - clock-output-names = "clki2si1_mck_i"; - }; - -- clki2si2_mck_i: oscillator@8 { -+ clki2si2_mck_i: oscillator-i2s2 { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; - clock-output-names = "clki2si2_mck_i"; - }; - -- clktdmin_mclk_i: oscillator@9 { -+ clktdmin_mclk_i: oscillator-mclk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <30000000>; -@@ -266,7 +266,7 @@ - reg = <0 0x10005000 0 0x1000>; - }; - -- pio: pinctrl@10005000 { -+ pio: pinctrl@1000b000 { - compatible = "mediatek,mt2712-pinctrl"; - reg = <0 0x1000b000 0 0x1000>; - mediatek,pctl-regmap = <&syscfg_pctl_a>; -diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi -index 9bdf5145966c5..dde9ce137b4f1 100644 ---- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi -@@ -88,14 +88,14 @@ - interrupts = ; - }; - -- clk26m: oscillator@0 { -+ clk26m: oscillator-26m { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <26000000>; - clock-output-names = "clk26m"; - }; - -- clk32k: oscillator@1 { -+ clk32k: oscillator-32k { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <32768>; -@@ -117,7 +117,7 @@ - compatible = "simple-bus"; - ranges; - -- gic: interrupt-controller@0c000000 { -+ gic: interrupt-controller@c000000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <4>; - interrupt-parent = <&gic>; -@@ -138,7 +138,7 @@ - - }; - -- sysirq: intpol-controller@0c53a650 { -+ sysirq: intpol-controller@c53a650 { - compatible = "mediatek,mt6779-sysirq", - "mediatek,mt6577-sysirq"; - interrupt-controller; -diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi -index 15616231022a2..c3677d77e0a45 100644 ---- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi -@@ -95,7 +95,7 @@ - }; - }; - -- clk26m: oscillator@0 { -+ clk26m: oscillator-26m { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <26000000>; -diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts -index 2f77dc40b9b82..6b99d903b4791 100644 ---- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts -+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts -@@ -49,7 +49,7 @@ - wps { - label = "wps"; - linux,code = ; -- gpios = <&pio 102 GPIO_ACTIVE_HIGH>; -+ gpios = <&pio 102 GPIO_ACTIVE_LOW>; - }; - }; - -diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -index 
890a942ec6082..a4c48b2abd209 100644 ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -428,6 +428,7 @@ - pwm: pwm@11006000 { - compatible = "mediatek,mt7622-pwm"; - reg = <0 0x11006000 0 0x1000>; -+ #pwm-cells = <2>; - interrupts = ; - clocks = <&topckgen CLK_TOP_PWM_SEL>, - <&pericfg CLK_PERI_PWM_PD>, -diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi -index 8e9cf36a9a41a..6529962edd4e9 100644 ---- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi -@@ -281,6 +281,10 @@ - }; - }; - -+&gic { -+ mediatek,broken-save-restore-fw; -+}; -+ - &gpu { - mali-supply = <&mt6358_vgpu_reg>; - sram-supply = <&mt6358_vsram_gpu_reg>; -diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi -index 409cf827970cf..81fde34ffd52a 100644 ---- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi -@@ -299,6 +299,15 @@ - method = "smc"; - }; - -+ clk13m: fixed-factor-clock-13m { -+ compatible = "fixed-factor-clock"; -+ #clock-cells = <0>; -+ clocks = <&clk26m>; -+ clock-div = <2>; -+ clock-mult = <1>; -+ clock-output-names = "clk13m"; -+ }; -+ - clk26m: oscillator { - compatible = "fixed-clock"; - #clock-cells = <0>; -@@ -610,8 +619,7 @@ - "mediatek,mt6765-timer"; - reg = <0 0x10017000 0 0x1000>; - interrupts = ; -- clocks = <&topckgen CLK_TOP_CLK13M>; -- clock-names = "clk13m"; -+ clocks = <&clk13m>; - }; - - iommu: iommu@10205000 { -@@ -1212,7 +1220,7 @@ - ; - interrupt-names = "job", "mmu", "gpu"; - -- clocks = <&topckgen CLK_TOP_MFGPLL_CK>; -+ clocks = <&mfgcfg CLK_MFG_BG3D>; - - power-domains = - <&spm MT8183_POWER_DOMAIN_MFG_CORE0>, -diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi -index 9757138a8bbd8..72f444405ebfe 100644 ---- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi -@@ -39,9 +39,10 @@ - reg = <0x000>; - enable-method = "psci"; - clock-frequency = <1701000000>; -- cpu-idle-states = <&cpuoff_l &clusteroff_l>; -+ cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>; - next-level-cache = <&l2_0>; -- capacity-dmips-mhz = <530>; -+ performance-domains = <&performance 0>; -+ capacity-dmips-mhz = <427>; - }; - - cpu1: cpu@100 { -@@ -50,9 +51,10 @@ - reg = <0x100>; - enable-method = "psci"; - clock-frequency = <1701000000>; -- cpu-idle-states = <&cpuoff_l &clusteroff_l>; -+ cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>; - next-level-cache = <&l2_0>; -- capacity-dmips-mhz = <530>; -+ performance-domains = <&performance 0>; -+ capacity-dmips-mhz = <427>; - }; - - cpu2: cpu@200 { -@@ -61,9 +63,10 @@ - reg = <0x200>; - enable-method = "psci"; - clock-frequency = <1701000000>; -- cpu-idle-states = <&cpuoff_l &clusteroff_l>; -+ cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>; - next-level-cache = <&l2_0>; -- capacity-dmips-mhz = <530>; -+ performance-domains = <&performance 0>; -+ capacity-dmips-mhz = <427>; - }; - - cpu3: cpu@300 { -@@ -72,9 +75,10 @@ - reg = <0x300>; - enable-method = "psci"; - clock-frequency = <1701000000>; -- cpu-idle-states = <&cpuoff_l &clusteroff_l>; -+ cpu-idle-states = <&cpu_sleep_l &cluster_sleep_l>; - next-level-cache = <&l2_0>; -- capacity-dmips-mhz = <530>; -+ performance-domains = <&performance 0>; -+ capacity-dmips-mhz = <427>; - }; - - cpu4: cpu@400 { -@@ -83,8 +87,9 @@ - reg = <0x400>; - enable-method = "psci"; - clock-frequency = <2171000000>; -- 
cpu-idle-states = <&cpuoff_b &clusteroff_b>; -+ cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>; - next-level-cache = <&l2_1>; -+ performance-domains = <&performance 1>; - capacity-dmips-mhz = <1024>; - }; - -@@ -94,8 +99,9 @@ - reg = <0x500>; - enable-method = "psci"; - clock-frequency = <2171000000>; -- cpu-idle-states = <&cpuoff_b &clusteroff_b>; -+ cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>; - next-level-cache = <&l2_1>; -+ performance-domains = <&performance 1>; - capacity-dmips-mhz = <1024>; - }; - -@@ -105,8 +111,9 @@ - reg = <0x600>; - enable-method = "psci"; - clock-frequency = <2171000000>; -- cpu-idle-states = <&cpuoff_b &clusteroff_b>; -+ cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>; - next-level-cache = <&l2_1>; -+ performance-domains = <&performance 1>; - capacity-dmips-mhz = <1024>; - }; - -@@ -116,8 +123,9 @@ - reg = <0x700>; - enable-method = "psci"; - clock-frequency = <2171000000>; -- cpu-idle-states = <&cpuoff_b &clusteroff_b>; -+ cpu-idle-states = <&cpu_sleep_b &cluster_sleep_b>; - next-level-cache = <&l2_1>; -+ performance-domains = <&performance 1>; - capacity-dmips-mhz = <1024>; - }; - -@@ -135,19 +143,16 @@ - core3 { - cpu = <&cpu3>; - }; -- }; -- -- cluster1 { -- core0 { -+ core4 { - cpu = <&cpu4>; - }; -- core1 { -+ core5 { - cpu = <&cpu5>; - }; -- core2 { -+ core6 { - cpu = <&cpu6>; - }; -- core3 { -+ core7 { - cpu = <&cpu7>; - }; - }; -@@ -168,8 +173,8 @@ - }; - - idle-states { -- entry-method = "arm,psci"; -- cpuoff_l: cpuoff_l { -+ entry-method = "psci"; -+ cpu_sleep_l: cpu-sleep-l { - compatible = "arm,idle-state"; - arm,psci-suspend-param = <0x00010001>; - local-timer-stop; -@@ -177,7 +182,7 @@ - exit-latency-us = <140>; - min-residency-us = <780>; - }; -- cpuoff_b: cpuoff_b { -+ cpu_sleep_b: cpu-sleep-b { - compatible = "arm,idle-state"; - arm,psci-suspend-param = <0x00010001>; - local-timer-stop; -@@ -185,7 +190,7 @@ - exit-latency-us = <145>; - min-residency-us = <720>; - }; -- clusteroff_l: clusteroff_l { -+ cluster_sleep_l: cluster-sleep-l { - compatible = "arm,idle-state"; - arm,psci-suspend-param = <0x01010002>; - local-timer-stop; -@@ -193,7 +198,7 @@ - exit-latency-us = <155>; - min-residency-us = <860>; - }; -- clusteroff_b: clusteroff_b { -+ cluster_sleep_b: cluster-sleep-b { - compatible = "arm,idle-state"; - arm,psci-suspend-param = <0x01010002>; - local-timer-stop; -@@ -237,6 +242,12 @@ - compatible = "simple-bus"; - ranges; - -+ performance: performance-controller@11bc10 { -+ compatible = "mediatek,cpufreq-hw"; -+ reg = <0 0x0011bc10 0 0x120>, <0 0x0011bd30 0 0x120>; -+ #performance-domain-cells = <1>; -+ }; -+ - gic: interrupt-controller@c000000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <4>; -@@ -433,7 +444,7 @@ - clock-names = "spi", "sf", "axi"; - #address-cells = <1>; - #size-cells = <0>; -- status = "disable"; -+ status = "disabled"; - }; - - i2c3: i2c3@11cb0000 { -diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi -index fcddec14738d8..54514d62398f2 100644 ---- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi -+++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi -@@ -17,7 +17,7 @@ - }; - - firmware { -- optee: optee@4fd00000 { -+ optee: optee { - compatible = "linaro,optee-tz"; - method = "smc"; - }; -@@ -210,7 +210,7 @@ - }; - }; - -- i2c0_pins_a: i2c0@0 { -+ i2c0_pins_a: i2c0 { - pins1 { - pinmux = , - ; -@@ -218,7 +218,7 @@ - }; - }; - -- i2c2_pins_a: i2c2@0 { -+ i2c2_pins_a: i2c2 { - pins1 { - pinmux = , - ; -diff --git 
a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi -index 787ebcec121d6..a6405059636c3 100644 ---- a/arch/arm64/boot/dts/microchip/sparx5.dtsi -+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi -@@ -61,7 +61,7 @@ - interrupt-affinity = <&cpu0>, <&cpu1>; - }; - -- psci { -+ psci: psci { - compatible = "arm,psci-0.2"; - method = "smc"; - }; -diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi -index 9d1a082de3e29..32bb76b3202a0 100644 ---- a/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi -+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb_common.dtsi -@@ -6,6 +6,18 @@ - /dts-v1/; - #include "sparx5.dtsi" - -+&psci { -+ status = "disabled"; -+}; -+ -+&cpu0 { -+ enable-method = "spin-table"; -+}; -+ -+&cpu1 { -+ enable-method = "spin-table"; -+}; -+ - &uart0 { - status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi -index e94f8add1a400..5b0bc9aa1a426 100644 ---- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi -+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi -@@ -1079,7 +1079,7 @@ - - ccplex@e000000 { - compatible = "nvidia,tegra186-ccplex-cluster"; -- reg = <0x0 0x0e000000 0x0 0x3fffff>; -+ reg = <0x0 0x0e000000 0x0 0x400000>; - - nvidia,bpmp = <&bpmp>; - }; -@@ -1583,6 +1583,7 @@ - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x0 0x30000000 0x50000>; -+ no-memory-wc; - - cpu_bpmp_tx: sram@4e000 { - reg = <0x4e000 0x1000>; -@@ -1635,7 +1636,7 @@ - iommus = <&smmu TEGRA186_SID_BPMP>; - mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB - TEGRA_HSP_DB_MASTER_BPMP>; -- shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>; -+ shmem = <&cpu_bpmp_tx>, <&cpu_bpmp_rx>; - #clock-cells = <1>; - #reset-cells = <1>; - #power-domain-cells = <1>; -diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi -index c4058ee36fecb..1a444705517f3 100644 ---- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi -+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi -@@ -75,7 +75,7 @@ - - /* SDMMC1 (SD/MMC) */ - mmc@3400000 { -- cd-gpios = <&gpio TEGRA194_MAIN_GPIO(A, 0) GPIO_ACTIVE_LOW>; -+ cd-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 7) GPIO_ACTIVE_LOW>; - }; - - /* SDMMC4 (eMMC) */ -diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi -index c8250a3f7891f..ca71b71d801a6 100644 ---- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi -+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi -@@ -818,9 +818,8 @@ - <&bpmp TEGRA194_CLK_HDA2CODEC_2X>; - clock-names = "hda", "hda2hdmi", "hda2codec_2x"; - resets = <&bpmp TEGRA194_RESET_HDA>, -- <&bpmp TEGRA194_RESET_HDA2HDMICODEC>, -- <&bpmp TEGRA194_RESET_HDA2CODEC_2X>; -- reset-names = "hda", "hda2hdmi", "hda2codec_2x"; -+ <&bpmp TEGRA194_RESET_HDA2HDMICODEC>; -+ reset-names = "hda", "hda2hdmi"; - power-domains = <&bpmp TEGRA194_POWER_DOMAIN_DISP>; - interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>, - <&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>; -@@ -2250,6 +2249,7 @@ - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x0 0x0 0x40000000 0x50000>; -+ no-memory-wc; - - cpu_bpmp_tx: sram@4e000 { - reg = <0x4e000 0x1000>; -@@ -2268,7 +2268,7 @@ - compatible = "nvidia,tegra186-bpmp"; - mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB - TEGRA_HSP_DB_MASTER_BPMP>; -- shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>; -+ shmem = <&cpu_bpmp_tx>, <&cpu_bpmp_rx>; - #clock-cells = <1>; - #reset-cells = <1>; - #power-domain-cells = <1>; -diff --git 
a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi -index 26b3f98a211c2..f88dc820389b2 100644 ---- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi -+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi -@@ -1355,8 +1355,9 @@ - <&tegra_car TEGRA210_CLK_DFLL_REF>, - <&tegra_car TEGRA210_CLK_I2C5>; - clock-names = "soc", "ref", "i2c"; -- resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>; -- reset-names = "dvco"; -+ resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>, -+ <&tegra_car 155>; -+ reset-names = "dvco", "dfll"; - #clock-cells = <0>; - clock-output-names = "dfllCPU_out"; - status = "disabled"; -diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi -index f0efb3a628040..2b47845722206 100644 ---- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi -+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi -@@ -122,21 +122,22 @@ - }; - }; - -- sysram@40000000 { -+ sram@40000000 { - compatible = "nvidia,tegra234-sysram", "mmio-sram"; -- reg = <0x0 0x40000000 0x0 0x50000>; -+ reg = <0x0 0x40000000 0x0 0x80000>; - #address-cells = <1>; - #size-cells = <1>; -- ranges = <0x0 0x0 0x40000000 0x50000>; -+ ranges = <0x0 0x0 0x40000000 0x80000>; -+ no-memory-wc; - -- cpu_bpmp_tx: shmem@4e000 { -- reg = <0x4e000 0x1000>; -+ cpu_bpmp_tx: sram@70000 { -+ reg = <0x70000 0x1000>; - label = "cpu-bpmp-tx"; - pool; - }; - -- cpu_bpmp_rx: shmem@4f000 { -- reg = <0x4f000 0x1000>; -+ cpu_bpmp_rx: sram@71000 { -+ reg = <0x71000 0x1000>; - label = "cpu-bpmp-rx"; - pool; - }; -@@ -146,7 +147,7 @@ - compatible = "nvidia,tegra234-bpmp", "nvidia,tegra186-bpmp"; - mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB - TEGRA_HSP_DB_MASTER_BPMP>; -- shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>; -+ shmem = <&cpu_bpmp_tx>, <&cpu_bpmp_rx>; - #clock-cells = <1>; - #reset-cells = <1>; - #power-domain-cells = <1>; -diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts -index f3c0dbfd0a232..ad4c2ccec63ee 100644 ---- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts -+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts -@@ -5,9 +5,847 @@ - - /dts-v1/; - --#include "apq8016-sbc.dtsi" -+#include "msm8916-pm8916.dtsi" -+#include -+#include -+#include -+#include -+#include - - / { - model = "Qualcomm Technologies, Inc. 
APQ 8016 SBC"; - compatible = "qcom,apq8016-sbc", "qcom,apq8016"; -+ -+ aliases { -+ serial0 = &blsp1_uart2; -+ serial1 = &blsp1_uart1; -+ usid0 = &pm8916_0; -+ i2c0 = &blsp_i2c2; -+ i2c1 = &blsp_i2c6; -+ i2c3 = &blsp_i2c4; -+ spi0 = &blsp_spi5; -+ spi1 = &blsp_spi3; -+ }; -+ -+ chosen { -+ stdout-path = "serial0"; -+ }; -+ -+ camera_vdddo_1v8: camera-vdddo-1v8 { -+ compatible = "regulator-fixed"; -+ regulator-name = "camera_vdddo"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-always-on; -+ }; -+ -+ camera_vdda_2v8: camera-vdda-2v8 { -+ compatible = "regulator-fixed"; -+ regulator-name = "camera_vdda"; -+ regulator-min-microvolt = <2800000>; -+ regulator-max-microvolt = <2800000>; -+ regulator-always-on; -+ }; -+ -+ camera_vddd_1v5: camera-vddd-1v5 { -+ compatible = "regulator-fixed"; -+ regulator-name = "camera_vddd"; -+ regulator-min-microvolt = <1500000>; -+ regulator-max-microvolt = <1500000>; -+ regulator-always-on; -+ }; -+ -+ reserved-memory { -+ ramoops@bff00000 { -+ compatible = "ramoops"; -+ reg = <0x0 0xbff00000 0x0 0x100000>; -+ -+ record-size = <0x20000>; -+ console-size = <0x20000>; -+ ftrace-size = <0x20000>; -+ }; -+ }; -+ -+ usb2513 { -+ compatible = "smsc,usb3503"; -+ reset-gpios = <&pm8916_gpios 3 GPIO_ACTIVE_LOW>; -+ initial-mode = <1>; -+ }; -+ -+ usb_id: usb-id { -+ compatible = "linux,extcon-usb-gpio"; -+ id-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&usb_id_default>; -+ }; -+ -+ hdmi-out { -+ compatible = "hdmi-connector"; -+ type = "a"; -+ -+ port { -+ hdmi_con: endpoint { -+ remote-endpoint = <&adv7533_out>; -+ }; -+ }; -+ }; -+ -+ gpio-keys { -+ compatible = "gpio-keys"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ autorepeat; -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&msm_key_volp_n_default>; -+ -+ button@0 { -+ label = "Volume Up"; -+ linux,code = ; -+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+ -+ leds { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&msmgpio_leds>, -+ <&pm8916_gpios_leds>, -+ <&pm8916_mpps_leds>; -+ -+ compatible = "gpio-leds"; -+ -+ led@1 { -+ label = "apq8016-sbc:green:user1"; -+ gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "heartbeat"; -+ default-state = "off"; -+ }; -+ -+ led@2 { -+ label = "apq8016-sbc:green:user2"; -+ gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "mmc0"; -+ default-state = "off"; -+ }; -+ -+ led@3 { -+ label = "apq8016-sbc:green:user3"; -+ gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "mmc1"; -+ default-state = "off"; -+ }; -+ -+ led@4 { -+ label = "apq8016-sbc:green:user4"; -+ gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "none"; -+ panic-indicator; -+ default-state = "off"; -+ }; -+ -+ led@5 { -+ label = "apq8016-sbc:yellow:wlan"; -+ gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "phy0tx"; -+ default-state = "off"; -+ }; -+ -+ led@6 { -+ label = "apq8016-sbc:blue:bt"; -+ gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>; -+ linux,default-trigger = "bluetooth-power"; -+ default-state = "off"; -+ }; -+ }; -+}; -+ -+&blsp_dma { -+ status = "okay"; -+}; -+ -+&blsp_i2c2 { -+ /* On Low speed expansion */ -+ status = "okay"; -+ label = "LS-I2C0"; -+}; -+ -+&blsp_i2c4 { -+ /* On High speed expansion */ -+ status = "okay"; -+ label = "HS-I2C2"; -+ -+ adv_bridge: bridge@39 { -+ status = "okay"; -+ -+ compatible = "adi,adv7533"; -+ reg = <0x39>; -+ -+ interrupt-parent = <&msmgpio>; -+ interrupts = <31 
IRQ_TYPE_EDGE_FALLING>; -+ -+ adi,dsi-lanes = <4>; -+ clocks = <&rpmcc RPM_SMD_BB_CLK2>; -+ clock-names = "cec"; -+ -+ pd-gpios = <&msmgpio 32 GPIO_ACTIVE_HIGH>; -+ -+ avdd-supply = <&pm8916_l6>; -+ v1p2-supply = <&pm8916_l6>; -+ v3p3-supply = <&pm8916_l17>; -+ -+ pinctrl-names = "default","sleep"; -+ pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>; -+ pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>; -+ #sound-dai-cells = <1>; -+ -+ ports { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ port@0 { -+ reg = <0>; -+ adv7533_in: endpoint { -+ remote-endpoint = <&dsi0_out>; -+ }; -+ }; -+ -+ port@1 { -+ reg = <1>; -+ adv7533_out: endpoint { -+ remote-endpoint = <&hdmi_con>; -+ }; -+ }; -+ }; -+ }; -+}; -+ -+&blsp_i2c6 { -+ /* On Low speed expansion */ -+ status = "okay"; -+ label = "LS-I2C1"; -+}; -+ -+&blsp_spi3 { -+ /* On High speed expansion */ -+ status = "okay"; -+ label = "HS-SPI1"; -+}; -+ -+&blsp_spi5 { -+ /* On Low speed expansion */ -+ status = "okay"; -+ label = "LS-SPI0"; -+}; -+ -+&blsp1_uart1 { -+ status = "okay"; -+ label = "LS-UART0"; -+}; -+ -+&blsp1_uart2 { -+ status = "okay"; -+ label = "LS-UART1"; -+}; -+ -+&camss { -+ status = "okay"; -+ ports { -+ port@0 { -+ reg = <0>; -+ csiphy0_ep: endpoint { -+ clock-lanes = <1>; -+ data-lanes = <0 2>; -+ remote-endpoint = <&ov5640_ep>; -+ status = "okay"; -+ }; -+ }; -+ }; -+}; -+ -+&cci { -+ status = "okay"; -+}; -+ -+&cci_i2c0 { -+ camera_rear@3b { -+ compatible = "ovti,ov5640"; -+ reg = <0x3b>; -+ -+ enable-gpios = <&msmgpio 34 GPIO_ACTIVE_HIGH>; -+ reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&camera_rear_default>; -+ -+ clocks = <&gcc GCC_CAMSS_MCLK0_CLK>; -+ clock-names = "xclk"; -+ clock-frequency = <23880000>; -+ -+ DOVDD-supply = <&camera_vdddo_1v8>; -+ AVDD-supply = <&camera_vdda_2v8>; -+ DVDD-supply = <&camera_vddd_1v5>; -+ -+ /* No camera mezzanine by default */ -+ status = "disabled"; -+ -+ port { -+ ov5640_ep: endpoint { -+ clock-lanes = <1>; -+ data-lanes = <0 2>; -+ remote-endpoint = <&csiphy0_ep>; -+ }; -+ }; -+ }; -+}; -+ -+&dsi0_out { -+ data-lanes = <0 1 2 3>; -+ remote-endpoint = <&adv7533_in>; -+}; -+ -+&lpass { -+ status = "okay"; -+}; -+ -+&mdss { -+ status = "okay"; -+}; -+ -+&mpss { -+ status = "okay"; -+ -+ firmware-name = "qcom/apq8016/mba.mbn", "qcom/apq8016/modem.mbn"; -+}; -+ -+&pm8916_resin { -+ status = "okay"; -+ linux,code = ; -+}; -+ -+&pronto { -+ status = "okay"; -+ -+ firmware-name = "qcom/apq8016/wcnss.mbn"; -+}; -+ -+&sdhc_1 { -+ status = "okay"; -+ -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>; -+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>; -+}; -+ -+&sdhc_2 { -+ status = "okay"; -+ -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; -+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; -+ -+ cd-gpios = <&msmgpio 38 GPIO_ACTIVE_LOW>; -+}; -+ -+&sound { -+ status = "okay"; -+ -+ pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>; -+ pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>; -+ pinctrl-names = "default", "sleep"; -+ qcom,model = "DB410c"; -+ qcom,audio-routing = -+ "AMIC2", "MIC BIAS Internal2", -+ "AMIC3", "MIC BIAS External1"; -+ -+ external-dai-link@0 { -+ link-name = "ADV7533"; -+ cpu { -+ sound-dai = <&lpass MI2S_QUATERNARY>; -+ }; -+ codec { -+ sound-dai = <&adv_bridge 0>; -+ }; -+ }; -+ -+ 
internal-codec-playback-dai-link@0 { -+ link-name = "WCD"; -+ cpu { -+ sound-dai = <&lpass MI2S_PRIMARY>; -+ }; -+ codec { -+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; -+ }; -+ }; -+ -+ internal-codec-capture-dai-link@0 { -+ link-name = "WCD-Capture"; -+ cpu { -+ sound-dai = <&lpass MI2S_TERTIARY>; -+ }; -+ codec { -+ sound-dai = <&lpass_codec 1>, <&wcd_codec 1>; -+ }; -+ }; -+}; -+ -+&usb { -+ status = "okay"; -+ extcon = <&usb_id>, <&usb_id>; -+ -+ pinctrl-names = "default", "device"; -+ pinctrl-0 = <&usb_sw_sel_pm &usb_hub_reset_pm>; -+ pinctrl-1 = <&usb_sw_sel_pm_device &usb_hub_reset_pm_device>; -+}; -+ -+&usb_hs_phy { -+ extcon = <&usb_id>; -+}; -+ -+&wcd_codec { -+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; -+ clock-names = "mclk"; -+ qcom,mbhc-vthreshold-low = <75 150 237 450 500>; -+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>; -+}; -+ -+&wcnss_ctrl { -+ firmware-name = "qcom/apq8016/WCNSS_qcom_wlan_nv_sbc.bin"; -+}; -+ -+/* Enable CoreSight */ -+&cti0 { status = "okay"; }; -+&cti1 { status = "okay"; }; -+&cti12 { status = "okay"; }; -+&cti13 { status = "okay"; }; -+&cti14 { status = "okay"; }; -+&cti15 { status = "okay"; }; -+&debug0 { status = "okay"; }; -+&debug1 { status = "okay"; }; -+&debug2 { status = "okay"; }; -+&debug3 { status = "okay"; }; -+&etf { status = "okay"; }; -+&etm0 { status = "okay"; }; -+&etm1 { status = "okay"; }; -+&etm2 { status = "okay"; }; -+&etm3 { status = "okay"; }; -+&etr { status = "okay"; }; -+&funnel0 { status = "okay"; }; -+&funnel1 { status = "okay"; }; -+&replicator { status = "okay"; }; -+&stm { status = "okay"; }; -+&tpiu { status = "okay"; }; -+ -+&smd_rpm_regulators { -+ vdd_l1_l2_l3-supply = <&pm8916_s3>; -+ vdd_l4_l5_l6-supply = <&pm8916_s4>; -+ vdd_l7-supply = <&pm8916_s4>; -+ -+ s3 { -+ regulator-min-microvolt = <1250000>; -+ regulator-max-microvolt = <1350000>; -+ }; -+ -+ s4 { -+ regulator-min-microvolt = <1850000>; -+ regulator-max-microvolt = <2150000>; -+ -+ regulator-always-on; -+ regulator-boot-on; -+ }; -+ -+ l1 { -+ regulator-min-microvolt = <1225000>; -+ regulator-max-microvolt = <1225000>; -+ }; -+ -+ l2 { -+ regulator-min-microvolt = <1200000>; -+ regulator-max-microvolt = <1200000>; -+ }; -+ -+ l4 { -+ regulator-min-microvolt = <2050000>; -+ regulator-max-microvolt = <2050000>; -+ }; -+ -+ l5 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ l6 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ l7 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ l8 { -+ regulator-min-microvolt = <2900000>; -+ regulator-max-microvolt = <2900000>; -+ }; -+ -+ l9 { -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ -+ l10 { -+ regulator-min-microvolt = <2800000>; -+ regulator-max-microvolt = <2800000>; -+ }; -+ -+ l11 { -+ regulator-min-microvolt = <2950000>; -+ regulator-max-microvolt = <2950000>; -+ regulator-allow-set-load; -+ regulator-system-load = <200000>; -+ }; -+ -+ l12 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <2950000>; -+ }; -+ -+ l13 { -+ regulator-min-microvolt = <3075000>; -+ regulator-max-microvolt = <3075000>; -+ }; -+ -+ l14 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ -+ /* -+ * The 96Boards specification expects a 1.8V power rail on the low-speed -+ * expansion connector that is able to provide at least 0.18W / 100 mA. 
-+ * L15/L16 are connected in parallel to provide 55 mA each. A minimum load -+ * must be specified to ensure the regulators are not put in LPM where they -+ * would only provide 5 mA. -+ */ -+ l15 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-system-load = <50000>; -+ regulator-allow-set-load; -+ regulator-always-on; -+ }; -+ -+ l16 { -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-system-load = <50000>; -+ regulator-allow-set-load; -+ regulator-always-on; -+ }; -+ -+ l17 { -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ -+ l18 { -+ regulator-min-microvolt = <2700000>; -+ regulator-max-microvolt = <2700000>; -+ }; -+}; -+ -+/* -+ * 2mA drive strength is not enough when connecting multiple -+ * I2C devices with different pull up resistors. -+ */ -+&i2c2_default { -+ drive-strength = <16>; -+}; -+ -+&i2c4_default { -+ drive-strength = <16>; -+}; -+ -+&i2c6_default { -+ drive-strength = <16>; -+}; -+ -+/* -+ * GPIO name legend: proper name = the GPIO line is used as GPIO -+ * NC = not connected (pin out but not routed from the chip to -+ * anything the board) -+ * "[PER]" = pin is muxed for [peripheral] (not GPIO) -+ * LSEC = Low Speed External Connector -+ * HSEC = High Speed External Connector -+ * -+ * Line names are taken from the schematic "DragonBoard410c" -+ * dated monday, august 31, 2015. Page 5 in particular. -+ * -+ * For the lines routed to the external connectors the -+ * lines are named after the 96Boards CE Specification 1.0, -+ * Appendix "Expansion Connector Signal Description". -+ * -+ * When the 96Board naming of a line and the schematic name of -+ * the same line are in conflict, the 96Board specification -+ * takes precedence, which means that the external UART on the -+ * LSEC is named UART0 while the schematic and SoC names this -+ * UART3. This is only for the informational lines i.e. "[FOO]", -+ * the GPIO named lines "GPIO-A" thru "GPIO-L" are the only -+ * ones actually used for GPIO. 
-+ */ -+ -+&msmgpio { -+ gpio-line-names = -+ "[UART0_TX]", /* GPIO_0, LSEC pin 5 */ -+ "[UART0_RX]", /* GPIO_1, LSEC pin 7 */ -+ "[UART0_CTS_N]", /* GPIO_2, LSEC pin 3 */ -+ "[UART0_RTS_N]", /* GPIO_3, LSEC pin 9 */ -+ "[UART1_TX]", /* GPIO_4, LSEC pin 11 */ -+ "[UART1_RX]", /* GPIO_5, LSEC pin 13 */ -+ "[I2C0_SDA]", /* GPIO_8, LSEC pin 17 */ -+ "[I2C0_SCL]", /* GPIO_7, LSEC pin 15 */ -+ "[SPI1_DOUT]", /* SPI1_MOSI, HSEC pin 1 */ -+ "[SPI1_DIN]", /* SPI1_MISO, HSEC pin 11 */ -+ "[SPI1_CS]", /* SPI1_CS_N, HSEC pin 7 */ -+ "[SPI1_SCLK]", /* SPI1_CLK, HSEC pin 9 */ -+ "GPIO-B", /* LS_EXP_GPIO_B, LSEC pin 24 */ -+ "GPIO-C", /* LS_EXP_GPIO_C, LSEC pin 25 */ -+ "[I2C3_SDA]", /* HSEC pin 38 */ -+ "[I2C3_SCL]", /* HSEC pin 36 */ -+ "[SPI0_MOSI]", /* LSEC pin 14 */ -+ "[SPI0_MISO]", /* LSEC pin 10 */ -+ "[SPI0_CS_N]", /* LSEC pin 12 */ -+ "[SPI0_CLK]", /* LSEC pin 8 */ -+ "HDMI_HPD_N", /* GPIO 20 */ -+ "USR_LED_1_CTRL", -+ "[I2C1_SDA]", /* GPIO_22, LSEC pin 21 */ -+ "[I2C1_SCL]", /* GPIO_23, LSEC pin 19 */ -+ "GPIO-G", /* LS_EXP_GPIO_G, LSEC pin 29 */ -+ "GPIO-H", /* LS_EXP_GPIO_H, LSEC pin 30 */ -+ "[CSI0_MCLK]", /* HSEC pin 15 */ -+ "[CSI1_MCLK]", /* HSEC pin 17 */ -+ "GPIO-K", /* LS_EXP_GPIO_K, LSEC pin 33 */ -+ "[I2C2_SDA]", /* HSEC pin 34 */ -+ "[I2C2_SCL]", /* HSEC pin 32 */ -+ "DSI2HDMI_INT_N", -+ "DSI_SW_SEL_APQ", -+ "GPIO-L", /* LS_EXP_GPIO_L, LSEC pin 34 */ -+ "GPIO-J", /* LS_EXP_GPIO_J, LSEC pin 32 */ -+ "GPIO-I", /* LS_EXP_GPIO_I, LSEC pin 31 */ -+ "GPIO-A", /* LS_EXP_GPIO_A, LSEC pin 23 */ -+ "FORCED_USB_BOOT", -+ "SD_CARD_DET_N", -+ "[WCSS_BT_SSBI]", -+ "[WCSS_WLAN_DATA_2]", /* GPIO 40 */ -+ "[WCSS_WLAN_DATA_1]", -+ "[WCSS_WLAN_DATA_0]", -+ "[WCSS_WLAN_SET]", -+ "[WCSS_WLAN_CLK]", -+ "[WCSS_FM_SSBI]", -+ "[WCSS_FM_SDI]", -+ "[WCSS_BT_DAT_CTL]", -+ "[WCSS_BT_DAT_STB]", -+ "NC", -+ "NC", /* GPIO 50 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", /* GPIO 60 */ -+ "NC", -+ "NC", -+ "[CDC_PDM0_CLK]", -+ "[CDC_PDM0_SYNC]", -+ "[CDC_PDM0_TX0]", -+ "[CDC_PDM0_RX0]", -+ "[CDC_PDM0_RX1]", -+ "[CDC_PDM0_RX2]", -+ "GPIO-D", /* LS_EXP_GPIO_D, LSEC pin 26 */ -+ "NC", /* GPIO 70 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", /* GPIO 74 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "BOOT_CONFIG_0", /* GPIO 80 */ -+ "BOOT_CONFIG_1", -+ "BOOT_CONFIG_2", -+ "BOOT_CONFIG_3", -+ "NC", -+ "NC", -+ "BOOT_CONFIG_5", -+ "NC", -+ "NC", -+ "NC", -+ "NC", /* GPIO 90 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", /* GPIO 100 */ -+ "NC", -+ "NC", -+ "NC", -+ "SSBI_GPS", -+ "NC", -+ "NC", -+ "KEY_VOLP_N", -+ "NC", -+ "NC", -+ "[LS_EXP_MI2S_WS]", /* GPIO 110 */ -+ "NC", -+ "NC", -+ "[LS_EXP_MI2S_SCK]", -+ "[LS_EXP_MI2S_DATA0]", -+ "GPIO-E", /* LS_EXP_GPIO_E, LSEC pin 27 */ -+ "NC", -+ "[DSI2HDMI_MI2S_WS]", -+ "[DSI2HDMI_MI2S_SCK]", -+ "[DSI2HDMI_MI2S_DATA0]", -+ "USR_LED_2_CTRL", /* GPIO 120 */ -+ "SB_HS_ID"; -+ -+ msmgpio_leds: msmgpio-leds { -+ pins = "gpio21", "gpio120"; -+ function = "gpio"; -+ -+ output-low; -+ }; -+ -+ usb_id_default: usb-id-default { -+ pins = "gpio121"; -+ function = "gpio"; -+ -+ drive-strength = <8>; -+ input-enable; -+ bias-pull-up; -+ }; -+ -+ adv7533_int_active: adv533-int-active { -+ pins = "gpio31"; -+ function = "gpio"; -+ -+ drive-strength = <16>; -+ bias-disable; -+ }; -+ -+ adv7533_int_suspend: adv7533-int-suspend { -+ pins = "gpio31"; -+ function = "gpio"; -+ -+ drive-strength = <2>; -+ bias-disable; -+ }; -+ -+ adv7533_switch_active: adv7533-switch-active { -+ pins = "gpio32"; -+ function = 
"gpio"; -+ -+ drive-strength = <16>; -+ bias-disable; -+ }; -+ -+ adv7533_switch_suspend: adv7533-switch-suspend { -+ pins = "gpio32"; -+ function = "gpio"; -+ -+ drive-strength = <2>; -+ bias-disable; -+ }; -+ -+ msm_key_volp_n_default: msm-key-volp-n-default { -+ pins = "gpio107"; -+ function = "gpio"; -+ -+ drive-strength = <8>; -+ input-enable; -+ bias-pull-up; -+ }; -+}; -+ -+&pm8916_gpios { -+ gpio-line-names = -+ "USR_LED_3_CTRL", -+ "USR_LED_4_CTRL", -+ "USB_HUB_RESET_N_PM", -+ "USB_SW_SEL_PM"; -+ -+ usb_hub_reset_pm: usb-hub-reset-pm { -+ pins = "gpio3"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ -+ input-disable; -+ output-high; -+ }; -+ -+ usb_hub_reset_pm_device: usb-hub-reset-pm-device { -+ pins = "gpio3"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ -+ output-low; -+ }; -+ -+ usb_sw_sel_pm: usb-sw-sel-pm { -+ pins = "gpio4"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ -+ power-source = ; -+ input-disable; -+ output-high; -+ }; -+ -+ usb_sw_sel_pm_device: usb-sw-sel-pm-device { -+ pins = "gpio4"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ -+ power-source = ; -+ input-disable; -+ output-low; -+ }; -+ -+ pm8916_gpios_leds: pm8916-gpios-leds { -+ pins = "gpio1", "gpio2"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ -+ output-low; -+ }; -+}; -+ -+&pm8916_mpps { -+ gpio-line-names = -+ "VDD_PX_BIAS", -+ "WLAN_LED_CTRL", -+ "BT_LED_CTRL", -+ "GPIO-F"; /* LS_EXP_GPIO_F, LSEC pin 28 */ -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&ls_exp_gpio_f>; -+ -+ ls_exp_gpio_f: pm8916-mpp4-state { -+ pins = "mpp4"; -+ function = "digital"; -+ -+ output-low; -+ power-source = ; // 1.8V -+ }; -+ -+ pm8916_mpps_leds: pm8916-mpps-state { -+ pins = "mpp2", "mpp3"; -+ function = "digital"; -+ -+ output-low; -+ }; - }; -diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi -deleted file mode 100644 -index f8d8f3e3664ec..0000000000000 ---- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi -+++ /dev/null -@@ -1,826 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-only --/* -- * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
-- */ -- --#include "msm8916-pm8916.dtsi" --#include --#include --#include --#include --#include -- --/ { -- aliases { -- serial0 = &blsp1_uart2; -- serial1 = &blsp1_uart1; -- usid0 = &pm8916_0; -- i2c0 = &blsp_i2c2; -- i2c1 = &blsp_i2c6; -- i2c3 = &blsp_i2c4; -- spi0 = &blsp_spi5; -- spi1 = &blsp_spi3; -- }; -- -- chosen { -- stdout-path = "serial0"; -- }; -- -- camera_vdddo_1v8: camera-vdddo-1v8 { -- compatible = "regulator-fixed"; -- regulator-name = "camera_vdddo"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- regulator-always-on; -- }; -- -- camera_vdda_2v8: camera-vdda-2v8 { -- compatible = "regulator-fixed"; -- regulator-name = "camera_vdda"; -- regulator-min-microvolt = <2800000>; -- regulator-max-microvolt = <2800000>; -- regulator-always-on; -- }; -- -- camera_vddd_1v5: camera-vddd-1v5 { -- compatible = "regulator-fixed"; -- regulator-name = "camera_vddd"; -- regulator-min-microvolt = <1500000>; -- regulator-max-microvolt = <1500000>; -- regulator-always-on; -- }; -- -- reserved-memory { -- ramoops@bff00000 { -- compatible = "ramoops"; -- reg = <0x0 0xbff00000 0x0 0x100000>; -- -- record-size = <0x20000>; -- console-size = <0x20000>; -- ftrace-size = <0x20000>; -- }; -- }; -- -- usb2513 { -- compatible = "smsc,usb3503"; -- reset-gpios = <&pm8916_gpios 3 GPIO_ACTIVE_LOW>; -- initial-mode = <1>; -- }; -- -- usb_id: usb-id { -- compatible = "linux,extcon-usb-gpio"; -- id-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>; -- pinctrl-names = "default"; -- pinctrl-0 = <&usb_id_default>; -- }; -- -- hdmi-out { -- compatible = "hdmi-connector"; -- type = "a"; -- -- port { -- hdmi_con: endpoint { -- remote-endpoint = <&adv7533_out>; -- }; -- }; -- }; -- -- gpio-keys { -- compatible = "gpio-keys"; -- #address-cells = <1>; -- #size-cells = <0>; -- autorepeat; -- -- pinctrl-names = "default"; -- pinctrl-0 = <&msm_key_volp_n_default>; -- -- button@0 { -- label = "Volume Up"; -- linux,code = ; -- gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>; -- }; -- }; -- -- leds { -- pinctrl-names = "default"; -- pinctrl-0 = <&msmgpio_leds>, -- <&pm8916_gpios_leds>, -- <&pm8916_mpps_leds>; -- -- compatible = "gpio-leds"; -- -- led@1 { -- label = "apq8016-sbc:green:user1"; -- gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "heartbeat"; -- default-state = "off"; -- }; -- -- led@2 { -- label = "apq8016-sbc:green:user2"; -- gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "mmc0"; -- default-state = "off"; -- }; -- -- led@3 { -- label = "apq8016-sbc:green:user3"; -- gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "mmc1"; -- default-state = "off"; -- }; -- -- led@4 { -- label = "apq8016-sbc:green:user4"; -- gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "none"; -- panic-indicator; -- default-state = "off"; -- }; -- -- led@5 { -- label = "apq8016-sbc:yellow:wlan"; -- gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "phy0tx"; -- default-state = "off"; -- }; -- -- led@6 { -- label = "apq8016-sbc:blue:bt"; -- gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>; -- linux,default-trigger = "bluetooth-power"; -- default-state = "off"; -- }; -- }; --}; -- --&blsp_dma { -- status = "okay"; --}; -- --&blsp_i2c2 { -- /* On Low speed expansion */ -- status = "okay"; -- label = "LS-I2C0"; --}; -- --&blsp_i2c4 { -- /* On High speed expansion */ -- status = "okay"; -- label = "HS-I2C2"; -- -- adv_bridge: bridge@39 { -- status = "okay"; -- -- compatible = "adi,adv7533"; -- reg = <0x39>; -- -- interrupt-parent = 
<&msmgpio>; -- interrupts = <31 IRQ_TYPE_EDGE_FALLING>; -- -- adi,dsi-lanes = <4>; -- clocks = <&rpmcc RPM_SMD_BB_CLK2>; -- clock-names = "cec"; -- -- pd-gpios = <&msmgpio 32 GPIO_ACTIVE_HIGH>; -- -- avdd-supply = <&pm8916_l6>; -- v1p2-supply = <&pm8916_l6>; -- v3p3-supply = <&pm8916_l17>; -- -- pinctrl-names = "default","sleep"; -- pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>; -- pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>; -- #sound-dai-cells = <1>; -- -- ports { -- #address-cells = <1>; -- #size-cells = <0>; -- -- port@0 { -- reg = <0>; -- adv7533_in: endpoint { -- remote-endpoint = <&dsi0_out>; -- }; -- }; -- -- port@1 { -- reg = <1>; -- adv7533_out: endpoint { -- remote-endpoint = <&hdmi_con>; -- }; -- }; -- }; -- }; --}; -- --&blsp_i2c6 { -- /* On Low speed expansion */ -- status = "okay"; -- label = "LS-I2C1"; --}; -- --&blsp_spi3 { -- /* On High speed expansion */ -- status = "okay"; -- label = "HS-SPI1"; --}; -- --&blsp_spi5 { -- /* On Low speed expansion */ -- status = "okay"; -- label = "LS-SPI0"; --}; -- --&blsp1_uart1 { -- status = "okay"; -- label = "LS-UART0"; --}; -- --&blsp1_uart2 { -- status = "okay"; -- label = "LS-UART1"; --}; -- --&camss { -- status = "okay"; -- ports { -- port@0 { -- reg = <0>; -- csiphy0_ep: endpoint { -- clock-lanes = <1>; -- data-lanes = <0 2>; -- remote-endpoint = <&ov5640_ep>; -- status = "okay"; -- }; -- }; -- }; --}; -- --&cci { -- status = "okay"; --}; -- --&cci_i2c0 { -- camera_rear@3b { -- compatible = "ovti,ov5640"; -- reg = <0x3b>; -- -- enable-gpios = <&msmgpio 34 GPIO_ACTIVE_HIGH>; -- reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>; -- pinctrl-names = "default"; -- pinctrl-0 = <&camera_rear_default>; -- -- clocks = <&gcc GCC_CAMSS_MCLK0_CLK>; -- clock-names = "xclk"; -- clock-frequency = <23880000>; -- -- vdddo-supply = <&camera_vdddo_1v8>; -- vdda-supply = <&camera_vdda_2v8>; -- vddd-supply = <&camera_vddd_1v5>; -- -- /* No camera mezzanine by default */ -- status = "disabled"; -- -- port { -- ov5640_ep: endpoint { -- clock-lanes = <1>; -- data-lanes = <0 2>; -- remote-endpoint = <&csiphy0_ep>; -- }; -- }; -- }; --}; -- --&dsi0_out { -- data-lanes = <0 1 2 3>; -- remote-endpoint = <&adv7533_in>; --}; -- --&lpass { -- status = "okay"; --}; -- --&mdss { -- status = "okay"; --}; -- --&pm8916_resin { -- status = "okay"; -- linux,code = ; --}; -- --&pronto { -- status = "okay"; --}; -- --&sdhc_1 { -- status = "okay"; -- -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>; -- pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>; --}; -- --&sdhc_2 { -- status = "okay"; -- -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; -- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; -- -- cd-gpios = <&msmgpio 38 GPIO_ACTIVE_LOW>; --}; -- --&sound { -- status = "okay"; -- -- pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>; -- pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>; -- pinctrl-names = "default", "sleep"; -- qcom,model = "DB410c"; -- qcom,audio-routing = -- "AMIC2", "MIC BIAS Internal2", -- "AMIC3", "MIC BIAS External1"; -- -- external-dai-link@0 { -- link-name = "ADV7533"; -- cpu { -- sound-dai = <&lpass MI2S_QUATERNARY>; -- }; -- codec { -- sound-dai = <&adv_bridge 0>; -- }; -- }; -- -- internal-codec-playback-dai-link@0 { -- link-name = "WCD"; -- cpu { -- sound-dai = <&lpass MI2S_PRIMARY>; -- }; -- codec { -- sound-dai = 
<&lpass_codec 0>, <&wcd_codec 0>; -- }; -- }; -- -- internal-codec-capture-dai-link@0 { -- link-name = "WCD-Capture"; -- cpu { -- sound-dai = <&lpass MI2S_TERTIARY>; -- }; -- codec { -- sound-dai = <&lpass_codec 1>, <&wcd_codec 1>; -- }; -- }; --}; -- --&usb { -- status = "okay"; -- extcon = <&usb_id>, <&usb_id>; -- -- pinctrl-names = "default", "device"; -- pinctrl-0 = <&usb_sw_sel_pm &usb_hub_reset_pm>; -- pinctrl-1 = <&usb_sw_sel_pm_device &usb_hub_reset_pm_device>; --}; -- --&usb_hs_phy { -- extcon = <&usb_id>; --}; -- --&wcd_codec { -- clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; -- clock-names = "mclk"; -- qcom,mbhc-vthreshold-low = <75 150 237 450 500>; -- qcom,mbhc-vthreshold-high = <75 150 237 450 500>; --}; -- --/* Enable CoreSight */ --&cti0 { status = "okay"; }; --&cti1 { status = "okay"; }; --&cti12 { status = "okay"; }; --&cti13 { status = "okay"; }; --&cti14 { status = "okay"; }; --&cti15 { status = "okay"; }; --&debug0 { status = "okay"; }; --&debug1 { status = "okay"; }; --&debug2 { status = "okay"; }; --&debug3 { status = "okay"; }; --&etf { status = "okay"; }; --&etm0 { status = "okay"; }; --&etm1 { status = "okay"; }; --&etm2 { status = "okay"; }; --&etm3 { status = "okay"; }; --&etr { status = "okay"; }; --&funnel0 { status = "okay"; }; --&funnel1 { status = "okay"; }; --&replicator { status = "okay"; }; --&stm { status = "okay"; }; --&tpiu { status = "okay"; }; -- --&smd_rpm_regulators { -- vdd_l1_l2_l3-supply = <&pm8916_s3>; -- vdd_l4_l5_l6-supply = <&pm8916_s4>; -- vdd_l7-supply = <&pm8916_s4>; -- -- s3 { -- regulator-min-microvolt = <375000>; -- regulator-max-microvolt = <1562000>; -- }; -- -- s4 { -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- -- regulator-always-on; -- regulator-boot-on; -- }; -- -- l1 { -- regulator-min-microvolt = <375000>; -- regulator-max-microvolt = <1525000>; -- }; -- -- l2 { -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <1200000>; -- }; -- -- l4 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l5 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l6 { -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- -- l7 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l8 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l9 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l10 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l11 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- regulator-allow-set-load; -- regulator-system-load = <200000>; -- }; -- -- l12 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l13 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l14 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- /** -- * 1.8v required on LS expansion -- * for mezzanine boards -- */ -- l15 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- regulator-always-on; -- }; -- -- l16 { -- regulator-min-microvolt = <1750000>; -- regulator-max-microvolt = <3337000>; -- }; -- -- l17 { -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- }; -- -- l18 { -- regulator-min-microvolt = 
<1750000>; -- regulator-max-microvolt = <3337000>; -- }; --}; -- --/* -- * 2mA drive strength is not enough when connecting multiple -- * I2C devices with different pull up resistors. -- */ --&i2c2_default { -- drive-strength = <16>; --}; -- --&i2c4_default { -- drive-strength = <16>; --}; -- --&i2c6_default { -- drive-strength = <16>; --}; -- --/* -- * GPIO name legend: proper name = the GPIO line is used as GPIO -- * NC = not connected (pin out but not routed from the chip to -- * anything the board) -- * "[PER]" = pin is muxed for [peripheral] (not GPIO) -- * LSEC = Low Speed External Connector -- * HSEC = High Speed External Connector -- * -- * Line names are taken from the schematic "DragonBoard410c" -- * dated monday, august 31, 2015. Page 5 in particular. -- * -- * For the lines routed to the external connectors the -- * lines are named after the 96Boards CE Specification 1.0, -- * Appendix "Expansion Connector Signal Description". -- * -- * When the 96Board naming of a line and the schematic name of -- * the same line are in conflict, the 96Board specification -- * takes precedence, which means that the external UART on the -- * LSEC is named UART0 while the schematic and SoC names this -- * UART3. This is only for the informational lines i.e. "[FOO]", -- * the GPIO named lines "GPIO-A" thru "GPIO-L" are the only -- * ones actually used for GPIO. -- */ -- --&msmgpio { -- gpio-line-names = -- "[UART0_TX]", /* GPIO_0, LSEC pin 5 */ -- "[UART0_RX]", /* GPIO_1, LSEC pin 7 */ -- "[UART0_CTS_N]", /* GPIO_2, LSEC pin 3 */ -- "[UART0_RTS_N]", /* GPIO_3, LSEC pin 9 */ -- "[UART1_TX]", /* GPIO_4, LSEC pin 11 */ -- "[UART1_RX]", /* GPIO_5, LSEC pin 13 */ -- "[I2C0_SDA]", /* GPIO_8, LSEC pin 17 */ -- "[I2C0_SCL]", /* GPIO_7, LSEC pin 15 */ -- "[SPI1_DOUT]", /* SPI1_MOSI, HSEC pin 1 */ -- "[SPI1_DIN]", /* SPI1_MISO, HSEC pin 11 */ -- "[SPI1_CS]", /* SPI1_CS_N, HSEC pin 7 */ -- "[SPI1_SCLK]", /* SPI1_CLK, HSEC pin 9 */ -- "GPIO-B", /* LS_EXP_GPIO_B, LSEC pin 24 */ -- "GPIO-C", /* LS_EXP_GPIO_C, LSEC pin 25 */ -- "[I2C3_SDA]", /* HSEC pin 38 */ -- "[I2C3_SCL]", /* HSEC pin 36 */ -- "[SPI0_MOSI]", /* LSEC pin 14 */ -- "[SPI0_MISO]", /* LSEC pin 10 */ -- "[SPI0_CS_N]", /* LSEC pin 12 */ -- "[SPI0_CLK]", /* LSEC pin 8 */ -- "HDMI_HPD_N", /* GPIO 20 */ -- "USR_LED_1_CTRL", -- "[I2C1_SDA]", /* GPIO_22, LSEC pin 21 */ -- "[I2C1_SCL]", /* GPIO_23, LSEC pin 19 */ -- "GPIO-G", /* LS_EXP_GPIO_G, LSEC pin 29 */ -- "GPIO-H", /* LS_EXP_GPIO_H, LSEC pin 30 */ -- "[CSI0_MCLK]", /* HSEC pin 15 */ -- "[CSI1_MCLK]", /* HSEC pin 17 */ -- "GPIO-K", /* LS_EXP_GPIO_K, LSEC pin 33 */ -- "[I2C2_SDA]", /* HSEC pin 34 */ -- "[I2C2_SCL]", /* HSEC pin 32 */ -- "DSI2HDMI_INT_N", -- "DSI_SW_SEL_APQ", -- "GPIO-L", /* LS_EXP_GPIO_L, LSEC pin 34 */ -- "GPIO-J", /* LS_EXP_GPIO_J, LSEC pin 32 */ -- "GPIO-I", /* LS_EXP_GPIO_I, LSEC pin 31 */ -- "GPIO-A", /* LS_EXP_GPIO_A, LSEC pin 23 */ -- "FORCED_USB_BOOT", -- "SD_CARD_DET_N", -- "[WCSS_BT_SSBI]", -- "[WCSS_WLAN_DATA_2]", /* GPIO 40 */ -- "[WCSS_WLAN_DATA_1]", -- "[WCSS_WLAN_DATA_0]", -- "[WCSS_WLAN_SET]", -- "[WCSS_WLAN_CLK]", -- "[WCSS_FM_SSBI]", -- "[WCSS_FM_SDI]", -- "[WCSS_BT_DAT_CTL]", -- "[WCSS_BT_DAT_STB]", -- "NC", -- "NC", /* GPIO 50 */ -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", /* GPIO 60 */ -- "NC", -- "NC", -- "[CDC_PDM0_CLK]", -- "[CDC_PDM0_SYNC]", -- "[CDC_PDM0_TX0]", -- "[CDC_PDM0_RX0]", -- "[CDC_PDM0_RX1]", -- "[CDC_PDM0_RX2]", -- "GPIO-D", /* LS_EXP_GPIO_D, LSEC pin 26 */ -- "NC", /* GPIO 70 */ -- "NC", -- "NC", -- 
"NC", -- "NC", /* GPIO 74 */ -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "BOOT_CONFIG_0", /* GPIO 80 */ -- "BOOT_CONFIG_1", -- "BOOT_CONFIG_2", -- "BOOT_CONFIG_3", -- "NC", -- "NC", -- "BOOT_CONFIG_5", -- "NC", -- "NC", -- "NC", -- "NC", /* GPIO 90 */ -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", /* GPIO 100 */ -- "NC", -- "NC", -- "NC", -- "SSBI_GPS", -- "NC", -- "NC", -- "KEY_VOLP_N", -- "NC", -- "NC", -- "[LS_EXP_MI2S_WS]", /* GPIO 110 */ -- "NC", -- "NC", -- "[LS_EXP_MI2S_SCK]", -- "[LS_EXP_MI2S_DATA0]", -- "GPIO-E", /* LS_EXP_GPIO_E, LSEC pin 27 */ -- "NC", -- "[DSI2HDMI_MI2S_WS]", -- "[DSI2HDMI_MI2S_SCK]", -- "[DSI2HDMI_MI2S_DATA0]", -- "USR_LED_2_CTRL", /* GPIO 120 */ -- "SB_HS_ID"; -- -- msmgpio_leds: msmgpio-leds { -- pins = "gpio21", "gpio120"; -- function = "gpio"; -- -- output-low; -- }; -- -- usb_id_default: usb-id-default { -- pins = "gpio121"; -- function = "gpio"; -- -- drive-strength = <8>; -- input-enable; -- bias-pull-up; -- }; -- -- adv7533_int_active: adv533-int-active { -- pins = "gpio31"; -- function = "gpio"; -- -- drive-strength = <16>; -- bias-disable; -- }; -- -- adv7533_int_suspend: adv7533-int-suspend { -- pins = "gpio31"; -- function = "gpio"; -- -- drive-strength = <2>; -- bias-disable; -- }; -- -- adv7533_switch_active: adv7533-switch-active { -- pins = "gpio32"; -- function = "gpio"; -- -- drive-strength = <16>; -- bias-disable; -- }; -- -- adv7533_switch_suspend: adv7533-switch-suspend { -- pins = "gpio32"; -- function = "gpio"; -- -- drive-strength = <2>; -- bias-disable; -- }; -- -- msm_key_volp_n_default: msm-key-volp-n-default { -- pins = "gpio107"; -- function = "gpio"; -- -- drive-strength = <8>; -- input-enable; -- bias-pull-up; -- }; --}; -- --&pm8916_gpios { -- gpio-line-names = -- "USR_LED_3_CTRL", -- "USR_LED_4_CTRL", -- "USB_HUB_RESET_N_PM", -- "USB_SW_SEL_PM"; -- -- usb_hub_reset_pm: usb-hub-reset-pm { -- pins = "gpio3"; -- function = PMIC_GPIO_FUNC_NORMAL; -- -- input-disable; -- output-high; -- }; -- -- usb_hub_reset_pm_device: usb-hub-reset-pm-device { -- pins = "gpio3"; -- function = PMIC_GPIO_FUNC_NORMAL; -- -- output-low; -- }; -- -- usb_sw_sel_pm: usb-sw-sel-pm { -- pins = "gpio4"; -- function = PMIC_GPIO_FUNC_NORMAL; -- -- power-source = ; -- input-disable; -- output-high; -- }; -- -- usb_sw_sel_pm_device: usb-sw-sel-pm-device { -- pins = "gpio4"; -- function = PMIC_GPIO_FUNC_NORMAL; -- -- power-source = ; -- input-disable; -- output-low; -- }; -- -- pm8916_gpios_leds: pm8916-gpios-leds { -- pins = "gpio1", "gpio2"; -- function = PMIC_GPIO_FUNC_NORMAL; -- -- output-low; -- }; --}; -- --&pm8916_mpps { -- gpio-line-names = -- "VDD_PX_BIAS", -- "WLAN_LED_CTRL", -- "BT_LED_CTRL", -- "GPIO-F"; /* LS_EXP_GPIO_F, LSEC pin 28 */ -- -- pinctrl-names = "default"; -- pinctrl-0 = <&ls_exp_gpio_f>; -- -- ls_exp_gpio_f: pm8916-mpp4 { -- pins = "mpp4"; -- function = "digital"; -- -- output-low; -- power-source = ; // 1.8V -- }; -- -- pm8916_mpps_leds: pm8916-mpps-leds { -- pins = "mpp2", "mpp3"; -- function = "digital"; -- -- output-low; -- }; --}; -diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts -index 757afa27424dd..d01a512634cfe 100644 ---- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts -+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts -@@ -5,9 +5,1077 @@ - - /dts-v1/; - --#include "apq8096-db820c.dtsi" -+#include "msm8996.dtsi" -+#include "pm8994.dtsi" -+#include "pmi8994.dtsi" -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * 
GPIO name legend: proper name = the GPIO line is used as GPIO -+ * NC = not connected (pin out but not routed from the chip to -+ * anything the board) -+ * "[PER]" = pin is muxed for [peripheral] (not GPIO) -+ * LSEC = Low Speed External Connector -+ * P HSEC = Primary High Speed External Connector -+ * S HSEC = Secondary High Speed External Connector -+ * J14 = Camera Connector -+ * TP = Test Points -+ * -+ * Line names are taken from the schematic "DragonBoard 820c", -+ * drawing no: LM25-P2751-1 -+ * -+ * For the lines routed to the external connectors the -+ * lines are named after the 96Boards CE Specification 1.0, -+ * Appendix "Expansion Connector Signal Description". -+ * -+ * When the 96Board naming of a line and the schematic name of -+ * the same line are in conflict, the 96Board specification -+ * takes precedence, which means that the external UART on the -+ * LSEC is named UART0 while the schematic and SoC names this -+ * UART3. This is only for the informational lines i.e. "[FOO]", -+ * the GPIO named lines "GPIO-A" thru "GPIO-L" are the only -+ * ones actually used for GPIO. -+ */ - - / { - model = "Qualcomm Technologies, Inc. DB820c"; - compatible = "arrow,apq8096-db820c", "qcom,apq8096-sbc", "qcom,apq8096"; -+ -+ aliases { -+ serial0 = &blsp2_uart2; -+ serial1 = &blsp2_uart3; -+ serial2 = &blsp1_uart2; -+ i2c0 = &blsp1_i2c3; -+ i2c1 = &blsp2_i2c1; -+ i2c2 = &blsp2_i2c1; -+ spi0 = &blsp1_spi1; -+ spi1 = &blsp2_spi6; -+ }; -+ -+ chosen { -+ stdout-path = "serial0:115200n8"; -+ }; -+ -+ clocks { -+ compatible = "simple-bus"; -+ divclk4: divclk4 { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <32768>; -+ clock-output-names = "divclk4"; -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&divclk4_pin_a>; -+ }; -+ -+ div1_mclk: divclk1 { -+ compatible = "gpio-gate-clock"; -+ pinctrl-0 = <&audio_mclk>; -+ pinctrl-names = "default"; -+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>; -+ #clock-cells = <0>; -+ enable-gpios = <&pm8994_gpios 15 0>; -+ }; -+ }; -+ -+ gpio_keys { -+ compatible = "gpio-keys"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ autorepeat; -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&volume_up_gpio>; -+ -+ button@0 { -+ label = "Volume Up"; -+ linux,code = ; -+ gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>; -+ }; -+ }; -+ -+ usb2_id: usb2-id { -+ compatible = "linux,extcon-usb-gpio"; -+ id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&usb2_vbus_det_gpio>; -+ }; -+ -+ usb3_id: usb3-id { -+ compatible = "linux,extcon-usb-gpio"; -+ id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&usb3_vbus_det_gpio>; -+ }; -+ -+ vph_pwr: vph-pwr-regulator { -+ compatible = "regulator-fixed"; -+ regulator-name = "vph_pwr"; -+ regulator-always-on; -+ regulator-boot-on; -+ -+ regulator-min-microvolt = <3700000>; -+ regulator-max-microvolt = <3700000>; -+ }; -+ -+ wlan_en: wlan-en-1-8v { -+ pinctrl-names = "default"; -+ pinctrl-0 = <&wlan_en_gpios>; -+ compatible = "regulator-fixed"; -+ regulator-name = "wlan-en-regulator"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ -+ gpio = <&pm8994_gpios 8 0>; -+ -+ /* WLAN card specific delay */ -+ startup-delay-us = <70000>; -+ enable-active-high; -+ }; -+}; -+ -+&blsp1_i2c3 { -+ /* On Low speed expansion */ -+ label = "LS-I2C0"; -+ status = "okay"; -+}; -+ -+&blsp1_spi1 { -+ /* On Low speed expansion */ -+ label = "LS-SPI0"; -+ status = "okay"; -+}; -+ -+&blsp1_uart2 { -+ label = "BT-UART"; -+ 
status = "okay"; -+ -+ bluetooth { -+ compatible = "qcom,qca6174-bt"; -+ -+ /* bt_disable_n gpio */ -+ enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>; -+ -+ clocks = <&divclk4>; -+ }; -+}; -+ -+&adsp_pil { -+ status = "okay"; -+}; -+ -+&blsp2_i2c1 { -+ /* On High speed expansion */ -+ label = "HS-I2C2"; -+ status = "okay"; -+}; -+ -+&blsp2_i2c1 { -+ /* On Low speed expansion */ -+ label = "LS-I2C1"; -+ status = "okay"; -+}; -+ -+&blsp2_spi6 { -+ /* On High speed expansion */ -+ label = "HS-SPI1"; -+ status = "okay"; -+}; -+ -+&blsp2_uart2 { -+ label = "LS-UART1"; -+ status = "okay"; -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&blsp2_uart2_2pins_default>; -+ pinctrl-1 = <&blsp2_uart2_2pins_sleep>; -+}; -+ -+&blsp2_uart3 { -+ label = "LS-UART0"; -+ status = "disabled"; -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&blsp2_uart3_4pins_default>; -+ pinctrl-1 = <&blsp2_uart3_4pins_sleep>; -+}; -+ -+&camss { -+ vdda-supply = <&vreg_l2a_1p25>; -+}; -+ -+&gpu { -+ status = "okay"; -+}; -+ -+&hdmi { -+ status = "okay"; -+ -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>; -+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>; -+ -+ core-vdda-supply = <&vreg_l12a_1p8>; -+ core-vcc-supply = <&vreg_s4a_1p8>; -+}; -+ -+&hdmi_phy { -+ status = "okay"; -+ -+ vddio-supply = <&vreg_l12a_1p8>; -+ vcca-supply = <&vreg_l28a_0p925>; -+ #phy-cells = <0>; -+}; -+ -+&hsusb_phy1 { -+ status = "okay"; -+ -+ vdda-pll-supply = <&vreg_l12a_1p8>; -+ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; -+}; -+ -+&hsusb_phy2 { -+ status = "okay"; -+ -+ vdda-pll-supply = <&vreg_l12a_1p8>; -+ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; -+}; -+ -+&mdp { -+ status = "okay"; -+}; -+ -+&mdss { -+ status = "okay"; -+}; -+ -+&mmcc { -+ vdd-gfx-supply = <&vdd_gfx>; -+}; -+ -+&pm8994_resin { -+ status = "okay"; -+ linux,code = ; -+}; -+ -+&tlmm { -+ gpio-line-names = -+ "[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */ -+ "[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */ -+ "[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */ -+ "[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */ -+ "[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */ -+ "[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */ -+ "[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */ -+ "[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */ -+ "GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */ -+ "TP93", /* GPIO_9 */ -+ "GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */ -+ "[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */ -+ "NC", /* GPIO_12 */ -+ "[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */ -+ "[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */ -+ "[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */ -+ "TP99", /* GPIO_16 */ -+ "[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */ -+ "[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */ -+ "[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */ -+ "[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */ -+ "FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */ -+ "FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */ -+ "GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */ -+ "GPIO-D", /* GPIO_24, LSEC pin 26 */ -+ "GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */ -+ "GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */ -+ "BLSP6_I2C_SDA", /* GPIO_27 */ -+ "BLSP6_I2C_SCL", /* GPIO_28 */ -+ "GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */ -+ "GPIO30", /* GPIO_30, S HSEC pin 4 */ -+ "HDMI_CEC", /* GPIO_31 */ -+ "HDMI_DDC_CLOCK", /* GPIO_32 */ -+ "HDMI_DDC_DATA", /* GPIO_33 */ -+ 
"HDMI_HOT_PLUG_DETECT", /* GPIO_34 */ -+ "PCIE0_RST_N", /* GPIO_35 */ -+ "PCIE0_CLKREQ_N", /* GPIO_36 */ -+ "PCIE0_WAKE", /* GPIO_37 */ -+ "SD_CARD_DET_N", /* GPIO_38 */ -+ "TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */ -+ "W_DISABLE_N", /* GPIO_40 */ -+ "[BLSP9_UART_TX]", /* GPIO_41 */ -+ "[BLSP9_UART_RX]", /* GPIO_42 */ -+ "[BLSP2_UART_CTS_N]", /* GPIO_43 */ -+ "[BLSP2_UART_RFR_N]", /* GPIO_44 */ -+ "[BLSP3_UART_TX]", /* GPIO_45 */ -+ "[BLSP3_UART_RX]", /* GPIO_46 */ -+ "[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */ -+ "[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */ -+ "[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */ -+ "[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */ -+ "[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */ -+ "[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */ -+ "[CODEC_INT1_N]", /* GPIO_53 */ -+ "[CODEC_INT2_N]", /* GPIO_54 */ -+ "[BLSP7_I2C_SDA]", /* GPIO_55 */ -+ "[BLSP7_I2C_SCL]", /* GPIO_56 */ -+ "MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */ -+ "[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */ -+ "[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, LSEC pin 16 */ -+ "[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */ -+ "[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */ -+ "GPIO-E", /* GPIO_62, LSEC pin 27 */ -+ "TP87", /* GPIO_63 */ -+ "[CODEC_RST_N]", /* GPIO_64 */ -+ "[PCM1_CLK]", /* GPIO_65 */ -+ "[PCM1_SYNC]", /* GPIO_66 */ -+ "[PCM1_DIN]", /* GPIO_67 */ -+ "[PCM1_DOUT]", /* GPIO_68 */ -+ "AUDIO_REF_CLK", /* GPIO_69 */ -+ "SLIMBUS_CLK", /* GPIO_70 */ -+ "SLIMBUS_DATA0", /* GPIO_71 */ -+ "SLIMBUS_DATA1", /* GPIO_72 */ -+ "NC", /* GPIO_73 */ -+ "NC", /* GPIO_74 */ -+ "NC", /* GPIO_75 */ -+ "NC", /* GPIO_76 */ -+ "TP94", /* GPIO_77 */ -+ "NC", /* GPIO_78 */ -+ "TP95", /* GPIO_79 */ -+ "GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */ -+ "TP88", /* GPIO_81 */ -+ "TP89", /* GPIO_82 */ -+ "TP90", /* GPIO_83 */ -+ "TP91", /* GPIO_84 */ -+ "[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */ -+ "[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */ -+ "[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */ -+ "[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */ -+ "TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */ -+ "TSIF1_EN", /* GPIO_90, S HSEC pin 46 */ -+ "TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */ -+ "NC", /* GPIO_92 */ -+ "TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */ -+ "TSIF2_EN", /* GPIO_94, S HSEC pin 56 */ -+ "TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */ -+ "TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */ -+ "NC", /* GPIO_97 */ -+ "CAM1_STANDBY_N", /* GPIO_98 */ -+ "NC", /* GPIO_99 */ -+ "NC", /* GPIO_100 */ -+ "[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */ -+ "BOOT_CONFIG1", /* GPIO_102 */ -+ "USB_HUB_RESET", /* GPIO_103 */ -+ "CAM1_RST_N", /* GPIO_104 */ -+ "NC", /* GPIO_105 */ -+ "NC", /* GPIO_106 */ -+ "NC", /* GPIO_107 */ -+ "NC", /* GPIO_108 */ -+ "NC", /* GPIO_109 */ -+ "NC", /* GPIO_110 */ -+ "NC", /* GPIO_111 */ -+ "NC", /* GPIO_112 */ -+ "PMI8994_BUA", /* GPIO_113 */ -+ "PCIE2_RST_N", /* GPIO_114 */ -+ "PCIE2_CLKREQ_N", /* GPIO_115 */ -+ "PCIE2_WAKE", /* GPIO_116 */ -+ "SSC_IRQ_0", /* GPIO_117 */ -+ "SSC_IRQ_1", /* GPIO_118 */ -+ "SSC_IRQ_2", /* GPIO_119 */ -+ "NC", /* GPIO_120 */ -+ "GPIO121", /* GPIO_121, S HSEC pin 2 */ -+ "NC", /* GPIO_122 */ -+ "SSC_IRQ_6", /* GPIO_123 */ -+ "SSC_IRQ_7", /* GPIO_124 */ -+ "GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */ -+ "BOOT_CONFIG5", /* GPIO_126 */ -+ "NC", /* GPIO_127 */ -+ "NC", /* GPIO_128 */ -+ "BOOT_CONFIG7", /* GPIO_129 */ -+ "PCIE1_RST_N", /* GPIO_130 */ -+ "PCIE1_CLKREQ_N", /* 
GPIO_131 */ -+ "PCIE1_WAKE", /* GPIO_132 */ -+ "GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */ -+ "NC", /* GPIO_134 */ -+ "NC", /* GPIO_135 */ -+ "BOOT_CONFIG8", /* GPIO_136 */ -+ "NC", /* GPIO_137 */ -+ "NC", /* GPIO_138 */ -+ "GPS_SSBI2", /* GPIO_139 */ -+ "GPS_SSBI1", /* GPIO_140 */ -+ "NC", /* GPIO_141 */ -+ "NC", /* GPIO_142 */ -+ "NC", /* GPIO_143 */ -+ "BOOT_CONFIG6", /* GPIO_144 */ -+ "NC", /* GPIO_145 */ -+ "NC", /* GPIO_146 */ -+ "NC", /* GPIO_147 */ -+ "NC", /* GPIO_148 */ -+ "NC"; /* GPIO_149 */ -+ -+ sdc2_cd_on: sdc2_cd_on { -+ mux { -+ pins = "gpio38"; -+ function = "gpio"; -+ }; -+ -+ config { -+ pins = "gpio38"; -+ bias-pull-up; /* pull up */ -+ drive-strength = <16>; /* 16 MA */ -+ }; -+ }; -+ -+ sdc2_cd_off: sdc2_cd_off { -+ mux { -+ pins = "gpio38"; -+ function = "gpio"; -+ }; -+ -+ config { -+ pins = "gpio38"; -+ bias-pull-up; /* pull up */ -+ drive-strength = <2>; /* 2 MA */ -+ }; -+ }; -+ -+ hdmi_hpd_active: hdmi_hpd_active { -+ mux { -+ pins = "gpio34"; -+ function = "hdmi_hot"; -+ }; -+ -+ config { -+ pins = "gpio34"; -+ bias-pull-down; -+ drive-strength = <16>; -+ }; -+ }; -+ -+ hdmi_hpd_suspend: hdmi_hpd_suspend { -+ mux { -+ pins = "gpio34"; -+ function = "hdmi_hot"; -+ }; -+ -+ config { -+ pins = "gpio34"; -+ bias-pull-down; -+ drive-strength = <2>; -+ }; -+ }; -+ -+ hdmi_ddc_active: hdmi_ddc_active { -+ mux { -+ pins = "gpio32", "gpio33"; -+ function = "hdmi_ddc"; -+ }; -+ -+ config { -+ pins = "gpio32", "gpio33"; -+ drive-strength = <2>; -+ bias-pull-up; -+ }; -+ }; -+ -+ hdmi_ddc_suspend: hdmi_ddc_suspend { -+ mux { -+ pins = "gpio32", "gpio33"; -+ function = "hdmi_ddc"; -+ }; -+ -+ config { -+ pins = "gpio32", "gpio33"; -+ drive-strength = <2>; -+ bias-pull-down; -+ }; -+ }; -+}; -+ -+&pcie0 { -+ status = "okay"; -+ perst-gpio = <&tlmm 35 GPIO_ACTIVE_LOW>; -+ vddpe-3v3-supply = <&wlan_en>; -+ vdda-supply = <&vreg_l28a_0p925>; -+}; -+ -+&pcie1 { -+ status = "okay"; -+ perst-gpio = <&tlmm 130 GPIO_ACTIVE_LOW>; -+ vdda-supply = <&vreg_l28a_0p925>; -+}; -+ -+&pcie2 { -+ status = "okay"; -+ perst-gpio = <&tlmm 114 GPIO_ACTIVE_LOW>; -+ vdda-supply = <&vreg_l28a_0p925>; -+}; -+ -+&pcie_phy { -+ status = "okay"; -+ -+ vdda-phy-supply = <&vreg_l28a_0p925>; -+ vdda-pll-supply = <&vreg_l12a_1p8>; -+}; -+ -+&pm8994_gpios { -+ gpio-line-names = -+ "NC", -+ "KEY_VOLP_N", -+ "NC", -+ "BL1_PWM", -+ "GPIO-F", /* BL0_PWM, LSEC pin 28 */ -+ "BL1_EN", -+ "NC", -+ "WLAN_EN", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "DIVCLK1", -+ "DIVCLK2", -+ "DIVCLK3", -+ "DIVCLK4", -+ "BT_EN", -+ "PMIC_SLB", -+ "PMIC_BUA", -+ "USB_VBUS_DET"; -+ -+ pinctrl-names = "default"; -+ pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>; -+ -+ ls_exp_gpio_f: pm8994_gpio5 { -+ pinconf { -+ pins = "gpio5"; -+ output-low; -+ power-source = <2>; // PM8994_GPIO_S4, 1.8V -+ }; -+ }; -+ -+ bt_en_gpios: bt_en_gpios { -+ pinconf { -+ pins = "gpio19"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ output-low; -+ power-source = ; // 1.8V -+ qcom,drive-strength = ; -+ bias-pull-down; -+ }; -+ }; -+ -+ wlan_en_gpios: wlan_en_gpios { -+ pinconf { -+ pins = "gpio8"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ output-low; -+ power-source = ; // 1.8V -+ qcom,drive-strength = ; -+ bias-pull-down; -+ }; -+ }; -+ -+ audio_mclk: clk_div1 { -+ pinconf { -+ pins = "gpio15"; -+ function = "func1"; -+ power-source = ; // 1.8V -+ }; -+ }; -+ -+ volume_up_gpio: pm8996_gpio2 { -+ pinconf { -+ pins = "gpio2"; -+ function = "normal"; -+ input-enable; -+ drive-push-pull; -+ bias-pull-up; -+ qcom,drive-strength = ; -+ power-source = 
; // 1.8V -+ }; -+ }; -+ -+ divclk4_pin_a: divclk4 { -+ pinconf { -+ pins = "gpio18"; -+ function = PMIC_GPIO_FUNC_FUNC2; -+ -+ bias-disable; -+ power-source = ; -+ }; -+ }; -+ -+ usb3_vbus_det_gpio: pm8996_gpio22 { -+ pinconf { -+ pins = "gpio22"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ input-enable; -+ bias-pull-down; -+ qcom,drive-strength = ; -+ power-source = ; // 1.8V -+ }; -+ }; -+}; -+ -+&pm8994_mpps { -+ gpio-line-names = -+ "VDDPX_BIAS", -+ "WIFI_LED", -+ "NC", -+ "BT_LED", -+ "PM_MPP05", -+ "PM_MPP06", -+ "PM_MPP07", -+ "NC"; -+}; -+ -+&pm8994_spmi_regulators { -+ qcom,saw-reg = <&saw3>; -+ s9 { -+ qcom,saw-slave; -+ }; -+ s10 { -+ qcom,saw-slave; -+ }; -+ s11 { -+ qcom,saw-leader; -+ regulator-always-on; -+ regulator-min-microvolt = <980000>; -+ regulator-max-microvolt = <980000>; -+ }; -+}; -+ -+&pmi8994_gpios { -+ gpio-line-names = -+ "NC", -+ "SPKR_AMP_EN1", -+ "SPKR_AMP_EN2", -+ "TP61", -+ "NC", -+ "USB2_VBUS_DET", -+ "NC", -+ "NC", -+ "NC", -+ "NC"; -+ -+ usb2_vbus_det_gpio: pmi8996_gpio6 { -+ pinconf { -+ pins = "gpio6"; -+ function = PMIC_GPIO_FUNC_NORMAL; -+ input-enable; -+ bias-pull-down; -+ qcom,drive-strength = ; -+ power-source = ; // 1.8V -+ }; -+ }; -+}; -+ -+&pmi8994_spmi_regulators { -+ vdd_gfx: s2@1700 { -+ reg = <0x1700 0x100>; -+ regulator-name = "VDD_GFX"; -+ regulator-min-microvolt = <980000>; -+ regulator-max-microvolt = <980000>; -+ }; -+}; -+ -+&rpm_requests { -+ pm8994-regulators { -+ compatible = "qcom,rpm-pm8994-regulators"; -+ -+ vdd_s1-supply = <&vph_pwr>; -+ vdd_s2-supply = <&vph_pwr>; -+ vdd_s3-supply = <&vph_pwr>; -+ vdd_s4-supply = <&vph_pwr>; -+ vdd_s5-supply = <&vph_pwr>; -+ vdd_s6-supply = <&vph_pwr>; -+ vdd_s7-supply = <&vph_pwr>; -+ vdd_s8-supply = <&vph_pwr>; -+ vdd_s9-supply = <&vph_pwr>; -+ vdd_s10-supply = <&vph_pwr>; -+ vdd_s11-supply = <&vph_pwr>; -+ vdd_s12-supply = <&vph_pwr>; -+ vdd_l1-supply = <&vreg_s1b_1p025>; -+ vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>; -+ vdd_l3_l11-supply = <&vreg_s3a_1p3>; -+ vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>; -+ vdd_l5_l7-supply = <&vreg_s5a_2p15>; -+ vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>; -+ vdd_l8_l16_l30-supply = <&vph_pwr>; -+ vdd_l9_l10_l18_l22-supply = <&vph_pwr_bbyp>; -+ vdd_l13_l19_l23_l24-supply = <&vph_pwr_bbyp>; -+ vdd_l14_l15-supply = <&vreg_s5a_2p15>; -+ vdd_l17_l29-supply = <&vph_pwr_bbyp>; -+ vdd_l20_l21-supply = <&vph_pwr_bbyp>; -+ vdd_l25-supply = <&vreg_s3a_1p3>; -+ vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>; -+ -+ vreg_s3a_1p3: s3 { -+ regulator-name = "vreg_s3a_1p3"; -+ regulator-min-microvolt = <1300000>; -+ regulator-max-microvolt = <1300000>; -+ }; -+ -+ /** -+ * 1.8v required on LS expansion -+ * for mezzanine boards -+ */ -+ vreg_s4a_1p8: s4 { -+ regulator-name = "vreg_s4a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ regulator-always-on; -+ }; -+ vreg_s5a_2p15: s5 { -+ regulator-name = "vreg_s5a_2p15"; -+ regulator-min-microvolt = <2150000>; -+ regulator-max-microvolt = <2150000>; -+ }; -+ vreg_s7a_1p0: s7 { -+ regulator-name = "vreg_s7a_1p0"; -+ regulator-min-microvolt = <800000>; -+ regulator-max-microvolt = <800000>; -+ }; -+ -+ vreg_l1a_1p0: l1 { -+ regulator-name = "vreg_l1a_1p0"; -+ regulator-min-microvolt = <1000000>; -+ regulator-max-microvolt = <1000000>; -+ }; -+ vreg_l2a_1p25: l2 { -+ regulator-name = "vreg_l2a_1p25"; -+ regulator-min-microvolt = <1250000>; -+ regulator-max-microvolt = <1250000>; -+ }; -+ vreg_l3a_0p875: l3 { -+ regulator-name = "vreg_l3a_0p875"; -+ regulator-min-microvolt = <850000>; -+ 
regulator-max-microvolt = <850000>; -+ }; -+ vreg_l4a_1p225: l4 { -+ regulator-name = "vreg_l4a_1p225"; -+ regulator-min-microvolt = <1225000>; -+ regulator-max-microvolt = <1225000>; -+ }; -+ vreg_l6a_1p2: l6 { -+ regulator-name = "vreg_l6a_1p2"; -+ regulator-min-microvolt = <1200000>; -+ regulator-max-microvolt = <1200000>; -+ }; -+ vreg_l8a_1p8: l8 { -+ regulator-name = "vreg_l8a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l9a_1p8: l9 { -+ regulator-name = "vreg_l9a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l10a_1p8: l10 { -+ regulator-name = "vreg_l10a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l11a_1p15: l11 { -+ regulator-name = "vreg_l11a_1p15"; -+ regulator-min-microvolt = <1150000>; -+ regulator-max-microvolt = <1150000>; -+ }; -+ vreg_l12a_1p8: l12 { -+ regulator-name = "vreg_l12a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l13a_2p95: l13 { -+ regulator-name = "vreg_l13a_2p95"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <2950000>; -+ }; -+ vreg_l14a_1p8: l14 { -+ regulator-name = "vreg_l14a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l15a_1p8: l15 { -+ regulator-name = "vreg_l15a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l16a_2p7: l16 { -+ regulator-name = "vreg_l16a_2p7"; -+ regulator-min-microvolt = <2700000>; -+ regulator-max-microvolt = <2700000>; -+ }; -+ vreg_l17a_2p8: l17 { -+ regulator-name = "vreg_l17a_2p8"; -+ regulator-min-microvolt = <2500000>; -+ regulator-max-microvolt = <2500000>; -+ }; -+ vreg_l18a_2p85: l18 { -+ regulator-name = "vreg_l18a_2p85"; -+ regulator-min-microvolt = <2700000>; -+ regulator-max-microvolt = <2900000>; -+ }; -+ vreg_l19a_2p8: l19 { -+ regulator-name = "vreg_l19a_2p8"; -+ regulator-min-microvolt = <3000000>; -+ regulator-max-microvolt = <3000000>; -+ }; -+ vreg_l20a_2p95: l20 { -+ regulator-name = "vreg_l20a_2p95"; -+ regulator-min-microvolt = <2950000>; -+ regulator-max-microvolt = <2950000>; -+ regulator-allow-set-load; -+ }; -+ vreg_l21a_2p95: l21 { -+ regulator-name = "vreg_l21a_2p95"; -+ regulator-min-microvolt = <2950000>; -+ regulator-max-microvolt = <2950000>; -+ regulator-allow-set-load; -+ regulator-system-load = <200000>; -+ }; -+ vreg_l22a_3p0: l22 { -+ regulator-name = "vreg_l22a_3p0"; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ vreg_l23a_2p8: l23 { -+ regulator-name = "vreg_l23a_2p8"; -+ regulator-min-microvolt = <2800000>; -+ regulator-max-microvolt = <2800000>; -+ }; -+ vreg_l24a_3p075: l24 { -+ regulator-name = "vreg_l24a_3p075"; -+ regulator-min-microvolt = <3075000>; -+ regulator-max-microvolt = <3075000>; -+ }; -+ vreg_l25a_1p2: l25 { -+ regulator-name = "vreg_l25a_1p2"; -+ regulator-min-microvolt = <1200000>; -+ regulator-max-microvolt = <1200000>; -+ regulator-allow-set-load; -+ }; -+ vreg_l26a_0p8: l27 { -+ regulator-name = "vreg_l26a_0p8"; -+ regulator-min-microvolt = <1000000>; -+ regulator-max-microvolt = <1000000>; -+ }; -+ vreg_l28a_0p925: l28 { -+ regulator-name = "vreg_l28a_0p925"; -+ regulator-min-microvolt = <925000>; -+ regulator-max-microvolt = <925000>; -+ regulator-allow-set-load; -+ }; -+ vreg_l29a_2p8: l29 { -+ regulator-name = "vreg_l29a_2p8"; -+ regulator-min-microvolt = <2800000>; -+ 
regulator-max-microvolt = <2800000>; -+ }; -+ vreg_l30a_1p8: l30 { -+ regulator-name = "vreg_l30a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ vreg_l32a_1p8: l32 { -+ regulator-name = "vreg_l32a_1p8"; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; -+ }; -+ -+ vreg_lvs1a_1p8: lvs1 { -+ regulator-name = "vreg_lvs1a_1p8"; -+ }; -+ -+ vreg_lvs2a_1p8: lvs2 { -+ regulator-name = "vreg_lvs2a_1p8"; -+ }; -+ }; -+ -+ pmi8994-regulators { -+ compatible = "qcom,rpm-pmi8994-regulators"; -+ -+ vdd_s1-supply = <&vph_pwr>; -+ vdd_s2-supply = <&vph_pwr>; -+ vdd_s3-supply = <&vph_pwr>; -+ vdd_bst_byp-supply = <&vph_pwr>; -+ -+ vph_pwr_bbyp: boost-bypass { -+ regulator-name = "vph_pwr_bbyp"; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; -+ }; -+ -+ vreg_s1b_1p025: s1 { -+ regulator-name = "vreg_s1b_1p025"; -+ regulator-min-microvolt = <1025000>; -+ regulator-max-microvolt = <1025000>; -+ }; -+ }; -+}; -+ -+&sdhc2 { -+ /* External SD card */ -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&sdc2_state_on &sdc2_cd_on>; -+ pinctrl-1 = <&sdc2_state_off &sdc2_cd_off>; -+ cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>; -+ vmmc-supply = <&vreg_l21a_2p95>; -+ vqmmc-supply = <&vreg_l13a_2p95>; -+ status = "okay"; -+}; -+ -+&q6asmdai { -+ dai@0 { -+ reg = <0>; -+ }; -+ -+ dai@1 { -+ reg = <1>; -+ }; -+ -+ dai@2 { -+ reg = <2>; -+ }; -+}; -+ -+&sound { -+ compatible = "qcom,apq8096-sndcard"; -+ model = "DB820c"; -+ audio-routing = "RX_BIAS", "MCLK", -+ "MM_DL1", "MultiMedia1 Playback", -+ "MM_DL2", "MultiMedia2 Playback", -+ "MultiMedia3 Capture", "MM_UL3"; -+ -+ mm1-dai-link { -+ link-name = "MultiMedia1"; -+ cpu { -+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>; -+ }; -+ }; -+ -+ mm2-dai-link { -+ link-name = "MultiMedia2"; -+ cpu { -+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>; -+ }; -+ }; -+ -+ mm3-dai-link { -+ link-name = "MultiMedia3"; -+ cpu { -+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>; -+ }; -+ }; -+ -+ hdmi-dai-link { -+ link-name = "HDMI"; -+ cpu { -+ sound-dai = <&q6afedai HDMI_RX>; -+ }; -+ -+ platform { -+ sound-dai = <&q6routing>; -+ }; -+ -+ codec { -+ sound-dai = <&hdmi 0>; -+ }; -+ }; -+ -+ slim-dai-link { -+ link-name = "SLIM Playback"; -+ cpu { -+ sound-dai = <&q6afedai SLIMBUS_6_RX>; -+ }; -+ -+ platform { -+ sound-dai = <&q6routing>; -+ }; -+ -+ codec { -+ sound-dai = <&wcd9335 6>; -+ }; -+ }; -+ -+ slimcap-dai-link { -+ link-name = "SLIM Capture"; -+ cpu { -+ sound-dai = <&q6afedai SLIMBUS_0_TX>; -+ }; -+ -+ platform { -+ sound-dai = <&q6routing>; -+ }; -+ -+ codec { -+ sound-dai = <&wcd9335 1>; -+ }; -+ }; -+}; -+ -+&ufsphy { -+ status = "okay"; -+ -+ vdda-phy-supply = <&vreg_l28a_0p925>; -+ vdda-pll-supply = <&vreg_l12a_1p8>; -+ vddp-ref-clk-supply = <&vreg_l25a_1p2>; -+}; -+ -+&ufshc { -+ status = "okay"; -+ -+ vcc-supply = <&vreg_l20a_2p95>; -+ vccq-supply = <&vreg_l25a_1p2>; -+ vccq2-supply = <&vreg_s4a_1p8>; -+ -+ vcc-max-microamp = <600000>; -+ vccq-max-microamp = <450000>; -+ vccq2-max-microamp = <450000>; -+}; -+ -+&usb2 { -+ status = "okay"; -+ extcon = <&usb2_id>; -+ -+ dwc3@7600000 { -+ extcon = <&usb2_id>; -+ dr_mode = "otg"; -+ maximum-speed = "high-speed"; -+ }; -+}; -+ -+&usb3 { -+ status = "okay"; -+ extcon = <&usb3_id>; -+ -+ dwc3@6a00000 { -+ extcon = <&usb3_id>; -+ dr_mode = "otg"; -+ }; -+}; -+ -+&usb3phy { -+ status = "okay"; -+ -+ vdda-phy-supply = <&vreg_l28a_0p925>; -+ vdda-pll-supply = <&vreg_l12a_1p8>; -+ -+}; -+ -+&venus { -+ 
status = "okay"; -+}; -+ -+&wcd9335 { -+ clock-names = "mclk", "slimbus"; -+ clocks = <&div1_mclk>, -+ <&rpmcc RPM_SMD_BB_CLK1>; -+ -+ vdd-buck-supply = <&vreg_s4a_1p8>; -+ vdd-buck-sido-supply = <&vreg_s4a_1p8>; -+ vdd-tx-supply = <&vreg_s4a_1p8>; -+ vdd-rx-supply = <&vreg_s4a_1p8>; -+ vdd-io-supply = <&vreg_s4a_1p8>; - }; -diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi -deleted file mode 100644 -index 51e17094d7b18..0000000000000 ---- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi -+++ /dev/null -@@ -1,1105 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-only --/* -- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. -- */ -- --#include "msm8996.dtsi" --#include "pm8994.dtsi" --#include "pmi8994.dtsi" --#include --#include --#include --#include --#include -- --/* -- * GPIO name legend: proper name = the GPIO line is used as GPIO -- * NC = not connected (pin out but not routed from the chip to -- * anything the board) -- * "[PER]" = pin is muxed for [peripheral] (not GPIO) -- * LSEC = Low Speed External Connector -- * P HSEC = Primary High Speed External Connector -- * S HSEC = Secondary High Speed External Connector -- * J14 = Camera Connector -- * TP = Test Points -- * -- * Line names are taken from the schematic "DragonBoard 820c", -- * drawing no: LM25-P2751-1 -- * -- * For the lines routed to the external connectors the -- * lines are named after the 96Boards CE Specification 1.0, -- * Appendix "Expansion Connector Signal Description". -- * -- * When the 96Board naming of a line and the schematic name of -- * the same line are in conflict, the 96Board specification -- * takes precedence, which means that the external UART on the -- * LSEC is named UART0 while the schematic and SoC names this -- * UART3. This is only for the informational lines i.e. "[FOO]", -- * the GPIO named lines "GPIO-A" thru "GPIO-L" are the only -- * ones actually used for GPIO. 
-- */ -- --/ { -- aliases { -- serial0 = &blsp2_uart2; -- serial1 = &blsp2_uart3; -- serial2 = &blsp1_uart2; -- i2c0 = &blsp1_i2c3; -- i2c1 = &blsp2_i2c1; -- i2c2 = &blsp2_i2c1; -- spi0 = &blsp1_spi1; -- spi1 = &blsp2_spi6; -- }; -- -- chosen { -- stdout-path = "serial0:115200n8"; -- }; -- -- clocks { -- compatible = "simple-bus"; -- divclk4: divclk4 { -- compatible = "fixed-clock"; -- #clock-cells = <0>; -- clock-frequency = <32768>; -- clock-output-names = "divclk4"; -- -- pinctrl-names = "default"; -- pinctrl-0 = <&divclk4_pin_a>; -- }; -- -- div1_mclk: divclk1 { -- compatible = "gpio-gate-clock"; -- pinctrl-0 = <&audio_mclk>; -- pinctrl-names = "default"; -- clocks = <&rpmcc RPM_SMD_DIV_CLK1>; -- #clock-cells = <0>; -- enable-gpios = <&pm8994_gpios 15 0>; -- }; -- }; -- -- gpio_keys { -- compatible = "gpio-keys"; -- #address-cells = <1>; -- #size-cells = <0>; -- autorepeat; -- -- pinctrl-names = "default"; -- pinctrl-0 = <&volume_up_gpio>; -- -- button@0 { -- label = "Volume Up"; -- linux,code = ; -- gpios = <&pm8994_gpios 2 GPIO_ACTIVE_LOW>; -- }; -- }; -- -- usb2_id: usb2-id { -- compatible = "linux,extcon-usb-gpio"; -- id-gpio = <&pmi8994_gpios 6 GPIO_ACTIVE_HIGH>; -- pinctrl-names = "default"; -- pinctrl-0 = <&usb2_vbus_det_gpio>; -- }; -- -- usb3_id: usb3-id { -- compatible = "linux,extcon-usb-gpio"; -- id-gpio = <&pm8994_gpios 22 GPIO_ACTIVE_HIGH>; -- pinctrl-names = "default"; -- pinctrl-0 = <&usb3_vbus_det_gpio>; -- }; -- -- vph_pwr: vph-pwr-regulator { -- compatible = "regulator-fixed"; -- regulator-name = "vph_pwr"; -- regulator-always-on; -- regulator-boot-on; -- -- regulator-min-microvolt = <3700000>; -- regulator-max-microvolt = <3700000>; -- }; -- -- wlan_en: wlan-en-1-8v { -- pinctrl-names = "default"; -- pinctrl-0 = <&wlan_en_gpios>; -- compatible = "regulator-fixed"; -- regulator-name = "wlan-en-regulator"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- -- gpio = <&pm8994_gpios 8 0>; -- -- /* WLAN card specific delay */ -- startup-delay-us = <70000>; -- enable-active-high; -- }; --}; -- --&blsp1_i2c3 { -- /* On Low speed expansion */ -- label = "LS-I2C0"; -- status = "okay"; --}; -- --&blsp1_spi1 { -- /* On Low speed expansion */ -- label = "LS-SPI0"; -- status = "okay"; --}; -- --&blsp1_uart2 { -- label = "BT-UART"; -- status = "okay"; -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&blsp1_uart2_default>; -- pinctrl-1 = <&blsp1_uart2_sleep>; -- -- bluetooth { -- compatible = "qcom,qca6174-bt"; -- -- /* bt_disable_n gpio */ -- enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>; -- -- clocks = <&divclk4>; -- }; --}; -- --&adsp_pil { -- status = "okay"; --}; -- --&blsp2_i2c1 { -- /* On High speed expansion */ -- label = "HS-I2C2"; -- status = "okay"; --}; -- --&blsp2_i2c1 { -- /* On Low speed expansion */ -- label = "LS-I2C1"; -- status = "okay"; --}; -- --&blsp2_spi6 { -- /* On High speed expansion */ -- label = "HS-SPI1"; -- status = "okay"; --}; -- --&blsp2_uart2 { -- label = "LS-UART1"; -- status = "okay"; -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&blsp2_uart2_2pins_default>; -- pinctrl-1 = <&blsp2_uart2_2pins_sleep>; --}; -- --&blsp2_uart3 { -- label = "LS-UART0"; -- status = "disabled"; -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&blsp2_uart3_4pins_default>; -- pinctrl-1 = <&blsp2_uart3_4pins_sleep>; --}; -- --&camss { -- vdda-supply = <&vreg_l2a_1p25>; --}; -- --&gpu { -- status = "okay"; --}; -- --&hdmi { -- status = "okay"; -- -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = 
<&hdmi_hpd_active &hdmi_ddc_active>; -- pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>; -- -- core-vdda-supply = <&vreg_l12a_1p8>; -- core-vcc-supply = <&vreg_s4a_1p8>; --}; -- --&hdmi_phy { -- status = "okay"; -- -- vddio-supply = <&vreg_l12a_1p8>; -- vcca-supply = <&vreg_l28a_0p925>; -- #phy-cells = <0>; --}; -- --&hsusb_phy1 { -- status = "okay"; -- -- vdda-pll-supply = <&vreg_l12a_1p8>; -- vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; --}; -- --&hsusb_phy2 { -- status = "okay"; -- -- vdda-pll-supply = <&vreg_l12a_1p8>; -- vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; --}; -- --&mdp { -- status = "okay"; --}; -- --&mdss { -- status = "okay"; --}; -- --&mmcc { -- vdd-gfx-supply = <&vdd_gfx>; --}; -- --&pm8994_resin { -- status = "okay"; -- linux,code = ; --}; -- --&tlmm { -- gpio-line-names = -- "[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */ -- "[SPI0_DIN]", /* GPIO_1, BLSP1_SPI_MISO, LSEC pin 10 */ -- "[SPI0_CS]", /* GPIO_2, BLSP1_SPI_CS_N, LSEC pin 12 */ -- "[SPI0_SCLK]", /* GPIO_3, BLSP1_SPI_CLK, LSEC pin 8 */ -- "[UART1_TxD]", /* GPIO_4, BLSP8_UART_TX, LSEC pin 11 */ -- "[UART1_RxD]", /* GPIO_5, BLSP8_UART_RX, LSEC pin 13 */ -- "[I2C1_SDA]", /* GPIO_6, BLSP8_I2C_SDA, LSEC pin 21 */ -- "[I2C1_SCL]", /* GPIO_7, BLSP8_I2C_SCL, LSEC pin 19 */ -- "GPIO-H", /* GPIO_8, LCD0_RESET_N, LSEC pin 30 */ -- "TP93", /* GPIO_9 */ -- "GPIO-G", /* GPIO_10, MDP_VSYNC_P, LSEC pin 29 */ -- "[MDP_VSYNC_S]", /* GPIO_11, S HSEC pin 55 */ -- "NC", /* GPIO_12 */ -- "[CSI0_MCLK]", /* GPIO_13, CAM_MCLK0, P HSEC pin 15 */ -- "[CAM_MCLK1]", /* GPIO_14, J14 pin 11 */ -- "[CSI1_MCLK]", /* GPIO_15, CAM_MCLK2, P HSEC pin 17 */ -- "TP99", /* GPIO_16 */ -- "[I2C2_SDA]", /* GPIO_17, CCI_I2C_SDA0, P HSEC pin 34 */ -- "[I2C2_SCL]", /* GPIO_18, CCI_I2C_SCL0, P HSEC pin 32 */ -- "[CCI_I2C_SDA1]", /* GPIO_19, S HSEC pin 38 */ -- "[CCI_I2C_SCL1]", /* GPIO_20, S HSEC pin 36 */ -- "FLASH_STROBE_EN", /* GPIO_21, S HSEC pin 5 */ -- "FLASH_STROBE_TRIG", /* GPIO_22, S HSEC pin 1 */ -- "GPIO-K", /* GPIO_23, CAM2_RST_N, LSEC pin 33 */ -- "GPIO-D", /* GPIO_24, LSEC pin 26 */ -- "GPIO-I", /* GPIO_25, CAM0_RST_N, LSEC pin 31 */ -- "GPIO-J", /* GPIO_26, CAM0_STANDBY_N, LSEC pin 32 */ -- "BLSP6_I2C_SDA", /* GPIO_27 */ -- "BLSP6_I2C_SCL", /* GPIO_28 */ -- "GPIO-B", /* GPIO_29, TS0_RESET_N, LSEC pin 24 */ -- "GPIO30", /* GPIO_30, S HSEC pin 4 */ -- "HDMI_CEC", /* GPIO_31 */ -- "HDMI_DDC_CLOCK", /* GPIO_32 */ -- "HDMI_DDC_DATA", /* GPIO_33 */ -- "HDMI_HOT_PLUG_DETECT", /* GPIO_34 */ -- "PCIE0_RST_N", /* GPIO_35 */ -- "PCIE0_CLKREQ_N", /* GPIO_36 */ -- "PCIE0_WAKE", /* GPIO_37 */ -- "SD_CARD_DET_N", /* GPIO_38 */ -- "TSIF1_SYNC", /* GPIO_39, S HSEC pin 48 */ -- "W_DISABLE_N", /* GPIO_40 */ -- "[BLSP9_UART_TX]", /* GPIO_41 */ -- "[BLSP9_UART_RX]", /* GPIO_42 */ -- "[BLSP2_UART_CTS_N]", /* GPIO_43 */ -- "[BLSP2_UART_RFR_N]", /* GPIO_44 */ -- "[BLSP3_UART_TX]", /* GPIO_45 */ -- "[BLSP3_UART_RX]", /* GPIO_46 */ -- "[I2C0_SDA]", /* GPIO_47, LS_I2C0_SDA, LSEC pin 17 */ -- "[I2C0_SCL]", /* GPIO_48, LS_I2C0_SCL, LSEC pin 15 */ -- "[UART0_TxD]", /* GPIO_49, BLSP9_UART_TX, LSEC pin 5 */ -- "[UART0_RxD]", /* GPIO_50, BLSP9_UART_RX, LSEC pin 7 */ -- "[UART0_CTS]", /* GPIO_51, BLSP9_UART_CTS_N, LSEC pin 3 */ -- "[UART0_RTS]", /* GPIO_52, BLSP9_UART_RFR_N, LSEC pin 9 */ -- "[CODEC_INT1_N]", /* GPIO_53 */ -- "[CODEC_INT2_N]", /* GPIO_54 */ -- "[BLSP7_I2C_SDA]", /* GPIO_55 */ -- "[BLSP7_I2C_SCL]", /* GPIO_56 */ -- "MI2S_MCLK", /* GPIO_57, S HSEC pin 3 */ -- "[PCM_CLK]", /* GPIO_58, QUA_MI2S_SCK, LSEC pin 18 */ -- "[PCM_FS]", /* GPIO_59, QUA_MI2S_WS, 
LSEC pin 16 */ -- "[PCM_DO]", /* GPIO_60, QUA_MI2S_DATA0, LSEC pin 20 */ -- "[PCM_DI]", /* GPIO_61, QUA_MI2S_DATA1, LSEC pin 22 */ -- "GPIO-E", /* GPIO_62, LSEC pin 27 */ -- "TP87", /* GPIO_63 */ -- "[CODEC_RST_N]", /* GPIO_64 */ -- "[PCM1_CLK]", /* GPIO_65 */ -- "[PCM1_SYNC]", /* GPIO_66 */ -- "[PCM1_DIN]", /* GPIO_67 */ -- "[PCM1_DOUT]", /* GPIO_68 */ -- "AUDIO_REF_CLK", /* GPIO_69 */ -- "SLIMBUS_CLK", /* GPIO_70 */ -- "SLIMBUS_DATA0", /* GPIO_71 */ -- "SLIMBUS_DATA1", /* GPIO_72 */ -- "NC", /* GPIO_73 */ -- "NC", /* GPIO_74 */ -- "NC", /* GPIO_75 */ -- "NC", /* GPIO_76 */ -- "TP94", /* GPIO_77 */ -- "NC", /* GPIO_78 */ -- "TP95", /* GPIO_79 */ -- "GPIO-A", /* GPIO_80, MEMS_RESET_N, LSEC pin 23 */ -- "TP88", /* GPIO_81 */ -- "TP89", /* GPIO_82 */ -- "TP90", /* GPIO_83 */ -- "TP91", /* GPIO_84 */ -- "[SD_DAT0]", /* GPIO_85, BLSP12_SPI_MOSI, P HSEC pin 1 */ -- "[SD_CMD]", /* GPIO_86, BLSP12_SPI_MISO, P HSEC pin 11 */ -- "[SD_DAT3]", /* GPIO_87, BLSP12_SPI_CS_N, P HSEC pin 7 */ -- "[SD_SCLK]", /* GPIO_88, BLSP12_SPI_CLK, P HSEC pin 9 */ -- "TSIF1_CLK", /* GPIO_89, S HSEC pin 42 */ -- "TSIF1_EN", /* GPIO_90, S HSEC pin 46 */ -- "TSIF1_DATA", /* GPIO_91, S HSEC pin 44 */ -- "NC", /* GPIO_92 */ -- "TSIF2_CLK", /* GPIO_93, S HSEC pin 52 */ -- "TSIF2_EN", /* GPIO_94, S HSEC pin 56 */ -- "TSIF2_DATA", /* GPIO_95, S HSEC pin 54 */ -- "TSIF2_SYNC", /* GPIO_96, S HSEC pin 58 */ -- "NC", /* GPIO_97 */ -- "CAM1_STANDBY_N", /* GPIO_98 */ -- "NC", /* GPIO_99 */ -- "NC", /* GPIO_100 */ -- "[LCD1_RESET_N]", /* GPIO_101, S HSEC pin 51 */ -- "BOOT_CONFIG1", /* GPIO_102 */ -- "USB_HUB_RESET", /* GPIO_103 */ -- "CAM1_RST_N", /* GPIO_104 */ -- "NC", /* GPIO_105 */ -- "NC", /* GPIO_106 */ -- "NC", /* GPIO_107 */ -- "NC", /* GPIO_108 */ -- "NC", /* GPIO_109 */ -- "NC", /* GPIO_110 */ -- "NC", /* GPIO_111 */ -- "NC", /* GPIO_112 */ -- "PMI8994_BUA", /* GPIO_113 */ -- "PCIE2_RST_N", /* GPIO_114 */ -- "PCIE2_CLKREQ_N", /* GPIO_115 */ -- "PCIE2_WAKE", /* GPIO_116 */ -- "SSC_IRQ_0", /* GPIO_117 */ -- "SSC_IRQ_1", /* GPIO_118 */ -- "SSC_IRQ_2", /* GPIO_119 */ -- "NC", /* GPIO_120 */ -- "GPIO121", /* GPIO_121, S HSEC pin 2 */ -- "NC", /* GPIO_122 */ -- "SSC_IRQ_6", /* GPIO_123 */ -- "SSC_IRQ_7", /* GPIO_124 */ -- "GPIO-C", /* GPIO_125, TS_INT0, LSEC pin 25 */ -- "BOOT_CONFIG5", /* GPIO_126 */ -- "NC", /* GPIO_127 */ -- "NC", /* GPIO_128 */ -- "BOOT_CONFIG7", /* GPIO_129 */ -- "PCIE1_RST_N", /* GPIO_130 */ -- "PCIE1_CLKREQ_N", /* GPIO_131 */ -- "PCIE1_WAKE", /* GPIO_132 */ -- "GPIO-L", /* GPIO_133, CAM2_STANDBY_N, LSEC pin 34 */ -- "NC", /* GPIO_134 */ -- "NC", /* GPIO_135 */ -- "BOOT_CONFIG8", /* GPIO_136 */ -- "NC", /* GPIO_137 */ -- "NC", /* GPIO_138 */ -- "GPS_SSBI2", /* GPIO_139 */ -- "GPS_SSBI1", /* GPIO_140 */ -- "NC", /* GPIO_141 */ -- "NC", /* GPIO_142 */ -- "NC", /* GPIO_143 */ -- "BOOT_CONFIG6", /* GPIO_144 */ -- "NC", /* GPIO_145 */ -- "NC", /* GPIO_146 */ -- "NC", /* GPIO_147 */ -- "NC", /* GPIO_148 */ -- "NC"; /* GPIO_149 */ -- -- sdc2_cd_on: sdc2_cd_on { -- mux { -- pins = "gpio38"; -- function = "gpio"; -- }; -- -- config { -- pins = "gpio38"; -- bias-pull-up; /* pull up */ -- drive-strength = <16>; /* 16 MA */ -- }; -- }; -- -- sdc2_cd_off: sdc2_cd_off { -- mux { -- pins = "gpio38"; -- function = "gpio"; -- }; -- -- config { -- pins = "gpio38"; -- bias-pull-up; /* pull up */ -- drive-strength = <2>; /* 2 MA */ -- }; -- }; -- -- blsp1_uart2_default: blsp1_uart2_default { -- mux { -- pins = "gpio41", "gpio42", "gpio43", "gpio44"; -- function = "blsp_uart2"; -- }; -- -- config { -- pins = "gpio41", 
"gpio42", "gpio43", "gpio44"; -- drive-strength = <16>; -- bias-disable; -- }; -- }; -- -- blsp1_uart2_sleep: blsp1_uart2_sleep { -- mux { -- pins = "gpio41", "gpio42", "gpio43", "gpio44"; -- function = "gpio"; -- }; -- -- config { -- pins = "gpio41", "gpio42", "gpio43", "gpio44"; -- drive-strength = <2>; -- bias-disable; -- }; -- }; -- -- hdmi_hpd_active: hdmi_hpd_active { -- mux { -- pins = "gpio34"; -- function = "hdmi_hot"; -- }; -- -- config { -- pins = "gpio34"; -- bias-pull-down; -- drive-strength = <16>; -- }; -- }; -- -- hdmi_hpd_suspend: hdmi_hpd_suspend { -- mux { -- pins = "gpio34"; -- function = "hdmi_hot"; -- }; -- -- config { -- pins = "gpio34"; -- bias-pull-down; -- drive-strength = <2>; -- }; -- }; -- -- hdmi_ddc_active: hdmi_ddc_active { -- mux { -- pins = "gpio32", "gpio33"; -- function = "hdmi_ddc"; -- }; -- -- config { -- pins = "gpio32", "gpio33"; -- drive-strength = <2>; -- bias-pull-up; -- }; -- }; -- -- hdmi_ddc_suspend: hdmi_ddc_suspend { -- mux { -- pins = "gpio32", "gpio33"; -- function = "hdmi_ddc"; -- }; -- -- config { -- pins = "gpio32", "gpio33"; -- drive-strength = <2>; -- bias-pull-down; -- }; -- }; --}; -- --&pcie0 { -- status = "okay"; -- perst-gpio = <&tlmm 35 GPIO_ACTIVE_LOW>; -- vddpe-3v3-supply = <&wlan_en>; -- vdda-supply = <&vreg_l28a_0p925>; --}; -- --&pcie1 { -- status = "okay"; -- perst-gpio = <&tlmm 130 GPIO_ACTIVE_LOW>; -- vdda-supply = <&vreg_l28a_0p925>; --}; -- --&pcie2 { -- status = "okay"; -- perst-gpio = <&tlmm 114 GPIO_ACTIVE_LOW>; -- vdda-supply = <&vreg_l28a_0p925>; --}; -- --&pcie_phy { -- status = "okay"; -- -- vdda-phy-supply = <&vreg_l28a_0p925>; -- vdda-pll-supply = <&vreg_l12a_1p8>; --}; -- --&pm8994_gpios { -- gpio-line-names = -- "NC", -- "KEY_VOLP_N", -- "NC", -- "BL1_PWM", -- "GPIO-F", /* BL0_PWM, LSEC pin 28 */ -- "BL1_EN", -- "NC", -- "WLAN_EN", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "NC", -- "DIVCLK1", -- "DIVCLK2", -- "DIVCLK3", -- "DIVCLK4", -- "BT_EN", -- "PMIC_SLB", -- "PMIC_BUA", -- "USB_VBUS_DET"; -- -- pinctrl-names = "default"; -- pinctrl-0 = <&ls_exp_gpio_f &bt_en_gpios>; -- -- ls_exp_gpio_f: pm8994_gpio5 { -- pinconf { -- pins = "gpio5"; -- output-low; -- power-source = <2>; // PM8994_GPIO_S4, 1.8V -- }; -- }; -- -- bt_en_gpios: bt_en_gpios { -- pinconf { -- pins = "gpio19"; -- function = PMIC_GPIO_FUNC_NORMAL; -- output-low; -- power-source = ; // 1.8V -- qcom,drive-strength = ; -- bias-pull-down; -- }; -- }; -- -- wlan_en_gpios: wlan_en_gpios { -- pinconf { -- pins = "gpio8"; -- function = PMIC_GPIO_FUNC_NORMAL; -- output-low; -- power-source = ; // 1.8V -- qcom,drive-strength = ; -- bias-pull-down; -- }; -- }; -- -- audio_mclk: clk_div1 { -- pinconf { -- pins = "gpio15"; -- function = "func1"; -- power-source = ; // 1.8V -- }; -- }; -- -- volume_up_gpio: pm8996_gpio2 { -- pinconf { -- pins = "gpio2"; -- function = "normal"; -- input-enable; -- drive-push-pull; -- bias-pull-up; -- qcom,drive-strength = ; -- power-source = ; // 1.8V -- }; -- }; -- -- divclk4_pin_a: divclk4 { -- pinconf { -- pins = "gpio18"; -- function = PMIC_GPIO_FUNC_FUNC2; -- -- bias-disable; -- power-source = ; -- }; -- }; -- -- usb3_vbus_det_gpio: pm8996_gpio22 { -- pinconf { -- pins = "gpio22"; -- function = PMIC_GPIO_FUNC_NORMAL; -- input-enable; -- bias-pull-down; -- qcom,drive-strength = ; -- power-source = ; // 1.8V -- }; -- }; --}; -- --&pm8994_mpps { -- gpio-line-names = -- "VDDPX_BIAS", -- "WIFI_LED", -- "NC", -- "BT_LED", -- "PM_MPP05", -- "PM_MPP06", -- "PM_MPP07", -- "NC"; --}; -- --&pm8994_spmi_regulators { -- 
qcom,saw-reg = <&saw3>; -- s9 { -- qcom,saw-slave; -- }; -- s10 { -- qcom,saw-slave; -- }; -- s11 { -- qcom,saw-leader; -- regulator-always-on; -- regulator-min-microvolt = <980000>; -- regulator-max-microvolt = <980000>; -- }; --}; -- --&pmi8994_gpios { -- gpio-line-names = -- "NC", -- "SPKR_AMP_EN1", -- "SPKR_AMP_EN2", -- "TP61", -- "NC", -- "USB2_VBUS_DET", -- "NC", -- "NC", -- "NC", -- "NC"; -- -- usb2_vbus_det_gpio: pmi8996_gpio6 { -- pinconf { -- pins = "gpio6"; -- function = PMIC_GPIO_FUNC_NORMAL; -- input-enable; -- bias-pull-down; -- qcom,drive-strength = ; -- power-source = ; // 1.8V -- }; -- }; --}; -- --&pmi8994_spmi_regulators { -- vdd_gfx: s2@1700 { -- reg = <0x1700 0x100>; -- regulator-name = "VDD_GFX"; -- regulator-min-microvolt = <980000>; -- regulator-max-microvolt = <980000>; -- }; --}; -- --&rpm_requests { -- pm8994-regulators { -- compatible = "qcom,rpm-pm8994-regulators"; -- -- vdd_s1-supply = <&vph_pwr>; -- vdd_s2-supply = <&vph_pwr>; -- vdd_s3-supply = <&vph_pwr>; -- vdd_s4-supply = <&vph_pwr>; -- vdd_s5-supply = <&vph_pwr>; -- vdd_s6-supply = <&vph_pwr>; -- vdd_s7-supply = <&vph_pwr>; -- vdd_s8-supply = <&vph_pwr>; -- vdd_s9-supply = <&vph_pwr>; -- vdd_s10-supply = <&vph_pwr>; -- vdd_s11-supply = <&vph_pwr>; -- vdd_s12-supply = <&vph_pwr>; -- vdd_l1-supply = <&vreg_s1b_1p025>; -- vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>; -- vdd_l3_l11-supply = <&vreg_s3a_1p3>; -- vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>; -- vdd_l5_l7-supply = <&vreg_s5a_2p15>; -- vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>; -- vdd_l8_l16_l30-supply = <&vph_pwr>; -- vdd_l9_l10_l18_l22-supply = <&vph_pwr_bbyp>; -- vdd_l13_l19_l23_l24-supply = <&vph_pwr_bbyp>; -- vdd_l14_l15-supply = <&vreg_s5a_2p15>; -- vdd_l17_l29-supply = <&vph_pwr_bbyp>; -- vdd_l20_l21-supply = <&vph_pwr_bbyp>; -- vdd_l25-supply = <&vreg_s3a_1p3>; -- vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>; -- -- vreg_s3a_1p3: s3 { -- regulator-name = "vreg_s3a_1p3"; -- regulator-min-microvolt = <1300000>; -- regulator-max-microvolt = <1300000>; -- }; -- -- /** -- * 1.8v required on LS expansion -- * for mezzanine boards -- */ -- vreg_s4a_1p8: s4 { -- regulator-name = "vreg_s4a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- regulator-always-on; -- }; -- vreg_s5a_2p15: s5 { -- regulator-name = "vreg_s5a_2p15"; -- regulator-min-microvolt = <2150000>; -- regulator-max-microvolt = <2150000>; -- }; -- vreg_s7a_1p0: s7 { -- regulator-name = "vreg_s7a_1p0"; -- regulator-min-microvolt = <800000>; -- regulator-max-microvolt = <800000>; -- }; -- -- vreg_l1a_1p0: l1 { -- regulator-name = "vreg_l1a_1p0"; -- regulator-min-microvolt = <1000000>; -- regulator-max-microvolt = <1000000>; -- }; -- vreg_l2a_1p25: l2 { -- regulator-name = "vreg_l2a_1p25"; -- regulator-min-microvolt = <1250000>; -- regulator-max-microvolt = <1250000>; -- }; -- vreg_l3a_0p875: l3 { -- regulator-name = "vreg_l3a_0p875"; -- regulator-min-microvolt = <850000>; -- regulator-max-microvolt = <850000>; -- }; -- vreg_l4a_1p225: l4 { -- regulator-name = "vreg_l4a_1p225"; -- regulator-min-microvolt = <1225000>; -- regulator-max-microvolt = <1225000>; -- }; -- vreg_l6a_1p2: l6 { -- regulator-name = "vreg_l6a_1p2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <1200000>; -- }; -- vreg_l8a_1p8: l8 { -- regulator-name = "vreg_l8a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l9a_1p8: l9 { -- regulator-name = "vreg_l9a_1p8"; -- regulator-min-microvolt = <1800000>; -- 
regulator-max-microvolt = <1800000>; -- }; -- vreg_l10a_1p8: l10 { -- regulator-name = "vreg_l10a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l11a_1p15: l11 { -- regulator-name = "vreg_l11a_1p15"; -- regulator-min-microvolt = <1150000>; -- regulator-max-microvolt = <1150000>; -- }; -- vreg_l12a_1p8: l12 { -- regulator-name = "vreg_l12a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l13a_2p95: l13 { -- regulator-name = "vreg_l13a_2p95"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <2950000>; -- }; -- vreg_l14a_1p8: l14 { -- regulator-name = "vreg_l14a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l15a_1p8: l15 { -- regulator-name = "vreg_l15a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l16a_2p7: l16 { -- regulator-name = "vreg_l16a_2p7"; -- regulator-min-microvolt = <2700000>; -- regulator-max-microvolt = <2700000>; -- }; -- vreg_l17a_2p8: l17 { -- regulator-name = "vreg_l17a_2p8"; -- regulator-min-microvolt = <2500000>; -- regulator-max-microvolt = <2500000>; -- }; -- vreg_l18a_2p85: l18 { -- regulator-name = "vreg_l18a_2p85"; -- regulator-min-microvolt = <2700000>; -- regulator-max-microvolt = <2900000>; -- }; -- vreg_l19a_2p8: l19 { -- regulator-name = "vreg_l19a_2p8"; -- regulator-min-microvolt = <3000000>; -- regulator-max-microvolt = <3000000>; -- }; -- vreg_l20a_2p95: l20 { -- regulator-name = "vreg_l20a_2p95"; -- regulator-min-microvolt = <2950000>; -- regulator-max-microvolt = <2950000>; -- regulator-allow-set-load; -- }; -- vreg_l21a_2p95: l21 { -- regulator-name = "vreg_l21a_2p95"; -- regulator-min-microvolt = <2950000>; -- regulator-max-microvolt = <2950000>; -- regulator-allow-set-load; -- regulator-system-load = <200000>; -- }; -- vreg_l22a_3p0: l22 { -- regulator-name = "vreg_l22a_3p0"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- }; -- vreg_l23a_2p8: l23 { -- regulator-name = "vreg_l23a_2p8"; -- regulator-min-microvolt = <2800000>; -- regulator-max-microvolt = <2800000>; -- }; -- vreg_l24a_3p075: l24 { -- regulator-name = "vreg_l24a_3p075"; -- regulator-min-microvolt = <3075000>; -- regulator-max-microvolt = <3075000>; -- }; -- vreg_l25a_1p2: l25 { -- regulator-name = "vreg_l25a_1p2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <1200000>; -- regulator-allow-set-load; -- }; -- vreg_l26a_0p8: l27 { -- regulator-name = "vreg_l26a_0p8"; -- regulator-min-microvolt = <1000000>; -- regulator-max-microvolt = <1000000>; -- }; -- vreg_l28a_0p925: l28 { -- regulator-name = "vreg_l28a_0p925"; -- regulator-min-microvolt = <925000>; -- regulator-max-microvolt = <925000>; -- regulator-allow-set-load; -- }; -- vreg_l29a_2p8: l29 { -- regulator-name = "vreg_l29a_2p8"; -- regulator-min-microvolt = <2800000>; -- regulator-max-microvolt = <2800000>; -- }; -- vreg_l30a_1p8: l30 { -- regulator-name = "vreg_l30a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- vreg_l32a_1p8: l32 { -- regulator-name = "vreg_l32a_1p8"; -- regulator-min-microvolt = <1800000>; -- regulator-max-microvolt = <1800000>; -- }; -- -- vreg_lvs1a_1p8: lvs1 { -- regulator-name = "vreg_lvs1a_1p8"; -- }; -- -- vreg_lvs2a_1p8: lvs2 { -- regulator-name = "vreg_lvs2a_1p8"; -- }; -- }; -- -- pmi8994-regulators { -- compatible = "qcom,rpm-pmi8994-regulators"; -- -- vdd_s1-supply = 
<&vph_pwr>; -- vdd_s2-supply = <&vph_pwr>; -- vdd_s3-supply = <&vph_pwr>; -- vdd_bst_byp-supply = <&vph_pwr>; -- -- vph_pwr_bbyp: boost-bypass { -- regulator-name = "vph_pwr_bbyp"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- }; -- -- vreg_s1b_1p025: s1 { -- regulator-name = "vreg_s1b_1p025"; -- regulator-min-microvolt = <1025000>; -- regulator-max-microvolt = <1025000>; -- }; -- }; --}; -- --&sdhc2 { -- /* External SD card */ -- pinctrl-names = "default", "sleep"; -- pinctrl-0 = <&sdc2_state_on &sdc2_cd_on>; -- pinctrl-1 = <&sdc2_state_off &sdc2_cd_off>; -- cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>; -- vmmc-supply = <&vreg_l21a_2p95>; -- vqmmc-supply = <&vreg_l13a_2p95>; -- status = "okay"; --}; -- --&q6asmdai { -- dai@0 { -- reg = <0>; -- }; -- -- dai@1 { -- reg = <1>; -- }; -- -- dai@2 { -- reg = <2>; -- }; --}; -- --&sound { -- compatible = "qcom,apq8096-sndcard"; -- model = "DB820c"; -- audio-routing = "RX_BIAS", "MCLK", -- "MM_DL1", "MultiMedia1 Playback", -- "MM_DL2", "MultiMedia2 Playback", -- "MultiMedia3 Capture", "MM_UL3"; -- -- mm1-dai-link { -- link-name = "MultiMedia1"; -- cpu { -- sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>; -- }; -- }; -- -- mm2-dai-link { -- link-name = "MultiMedia2"; -- cpu { -- sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>; -- }; -- }; -- -- mm3-dai-link { -- link-name = "MultiMedia3"; -- cpu { -- sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>; -- }; -- }; -- -- hdmi-dai-link { -- link-name = "HDMI"; -- cpu { -- sound-dai = <&q6afedai HDMI_RX>; -- }; -- -- platform { -- sound-dai = <&q6routing>; -- }; -- -- codec { -- sound-dai = <&hdmi 0>; -- }; -- }; -- -- slim-dai-link { -- link-name = "SLIM Playback"; -- cpu { -- sound-dai = <&q6afedai SLIMBUS_6_RX>; -- }; -- -- platform { -- sound-dai = <&q6routing>; -- }; -- -- codec { -- sound-dai = <&wcd9335 6>; -- }; -- }; -- -- slimcap-dai-link { -- link-name = "SLIM Capture"; -- cpu { -- sound-dai = <&q6afedai SLIMBUS_0_TX>; -- }; -- -- platform { -- sound-dai = <&q6routing>; -- }; -- -- codec { -- sound-dai = <&wcd9335 1>; -- }; -- }; --}; -- --&ufsphy { -- status = "okay"; -- -- vdda-phy-supply = <&vreg_l28a_0p925>; -- vdda-pll-supply = <&vreg_l12a_1p8>; -- vddp-ref-clk-supply = <&vreg_l25a_1p2>; --}; -- --&ufshc { -- status = "okay"; -- -- vcc-supply = <&vreg_l20a_2p95>; -- vccq-supply = <&vreg_l25a_1p2>; -- vccq2-supply = <&vreg_s4a_1p8>; -- -- vcc-max-microamp = <600000>; -- vccq-max-microamp = <450000>; -- vccq2-max-microamp = <450000>; --}; -- --&usb2 { -- status = "okay"; -- extcon = <&usb2_id>; -- -- dwc3@7600000 { -- extcon = <&usb2_id>; -- dr_mode = "otg"; -- maximum-speed = "high-speed"; -- }; --}; -- --&usb3 { -- status = "okay"; -- extcon = <&usb3_id>; -- -- dwc3@6a00000 { -- extcon = <&usb3_id>; -- dr_mode = "otg"; -- }; --}; -- --&usb3phy { -- status = "okay"; -- -- vdda-phy-supply = <&vreg_l28a_0p925>; -- vdda-pll-supply = <&vreg_l12a_1p8>; -- --}; -- --&venus { -- status = "okay"; --}; -- --&wcd9335 { -- clock-names = "mclk", "slimbus"; -- clocks = <&div1_mclk>, -- <&rpmcc RPM_SMD_BB_CLK1>; -- -- vdd-buck-supply = <&vreg_s4a_1p8>; -- vdd-buck-sido-supply = <&vreg_s4a_1p8>; -- vdd-tx-supply = <&vreg_s4a_1p8>; -- vdd-rx-supply = <&vreg_s4a_1p8>; -- vdd-io-supply = <&vreg_s4a_1p8>; --}; -diff --git a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts -index a57c60070cdc2..f0a98ab1616ab 100644 ---- a/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts -+++ b/arch/arm64/boot/dts/qcom/apq8096-ifc6640.dts -@@ 
-26,7 +26,7 @@ - - v1p05: v1p05-regulator { - compatible = "regulator-fixed"; -- reglator-name = "v1p05"; -+ regulator-name = "v1p05"; - regulator-always-on; - regulator-boot-on; - -@@ -38,7 +38,7 @@ - - v12_poe: v12-poe-regulator { - compatible = "regulator-fixed"; -- reglator-name = "v12_poe"; -+ regulator-name = "v12_poe"; - regulator-always-on; - regulator-boot-on; - -diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts -index 5aec183087128..5310259d03dc5 100644 ---- a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts -+++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts -@@ -37,6 +37,8 @@ - - &spi_0 { - cs-select = <0>; -+ pinctrl-0 = <&spi_0_pins>; -+ pinctrl-names = "default"; - status = "okay"; - - m25p80@0 { -diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi -index d2fe58e0eb7aa..3ca198f866c3c 100644 ---- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi -+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi -@@ -200,7 +200,7 @@ - clock-names = "bam_clk"; - #dma-cells = <1>; - qcom,ee = <1>; -- qcom,controlled-remotely = <1>; -+ qcom,controlled-remotely; - qcom,config-pipe-trust-reg = <0>; - }; - -@@ -221,7 +221,7 @@ - interrupts = ; - gpio-controller; - #gpio-cells = <2>; -- gpio-ranges = <&tlmm 0 80>; -+ gpio-ranges = <&tlmm 0 0 80>; - interrupt-controller; - #interrupt-cells = <2>; - -@@ -401,7 +401,7 @@ - reset-names = "phy", - "common"; - -- pcie_phy0: lane@84200 { -+ pcie_phy0: phy@84200 { - reg = <0x0 0x84200 0x0 0x16c>, /* Serdes Tx */ - <0x0 0x84400 0x0 0x200>, /* Serdes Rx */ - <0x0 0x84800 0x0 0x4f4>; /* PCS: Lane0, COM, PCIE */ -@@ -433,10 +433,8 @@ - phys = <&pcie_phy0>; - phy-names = "pciephy"; - -- ranges = <0x81000000 0 0x20200000 0 0x20200000 -- 0 0x10000>, /* downstream I/O */ -- <0x82000000 0 0x20220000 0 0x20220000 -- 0 0xfde0000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x0 0x20200000 0x0 0x10000>, -+ <0x82000000 0x0 0x20220000 0x0 0x20220000 0x0 0xfde0000>; - - interrupts = ; - interrupt-names = "msi"; -diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts -index cc08dc4eb56a5..68698cdf56c46 100644 ---- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts -+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts -@@ -60,11 +60,11 @@ - perst-gpio = <&tlmm 58 0x1>; - }; - --&pcie_phy0 { -+&pcie_qmp0 { - status = "okay"; - }; - --&pcie_phy1 { -+&pcie_qmp1 { - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi -index db333001df4d6..17eeff106bab7 100644 ---- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi -+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi -@@ -13,7 +13,7 @@ - clocks { - sleep_clk: sleep_clk { - compatible = "fixed-clock"; -- clock-frequency = <32000>; -+ clock-frequency = <32768>; - #clock-cells = <0>; - }; - -@@ -106,7 +106,7 @@ - reset-names = "phy","common"; - status = "disabled"; - -- usb1_ssphy: lane@58200 { -+ usb1_ssphy: phy@58200 { - reg = <0x00058200 0x130>, /* Tx */ - <0x00058400 0x200>, /* Rx */ - <0x00058800 0x1f8>, /* PCS */ -@@ -114,7 +114,7 @@ - #phy-cells = <0>; - clocks = <&gcc GCC_USB1_PIPE_CLK>; - clock-names = "pipe0"; -- clock-output-names = "gcc_usb1_pipe_clk_src"; -+ clock-output-names = "usb3phy_1_cc_pipe_clk"; - }; - }; - -@@ -149,7 +149,7 @@ - reset-names = "phy","common"; - status = "disabled"; - -- usb0_ssphy: lane@78200 { -+ usb0_ssphy: phy@78200 { - reg = <0x00078200 0x130>, /* Tx */ - <0x00078400 0x200>, /* Rx */ - <0x00078800 0x1f8>, /* PCS */ 
-@@ -157,7 +157,7 @@ - #phy-cells = <0>; - clocks = <&gcc GCC_USB0_PIPE_CLK>; - clock-names = "pipe0"; -- clock-output-names = "gcc_usb0_pipe_clk_src"; -+ clock-output-names = "usb3phy_0_cc_pipe_clk"; - }; - }; - -@@ -174,34 +174,61 @@ - status = "disabled"; - }; - -- pcie_phy0: phy@86000 { -- compatible = "qcom,ipq8074-qmp-pcie-phy"; -- reg = <0x00086000 0x1000>; -- #phy-cells = <0>; -- clocks = <&gcc GCC_PCIE0_PIPE_CLK>; -- clock-names = "pipe_clk"; -- clock-output-names = "pcie20_phy0_pipe_clk"; -+ pcie_qmp0: phy@84000 { -+ compatible = "qcom,ipq8074-qmp-gen3-pcie-phy"; -+ reg = <0x00084000 0x1bc>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges; - -+ clocks = <&gcc GCC_PCIE0_AUX_CLK>, -+ <&gcc GCC_PCIE0_AHB_CLK>; -+ clock-names = "aux", "cfg_ahb"; - resets = <&gcc GCC_PCIE0_PHY_BCR>, - <&gcc GCC_PCIE0PHY_PHY_BCR>; - reset-names = "phy", - "common"; - status = "disabled"; -+ -+ pcie_phy0: phy@84200 { -+ reg = <0x84200 0x16c>, -+ <0x84400 0x200>, -+ <0x84800 0x1f0>, -+ <0x84c00 0xf4>; -+ #phy-cells = <0>; -+ #clock-cells = <0>; -+ clocks = <&gcc GCC_PCIE0_PIPE_CLK>; -+ clock-names = "pipe0"; -+ clock-output-names = "pcie20_phy0_pipe_clk"; -+ }; - }; - -- pcie_phy1: phy@8e000 { -+ pcie_qmp1: phy@8e000 { - compatible = "qcom,ipq8074-qmp-pcie-phy"; -- reg = <0x0008e000 0x1000>; -- #phy-cells = <0>; -- clocks = <&gcc GCC_PCIE1_PIPE_CLK>; -- clock-names = "pipe_clk"; -- clock-output-names = "pcie20_phy1_pipe_clk"; -+ reg = <0x0008e000 0x1c4>; -+ #address-cells = <1>; -+ #size-cells = <1>; -+ ranges; - -+ clocks = <&gcc GCC_PCIE1_AUX_CLK>, -+ <&gcc GCC_PCIE1_AHB_CLK>; -+ clock-names = "aux", "cfg_ahb"; - resets = <&gcc GCC_PCIE1_PHY_BCR>, - <&gcc GCC_PCIE1PHY_PHY_BCR>; - reset-names = "phy", - "common"; - status = "disabled"; -+ -+ pcie_phy1: phy@8e200 { -+ reg = <0x8e200 0x130>, -+ <0x8e400 0x200>, -+ <0x8e800 0x1f8>; -+ #phy-cells = <0>; -+ #clock-cells = <0>; -+ clocks = <&gcc GCC_PCIE1_PIPE_CLK>; -+ clock-names = "pipe0"; -+ clock-output-names = "pcie20_phy1_pipe_clk"; -+ }; - }; - - prng: rng@e3000 { -@@ -220,7 +247,7 @@ - clock-names = "bam_clk"; - #dma-cells = <1>; - qcom,ee = <1>; -- qcom,controlled-remotely = <1>; -+ qcom,controlled-remotely; - status = "disabled"; - }; - -@@ -437,7 +464,7 @@ - status = "disabled"; - }; - -- qpic_nand: nand@79b0000 { -+ qpic_nand: nand-controller@79b0000 { - compatible = "qcom,ipq8074-nand"; - reg = <0x079b0000 0x10000>; - #address-cells = <1>; -@@ -634,10 +661,8 @@ - phys = <&pcie_phy1>; - phy-names = "pciephy"; - -- ranges = <0x81000000 0 0x10200000 0x10200000 -- 0 0x100000 /* downstream I/O */ -- 0x82000000 0 0x10300000 0x10300000 -- 0 0xd00000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x10200000 0x0 0x10000>, /* I/O */ -+ <0x82000000 0x0 0x10220000 0x10220000 0x0 0xfde0000>; /* MEM */ - - interrupts = ; - interrupt-names = "msi"; -@@ -680,26 +705,26 @@ - }; - - pcie0: pci@20000000 { -- compatible = "qcom,pcie-ipq8074"; -+ compatible = "qcom,pcie-ipq8074-gen3"; - reg = <0x20000000 0xf1d>, - <0x20000f20 0xa8>, -- <0x00080000 0x2000>, -+ <0x20001000 0x1000>, -+ <0x00080000 0x4000>, - <0x20100000 0x1000>; -- reg-names = "dbi", "elbi", "parf", "config"; -+ reg-names = "dbi", "elbi", "atu", "parf", "config"; - device_type = "pci"; - linux,pci-domain = <0>; - bus-range = <0x00 0xff>; - num-lanes = <1>; -+ max-link-speed = <3>; - #address-cells = <3>; - #size-cells = <2>; - - phys = <&pcie_phy0>; - phy-names = "pciephy"; - -- ranges = <0x81000000 0 0x20200000 0x20200000 -- 0 0x100000 /* downstream I/O */ -- 0x82000000 
0 0x20300000 0x20300000 -- 0 0xd00000>; /* non-prefetchable memory */ -+ ranges = <0x81000000 0x0 0x00000000 0x20200000 0x0 0x10000>, /* I/O */ -+ <0x82000000 0x0 0x20220000 0x20220000 0x0 0xfde0000>; /* MEM */ - - interrupts = ; - interrupt-names = "msi"; -@@ -717,28 +742,30 @@ - clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>, - <&gcc GCC_PCIE0_AXI_M_CLK>, - <&gcc GCC_PCIE0_AXI_S_CLK>, -- <&gcc GCC_PCIE0_AHB_CLK>, -- <&gcc GCC_PCIE0_AUX_CLK>; -- -+ <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>, -+ <&gcc GCC_PCIE0_RCHNG_CLK>; - clock-names = "iface", - "axi_m", - "axi_s", -- "ahb", -- "aux"; -+ "axi_bridge", -+ "rchng"; -+ - resets = <&gcc GCC_PCIE0_PIPE_ARES>, - <&gcc GCC_PCIE0_SLEEP_ARES>, - <&gcc GCC_PCIE0_CORE_STICKY_ARES>, - <&gcc GCC_PCIE0_AXI_MASTER_ARES>, - <&gcc GCC_PCIE0_AXI_SLAVE_ARES>, - <&gcc GCC_PCIE0_AHB_ARES>, -- <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>; -+ <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>, -+ <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>; - reset-names = "pipe", - "sleep", - "sticky", - "axi_m", - "axi_s", - "ahb", -- "axi_m_sticky"; -+ "axi_m_sticky", -+ "axi_s_sticky"; - status = "disabled"; - }; - }; -diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts -index d66c155387850..7c0ceb3cff45e 100644 ---- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts -+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts -@@ -5,9 +5,22 @@ - - /dts-v1/; - --#include "msm8916-mtp.dtsi" -+#include "msm8916-pm8916.dtsi" - - / { - model = "Qualcomm Technologies, Inc. MSM 8916 MTP"; - compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp/1", "qcom,msm8916"; -+ -+ aliases { -+ serial0 = &blsp1_uart2; -+ usid0 = &pm8916_0; -+ }; -+ -+ chosen { -+ stdout-path = "serial0"; -+ }; -+}; -+ -+&blsp1_uart2 { -+ status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi -deleted file mode 100644 -index 1bd05046cdeba..0000000000000 ---- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi -+++ /dev/null -@@ -1,21 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-only --/* -- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
-- */ -- --#include "msm8916-pm8916.dtsi" -- --/ { -- aliases { -- serial0 = &blsp1_uart2; -- usid0 = &pm8916_0; -- }; -- -- chosen { -- stdout-path = "serial0"; -- }; --}; -- --&blsp1_uart2 { -- status = "okay"; --}; -diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi -index 3f85e34a8ce6f..fcc9f757c9e14 100644 ---- a/arch/arm64/boot/dts/qcom/msm8916.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi -@@ -19,8 +19,8 @@ - #size-cells = <2>; - - aliases { -- sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ -- sdhc2 = &sdhc_2; /* SDC2 SD card slot */ -+ mmc0 = &sdhc_1; /* SDC1 eMMC slot */ -+ mmc1 = &sdhc_2; /* SDC2 SD card slot */ - }; - - chosen { }; -@@ -445,7 +445,7 @@ - }; - }; - -- rpm_msg_ram: memory@60000 { -+ rpm_msg_ram: sram@60000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x00060000 0x8000>; - }; -@@ -1064,7 +1064,7 @@ - }; - }; - -- camss: camss@1b00000 { -+ camss: camss@1b0ac00 { - compatible = "qcom,msm8916-camss"; - reg = <0x01b0ac00 0x200>, - <0x01b00030 0x4>, -@@ -1307,7 +1307,7 @@ - }; - - mpss: remoteproc@4080000 { -- compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil"; -+ compatible = "qcom,msm8916-mss-pil"; - reg = <0x04080000 0x100>, - <0x04020000 0x040>; - -@@ -1384,11 +1384,17 @@ - lpass: audio-controller@7708000 { - status = "disabled"; - compatible = "qcom,lpass-cpu-apq8016"; -+ -+ /* -+ * Note: Unlike the name would suggest, the SEC_I2S_CLK -+ * is actually only used by Tertiary MI2S while -+ * Primary/Secondary MI2S both use the PRI_I2S_CLK. -+ */ - clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>, - <&gcc GCC_ULTAUDIO_PCNOC_MPORT_CLK>, - <&gcc GCC_ULTAUDIO_PCNOC_SWAY_CLK>, - <&gcc GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK>, -- <&gcc GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK>, -+ <&gcc GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK>, - <&gcc GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK>, - <&gcc GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK>; - -@@ -1741,8 +1747,8 @@ - <&rpmpd MSM8916_VDDMX>; - power-domain-names = "cx", "mx"; - -- qcom,state = <&wcnss_smp2p_out 0>; -- qcom,state-names = "stop"; -+ qcom,smem-states = <&wcnss_smp2p_out 0>; -+ qcom,smem-state-names = "stop"; - - pinctrl-names = "default"; - pinctrl-0 = <&wcnss_pin_a>; -@@ -1765,7 +1771,7 @@ - - label = "pronto"; - -- wcnss { -+ wcnss_ctrl: wcnss { - compatible = "qcom,wcnss"; - qcom,smd-channels = "WCNSS_CTRL"; - -diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts -index 1ccca83292ac9..60fcb024c8879 100644 ---- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts -+++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts -@@ -1,7 +1,8 @@ - // SPDX-License-Identifier: GPL-2.0-only - /* Copyright (c) 2015, LGE Inc. All rights reserved. - * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
-- * Copyright (c) 2021, Petr Vorel -+ * Copyright (c) 2021-2022, Petr Vorel -+ * Copyright (c) 2022, Dominik Kobinski - */ - - /dts-v1/; -@@ -13,6 +14,9 @@ - /* cont_splash_mem has different memory mapping */ - /delete-node/ &cont_splash_mem; - -+/* disabled on downstream, conflicts with cont_splash_mem */ -+/delete-node/ &dfps_data_mem; -+ - / { - model = "LG Nexus 5X"; - compatible = "lg,bullhead", "qcom,msm8992"; -@@ -47,7 +51,17 @@ - }; - - cont_splash_mem: memory@3400000 { -- reg = <0 0x03400000 0 0x1200000>; -+ reg = <0 0x03400000 0 0xc00000>; -+ no-map; -+ }; -+ -+ reserved@5000000 { -+ reg = <0x0 0x05000000 0x0 0x1a00000>; -+ no-map; -+ }; -+ -+ reserved@6c00000 { -+ reg = <0x0 0x06c00000 0x0 0x400000>; - no-map; - }; - }; -@@ -74,7 +88,7 @@ - vdd_l17_29-supply = <&vph_pwr>; - vdd_l20_21-supply = <&vph_pwr>; - vdd_l25-supply = <&pm8994_s5>; -- vdd_lvs1_2 = <&pm8994_s4>; -+ vdd_lvs1_2-supply = <&pm8994_s4>; - - /* S1, S2, S6 and S12 are managed by RPMPD */ - -diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts -index 357d55496e750..d08659c606b9a 100644 ---- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts -+++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts -@@ -11,6 +11,12 @@ - #include - #include - -+/delete-node/ &adsp_mem; -+/delete-node/ &audio_mem; -+/delete-node/ &mpss_mem; -+/delete-node/ &peripheral_region; -+/delete-node/ &rmtfs_mem; -+ - / { - model = "Xiaomi Mi 4C"; - compatible = "xiaomi,libra", "qcom,msm8992"; -@@ -60,24 +66,66 @@ - #size-cells = <2>; - ranges; - -- /* This is for getting crash logs using Android downstream kernels */ -- ramoops@dfc00000 { -- compatible = "ramoops"; -- reg = <0x0 0xdfc00000 0x0 0x40000>; -- console-size = <0x10000>; -- record-size = <0x10000>; -- ftrace-size = <0x10000>; -- pmsg-size = <0x20000>; -+ memory_hole: hole@6400000 { -+ reg = <0 0x06400000 0 0x600000>; -+ no-map; -+ }; -+ -+ memory_hole2: hole2@6c00000 { -+ reg = <0 0x06c00000 0 0x2400000>; -+ no-map; -+ }; -+ -+ mpss_mem: mpss@9000000 { -+ reg = <0 0x09000000 0 0x5a00000>; -+ no-map; -+ }; -+ -+ tzapp: tzapp@ea00000 { -+ reg = <0 0x0ea00000 0 0x1900000>; -+ no-map; - }; - -- modem_region: modem_region@9000000 { -- reg = <0x0 0x9000000 0x0 0x5a00000>; -+ mdm_rfsa_mem: mdm-rfsa@ca0b0000 { -+ reg = <0 0xca0b0000 0 0x10000>; - no-map; - }; - -- tzapp: modem_region@ea00000 { -- reg = <0x0 0xea00000 0x0 0x1900000>; -+ rmtfs_mem: rmtfs@ca100000 { -+ compatible = "qcom,rmtfs-mem"; -+ reg = <0 0xca100000 0 0x180000>; - no-map; -+ -+ qcom,client-id = <1>; -+ }; -+ -+ audio_mem: audio@cb400000 { -+ reg = <0 0xcb000000 0 0x400000>; -+ no-mem; -+ }; -+ -+ qseecom_mem: qseecom@cb400000 { -+ reg = <0 0xcb400000 0 0x1c00000>; -+ no-mem; -+ }; -+ -+ adsp_rfsa_mem: adsp-rfsa@cd000000 { -+ reg = <0 0xcd000000 0 0x10000>; -+ no-map; -+ }; -+ -+ sensor_rfsa_mem: sensor-rfsa@cd010000 { -+ reg = <0 0xcd010000 0 0x10000>; -+ no-map; -+ }; -+ -+ ramoops@dfc00000 { -+ compatible = "ramoops"; -+ reg = <0 0xdfc00000 0 0x40000>; -+ console-size = <0x10000>; -+ record-size = <0x10000>; -+ ftrace-size = <0x10000>; -+ pmsg-size = <0x20000>; - }; - }; - }; -@@ -120,9 +168,21 @@ - status = "okay"; - }; - --&peripheral_region { -- reg = <0x0 0x7400000 0x0 0x1c00000>; -- no-map; -+&pm8994_spmi_regulators { -+ VDD_APC0: s8 { -+ regulator-min-microvolt = <680000>; -+ regulator-max-microvolt = <1180000>; -+ regulator-always-on; -+ regulator-boot-on; -+ }; -+ -+ /* APC1 is 3-phase, but quoting downstream, s11 is "the gang leader" */ -+ 
VDD_APC1: s11 { -+ regulator-min-microvolt = <700000>; -+ regulator-max-microvolt = <1225000>; -+ regulator-always-on; -+ regulator-boot-on; -+ }; - }; - - &rpm_requests { -@@ -142,7 +202,7 @@ - vdd_l17_29-supply = <&vph_pwr>; - vdd_l20_21-supply = <&vph_pwr>; - vdd_l25-supply = <&pm8994_s5>; -- vdd_lvs1_2 = <&pm8994_s4>; -+ vdd_lvs1_2-supply = <&pm8994_s4>; - - /* S1, S2, S6 and S12 are managed by RPMPD */ - -diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi -index 58fe58cc77036..765e1f1989b58 100644 ---- a/arch/arm64/boot/dts/qcom/msm8992.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi -@@ -14,10 +14,6 @@ - compatible = "qcom,rpmcc-msm8992"; - }; - --&tcsr_mutex { -- compatible = "qcom,sfpb-mutex"; --}; -- - &timer { - interrupts = , - , -diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi -index 3a3790a52a2ce..e2d08915ec426 100644 ---- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi -@@ -540,8 +540,7 @@ - }; - - &pmi8994_spmi_regulators { -- vdd_gfx: s2@1700 { -- reg = <0x1700 0x100>; -+ vdd_gfx: s2 { - regulator-min-microvolt = <980000>; - regulator-max-microvolt = <980000>; - }; -diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi -index 48de66bf19c4c..55198190bbeaa 100644 ---- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi -@@ -183,8 +183,7 @@ - * power domain.. which still isn't enough and forces us to bind - * OXILI_CX and OXILI_GX together! - */ -- vdd_gfx: s2@1700 { -- reg = <0x1700 0x100>; -+ vdd_gfx: s2 { - regulator-name = "VDD_GFX"; - regulator-min-microvolt = <980000>; - regulator-max-microvolt = <980000>; -diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi -index 986fe60dec5fb..4447ed146b3ac 100644 ---- a/arch/arm64/boot/dts/qcom/msm8994.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi -@@ -93,7 +93,7 @@ - CPU6: cpu@102 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x101>; -+ reg = <0x0 0x102>; - enable-method = "psci"; - next-level-cache = <&L2_1>; - }; -@@ -101,7 +101,7 @@ - CPU7: cpu@103 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; -- reg = <0x0 0x101>; -+ reg = <0x0 0x103>; - enable-method = "psci"; - next-level-cache = <&L2_1>; - }; -@@ -183,8 +183,8 @@ - no-map; - }; - -- cont_splash_mem: memory@3800000 { -- reg = <0 0x03800000 0 0x2400000>; -+ cont_splash_mem: memory@3401000 { -+ reg = <0 0x03401000 0 0x2200000>; - no-map; - }; - -@@ -498,7 +498,7 @@ - #dma-cells = <1>; - qcom,ee = <0>; - qcom,controlled-remotely; -- num-channels = <18>; -+ num-channels = <24>; - qcom,num-ees = <4>; - }; - -@@ -634,7 +634,7 @@ - #dma-cells = <1>; - qcom,ee = <0>; - qcom,controlled-remotely; -- num-channels = <18>; -+ num-channels = <24>; - qcom,num-ees = <4>; - }; - -@@ -715,7 +715,7 @@ - reg = <0xfc400000 0x2000>; - }; - -- rpm_msg_ram: memory@fc428000 { -+ rpm_msg_ram: sram@fc428000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0xfc428000 0x4000>; - }; -@@ -725,7 +725,7 @@ - reg = <0xfc4ab000 0x4>; - }; - -- spmi_bus: spmi@fc4c0000 { -+ spmi_bus: spmi@fc4cf000 { - compatible = "qcom,spmi-pmic-arb"; - reg = <0xfc4cf000 0x1000>, - <0xfc4cb000 0x1000>, -diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts -index 
45ed594c1b9c2..7d9fc35bc7a06 100644 ---- a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts -+++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts -@@ -5,9 +5,31 @@ - - /dts-v1/; - --#include "msm8996-mtp.dtsi" -+#include "msm8996.dtsi" - - / { - model = "Qualcomm Technologies, Inc. MSM 8996 MTP"; - compatible = "qcom,msm8996-mtp"; -+ -+ aliases { -+ serial0 = &blsp2_uart2; -+ }; -+ -+ chosen { -+ stdout-path = "serial0"; -+ }; -+ -+ soc { -+ serial@75b0000 { -+ status = "okay"; -+ }; -+ }; -+}; -+ -+&hdmi { -+ status = "okay"; -+}; -+ -+&hdmi_phy { -+ status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi -deleted file mode 100644 -index ac43a91f11048..0000000000000 ---- a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi -+++ /dev/null -@@ -1,30 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-only --/* -- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. -- */ -- --#include "msm8996.dtsi" -- --/ { -- aliases { -- serial0 = &blsp2_uart2; -- }; -- -- chosen { -- stdout-path = "serial0"; -- }; -- -- soc { -- serial@75b0000 { -- status = "okay"; -- }; -- }; --}; -- --&hdmi { -- status = "okay"; --}; -- --&hdmi_phy { -- status = "okay"; --}; -diff --git a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi -index 507396c4d23b6..e85f7cf4a56ce 100644 ---- a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi -@@ -620,6 +620,7 @@ - &pmi8994_wled { - status = "okay"; - default-brightness = <512>; -+ qcom,num-strings = <3>; - }; - - &rpm_requests { -@@ -938,10 +939,6 @@ - }; - }; - --/* -- * For reasons that are currently unknown (but probably related to fusb301), USB takes about -- * 6 minutes to wake up (nothing interesting in kernel logs), but then it works as it should. 
-- */ - &usb3 { - status = "okay"; - qcom,select-utmi-as-pipe-clk; -@@ -950,6 +947,7 @@ - &usb3_dwc3 { - extcon = <&usb3_id>; - dr_mode = "peripheral"; -+ maximum-speed = "high-speed"; - phys = <&hsusb_phy1>; - phy-names = "usb2-phy"; - snps,hird-threshold = /bits/ 8 <0>; -diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi -index 52df22ab3f6ae..8a7c651785073 100644 ---- a/arch/arm64/boot/dts/qcom/msm8996.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi -@@ -142,82 +142,92 @@ - /* Nominal fmax for now */ - opp-307200000 { - opp-hz = /bits/ 64 <307200000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-422400000 { - opp-hz = /bits/ 64 <422400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-480000000 { - opp-hz = /bits/ 64 <480000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-556800000 { - opp-hz = /bits/ 64 <556800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-652800000 { - opp-hz = /bits/ 64 <652800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-729600000 { - opp-hz = /bits/ 64 <729600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-844800000 { - opp-hz = /bits/ 64 <844800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-960000000 { - opp-hz = /bits/ 64 <960000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1036800000 { - opp-hz = /bits/ 64 <1036800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1113600000 { - opp-hz = /bits/ 64 <1113600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1190400000 { - opp-hz = /bits/ 64 <1190400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1228800000 { - opp-hz = /bits/ 64 <1228800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1324800000 { - opp-hz = /bits/ 64 <1324800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x5>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1363200000 { -+ opp-hz = /bits/ 64 <1363200000>; -+ opp-supported-hw = <0x2>; - clock-latency-ns = <200000>; - }; - opp-1401600000 { - opp-hz = /bits/ 64 <1401600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x5>; - clock-latency-ns = <200000>; - }; - opp-1478400000 { - opp-hz = /bits/ 64 <1478400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1497600000 { -+ opp-hz = /bits/ 64 <1497600000>; -+ opp-supported-hw = <0x04>; - clock-latency-ns = <200000>; - }; - opp-1593600000 { - opp-hz = /bits/ 64 <1593600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; - clock-latency-ns = <200000>; - }; - }; -@@ -230,127 +240,137 @@ - /* Nominal fmax for now */ - opp-307200000 { - opp-hz = /bits/ 64 <307200000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-403200000 { - opp-hz = /bits/ 64 <403200000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-480000000 { - opp-hz = /bits/ 
64 <480000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-556800000 { - opp-hz = /bits/ 64 <556800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-652800000 { - opp-hz = /bits/ 64 <652800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-729600000 { - opp-hz = /bits/ 64 <729600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-806400000 { - opp-hz = /bits/ 64 <806400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-883200000 { - opp-hz = /bits/ 64 <883200000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-940800000 { - opp-hz = /bits/ 64 <940800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1036800000 { - opp-hz = /bits/ 64 <1036800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1113600000 { - opp-hz = /bits/ 64 <1113600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1190400000 { - opp-hz = /bits/ 64 <1190400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1248000000 { - opp-hz = /bits/ 64 <1248000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1324800000 { - opp-hz = /bits/ 64 <1324800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1401600000 { - opp-hz = /bits/ 64 <1401600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1478400000 { - opp-hz = /bits/ 64 <1478400000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1555200000 { - opp-hz = /bits/ 64 <1555200000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1632000000 { - opp-hz = /bits/ 64 <1632000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1708800000 { - opp-hz = /bits/ 64 <1708800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; - clock-latency-ns = <200000>; - }; - opp-1785600000 { - opp-hz = /bits/ 64 <1785600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x7>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1804800000 { -+ opp-hz = /bits/ 64 <1804800000>; -+ opp-supported-hw = <0x6>; - clock-latency-ns = <200000>; - }; - opp-1824000000 { - opp-hz = /bits/ 64 <1824000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1900800000 { -+ opp-hz = /bits/ 64 <1900800000>; -+ opp-supported-hw = <0x4>; - clock-latency-ns = <200000>; - }; - opp-1920000000 { - opp-hz = /bits/ 64 <1920000000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; - clock-latency-ns = <200000>; - }; - opp-1996800000 { - opp-hz = /bits/ 64 <1996800000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; - clock-latency-ns = <200000>; - }; - opp-2073600000 { - opp-hz = /bits/ 64 <2073600000>; -- opp-supported-hw = <0x77>; -+ opp-supported-hw = <0x1>; - clock-latency-ns = <200000>; - }; - opp-2150400000 { - opp-hz = /bits/ 64 <2150400000>; -- opp-supported-hw = 
<0x77>; -+ opp-supported-hw = <0x1>; - clock-latency-ns = <200000>; - }; - }; -@@ -598,7 +618,7 @@ - reset-names = "phy", "common", "cfg"; - status = "disabled"; - -- pciephy_0: lane@35000 { -+ pciephy_0: phy@35000 { - reg = <0x00035000 0x130>, - <0x00035200 0x200>, - <0x00035400 0x1dc>; -@@ -611,7 +631,7 @@ - reset-names = "lane0"; - }; - -- pciephy_1: lane@36000 { -+ pciephy_1: phy@36000 { - reg = <0x00036000 0x130>, - <0x00036200 0x200>, - <0x00036400 0x1dc>; -@@ -624,7 +644,7 @@ - reset-names = "lane1"; - }; - -- pciephy_2: lane@37000 { -+ pciephy_2: phy@37000 { - reg = <0x00037000 0x130>, - <0x00037200 0x200>, - <0x00037400 0x1dc>; -@@ -638,7 +658,7 @@ - }; - }; - -- rpm_msg_ram: memory@68000 { -+ rpm_msg_ram: sram@68000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x00068000 0x6000>; - }; -@@ -965,9 +985,6 @@ - nvmem-cells = <&speedbin_efuse>; - nvmem-cell-names = "speed_bin"; - -- qcom,gpu-quirk-two-pass-use-wfi; -- qcom,gpu-quirk-fault-detect-mask; -- - operating-points-v2 = <&gpu_opp_table>; - - status = "disabled"; -@@ -978,17 +995,17 @@ - compatible ="operating-points-v2"; - - /* -- * 624Mhz and 560Mhz are only available on speed -- * bin (1 << 0). All the rest are available on -- * all bins of the hardware -+ * 624Mhz is only available on speed bins 0 and 3. -+ * 560Mhz is only available on speed bins 0, 2 and 3. -+ * All the rest are available on all bins of the hardware. - */ - opp-624000000 { - opp-hz = /bits/ 64 <624000000>; -- opp-supported-hw = <0x01>; -+ opp-supported-hw = <0x09>; - }; - opp-560000000 { - opp-hz = /bits/ 64 <560000000>; -- opp-supported-hw = <0x01>; -+ opp-supported-hw = <0x0d>; - }; - opp-510000000 { - opp-hz = /bits/ 64 <510000000>; -@@ -1211,6 +1228,20 @@ - }; - }; - -+ blsp1_uart2_default: blsp1-uart2-default { -+ pins = "gpio41", "gpio42", "gpio43", "gpio44"; -+ function = "blsp_uart2"; -+ drive-strength = <16>; -+ bias-disable; -+ }; -+ -+ blsp1_uart2_sleep: blsp1-uart2-sleep { -+ pins = "gpio41", "gpio42", "gpio43", "gpio44"; -+ function = "gpio"; -+ drive-strength = <2>; -+ bias-disable; -+ }; -+ - blsp1_i2c3_default: blsp1-i2c2-default { - pins = "gpio47", "gpio48"; - function = "blsp_i2c3"; -@@ -1538,8 +1569,8 @@ - - #address-cells = <3>; - #size-cells = <2>; -- ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>, -- <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>; -+ ranges = <0x01000000 0x0 0x00000000 0x0c200000 0x0 0x100000>, -+ <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>; - - device_type = "pci"; - -@@ -1592,8 +1623,8 @@ - - #address-cells = <3>; - #size-cells = <2>; -- ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>, -- <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>; -+ ranges = <0x01000000 0x0 0x00000000 0x0d200000 0x0 0x100000>, -+ <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>; - - device_type = "pci"; - -@@ -1643,8 +1674,8 @@ - - #address-cells = <3>; - #size-cells = <2>; -- ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>, -- <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>; -+ ranges = <0x01000000 0x0 0x00000000 0x0e200000 0x0 0x100000>, -+ <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>; - - device_type = "pci"; - -@@ -1746,7 +1777,7 @@ - reset-names = "ufsphy"; - status = "disabled"; - -- ufsphy_lane: lanes@627400 { -+ ufsphy_lane: phy@627400 { - reg = <0x627400 0x12c>, - <0x627600 0x200>, - <0x627c00 0x1b4>; -@@ -1754,7 +1785,7 @@ - }; - }; - -- camss: camss@a00000 { -+ camss: camss@a34000 { - compatible = "qcom,msm8996-camss"; - reg = <0x00a34000 0x1000>, - 
<0x00a00030 0x4>, -@@ -2578,8 +2609,11 @@ - interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>; - phys = <&hsusb_phy1>, <&ssusb_phy_0>; - phy-names = "usb2-phy", "usb3-phy"; -+ snps,hird-threshold = /bits/ 8 <0>; - snps,dis_u2_susphy_quirk; - snps,dis_enblslpm_quirk; -+ snps,is-utmi-l1-suspend; -+ tx-fifo-resize; - }; - }; - -@@ -2601,7 +2635,7 @@ - reset-names = "phy", "common"; - status = "disabled"; - -- ssusb_phy_0: lane@7410200 { -+ ssusb_phy_0: phy@7410200 { - reg = <0x07410200 0x200>, - <0x07410400 0x130>, - <0x07410600 0x1a8>; -@@ -2704,6 +2738,9 @@ - clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, - <&gcc GCC_BLSP1_AHB_CLK>; - clock-names = "core", "iface"; -+ pinctrl-names = "default", "sleep"; -+ pinctrl-0 = <&blsp1_uart2_default>; -+ pinctrl-1 = <&blsp1_uart2_sleep>; - dmas = <&blsp1_dma 2>, <&blsp1_dma 3>; - dma-names = "tx", "rx"; - status = "disabled"; -@@ -2865,6 +2902,9 @@ - #size-cells = <1>; - ranges; - -+ interrupts = ; -+ interrupt-names = "hs_phy_irq"; -+ - clocks = <&gcc GCC_PERIPH_NOC_USB20_AHB_CLK>, - <&gcc GCC_USB20_MASTER_CLK>, - <&gcc GCC_USB20_MOCK_UTMI_CLK>, -diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi -new file mode 100644 -index 0000000000000..63e1b4ec7a360 ---- /dev/null -+++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi -@@ -0,0 +1,266 @@ -+// SPDX-License-Identifier: BSD-3-Clause -+/* -+ * Copyright (c) 2022, Linaro Limited -+ */ -+ -+#include "msm8996.dtsi" -+ -+/ { -+ /delete-node/ opp-table-cluster0; -+ /delete-node/ opp-table-cluster1; -+ -+ /* -+ * On MSM8996 Pro the cpufreq driver shifts speed bins into the high -+ * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1 -+ * becomes 0x20, speed 2 becomes 0x40. -+ */ -+ -+ cluster0_opp: opp-table-cluster0 { -+ compatible = "operating-points-v2-kryo-cpu"; -+ nvmem-cells = <&speedbin_efuse>; -+ opp-shared; -+ -+ opp-307200000 { -+ opp-hz = /bits/ 64 <307200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-384000000 { -+ opp-hz = /bits/ 64 <384000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-460800000 { -+ opp-hz = /bits/ 64 <460800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-537600000 { -+ opp-hz = /bits/ 64 <537600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-614400000 { -+ opp-hz = /bits/ 64 <614400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-691200000 { -+ opp-hz = /bits/ 64 <691200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-768000000 { -+ opp-hz = /bits/ 64 <768000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-844800000 { -+ opp-hz = /bits/ 64 <844800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-902400000 { -+ opp-hz = /bits/ 64 <902400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-979200000 { -+ opp-hz = /bits/ 64 <979200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1056000000 { -+ opp-hz = /bits/ 64 <1056000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1132800000 { -+ opp-hz = /bits/ 64 <1132800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1209600000 { -+ opp-hz = /bits/ 64 <1209600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1286400000 { -+ opp-hz = /bits/ 64 <1286400000>; -+ opp-supported-hw = <0x70>; -+ 
clock-latency-ns = <200000>; -+ }; -+ opp-1363200000 { -+ opp-hz = /bits/ 64 <1363200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1440000000 { -+ opp-hz = /bits/ 64 <1440000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1516800000 { -+ opp-hz = /bits/ 64 <1516800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1593600000 { -+ opp-hz = /bits/ 64 <1593600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1996800000 { -+ opp-hz = /bits/ 64 <1996800000>; -+ opp-supported-hw = <0x20>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-2188800000 { -+ opp-hz = /bits/ 64 <2188800000>; -+ opp-supported-hw = <0x10>; -+ clock-latency-ns = <200000>; -+ }; -+ }; -+ -+ cluster1_opp: opp-table-cluster1 { -+ compatible = "operating-points-v2-kryo-cpu"; -+ nvmem-cells = <&speedbin_efuse>; -+ opp-shared; -+ -+ opp-307200000 { -+ opp-hz = /bits/ 64 <307200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-384000000 { -+ opp-hz = /bits/ 64 <384000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-460800000 { -+ opp-hz = /bits/ 64 <460800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-537600000 { -+ opp-hz = /bits/ 64 <537600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-614400000 { -+ opp-hz = /bits/ 64 <614400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-691200000 { -+ opp-hz = /bits/ 64 <691200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-748800000 { -+ opp-hz = /bits/ 64 <748800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-825600000 { -+ opp-hz = /bits/ 64 <825600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-902400000 { -+ opp-hz = /bits/ 64 <902400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-979200000 { -+ opp-hz = /bits/ 64 <979200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1056000000 { -+ opp-hz = /bits/ 64 <1056000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1132800000 { -+ opp-hz = /bits/ 64 <1132800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1209600000 { -+ opp-hz = /bits/ 64 <1209600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1286400000 { -+ opp-hz = /bits/ 64 <1286400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1363200000 { -+ opp-hz = /bits/ 64 <1363200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1440000000 { -+ opp-hz = /bits/ 64 <1440000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1516800000 { -+ opp-hz = /bits/ 64 <1516800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1593600000 { -+ opp-hz = /bits/ 64 <1593600000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1670400000 { -+ opp-hz = /bits/ 64 <1670400000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1747200000 { -+ opp-hz = /bits/ 64 <1747200000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1824000000 { -+ opp-hz = /bits/ 64 <1824000000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1900800000 { -+ opp-hz = /bits/ 64 
<1900800000>; -+ opp-supported-hw = <0x70>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-1977600000 { -+ opp-hz = /bits/ 64 <1977600000>; -+ opp-supported-hw = <0x30>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-2054400000 { -+ opp-hz = /bits/ 64 <2054400000>; -+ opp-supported-hw = <0x30>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-2150400000 { -+ opp-hz = /bits/ 64 <2150400000>; -+ opp-supported-hw = <0x30>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-2246400000 { -+ opp-hz = /bits/ 64 <2246400000>; -+ opp-supported-hw = <0x10>; -+ clock-latency-ns = <200000>; -+ }; -+ opp-2342400000 { -+ opp-hz = /bits/ 64 <2342400000>; -+ opp-supported-hw = <0x10>; -+ clock-latency-ns = <200000>; -+ }; -+ }; -+}; -diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi -index 34039b5c80175..b7d72b0d579e4 100644 ---- a/arch/arm64/boot/dts/qcom/msm8998.dtsi -+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi -@@ -308,38 +308,42 @@ - LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 { - compatible = "arm,idle-state"; - idle-state-name = "little-retention"; -+ /* CPU Retention (C2D), L2 Active */ - arm,psci-suspend-param = <0x00000002>; - entry-latency-us = <81>; - exit-latency-us = <86>; -- min-residency-us = <200>; -+ min-residency-us = <504>; - }; - - LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 { - compatible = "arm,idle-state"; - idle-state-name = "little-power-collapse"; -+ /* CPU + L2 Power Collapse (C3, D4) */ - arm,psci-suspend-param = <0x40000003>; -- entry-latency-us = <273>; -- exit-latency-us = <612>; -- min-residency-us = <1000>; -+ entry-latency-us = <814>; -+ exit-latency-us = <4562>; -+ min-residency-us = <9183>; - local-timer-stop; - }; - - BIG_CPU_SLEEP_0: cpu-sleep-1-0 { - compatible = "arm,idle-state"; - idle-state-name = "big-retention"; -+ /* CPU Retention (C2D), L2 Active */ - arm,psci-suspend-param = <0x00000002>; - entry-latency-us = <79>; - exit-latency-us = <82>; -- min-residency-us = <200>; -+ min-residency-us = <1302>; - }; - - BIG_CPU_SLEEP_1: cpu-sleep-1-1 { - compatible = "arm,idle-state"; - idle-state-name = "big-power-collapse"; -+ /* CPU + L2 Power Collapse (C3, D4) */ - arm,psci-suspend-param = <0x40000003>; -- entry-latency-us = <336>; -- exit-latency-us = <525>; -- min-residency-us = <1000>; -+ entry-latency-us = <724>; -+ exit-latency-us = <2027>; -+ min-residency-us = <9419>; - local-timer-stop; - }; - }; -@@ -857,7 +861,7 @@ - reg = <0x00100000 0xb0000>; - }; - -- rpm_msg_ram: memory@778000 { -+ rpm_msg_ram: sram@778000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x00778000 0x7000>; - }; -@@ -947,7 +951,7 @@ - phy-names = "pciephy"; - status = "disabled"; - -- ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>, -+ ranges = <0x01000000 0x0 0x00000000 0x1b200000 0x0 0x100000>, - <0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>; - - #interrupt-cells = <1>; -@@ -990,7 +994,7 @@ - vdda-phy-supply = <&vreg_l1a_0p875>; - vdda-pll-supply = <&vreg_l2a_1p2>; - -- pciephy: lane@1c06800 { -+ pciephy: phy@1c06800 { - reg = <0x01c06200 0x128>, <0x01c06400 0x1fc>, <0x01c06800 0x20c>; - #phy-cells = <0>; - -@@ -1062,7 +1066,7 @@ - reset-names = "ufsphy"; - resets = <&ufshc 0>; - -- ufsphy_lanes: lanes@1da7400 { -+ ufsphy_lanes: phy@1da7400 { - reg = <0x01da7400 0x128>, - <0x01da7600 0x1fc>, - <0x01da7c00 0x1dc>, -@@ -1469,7 +1473,7 @@ - compatible = "arm,coresight-stm", "arm,primecell"; - reg = <0x06002000 0x1000>, - <0x16280000 0x180000>; -- reg-names = "stm-base", "stm-data-base"; -+ reg-names = "stm-base", "stm-stimulus-base"; - status = "disabled"; - - 
clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>; -@@ -1995,7 +1999,7 @@ - <&gcc GCC_USB3PHY_PHY_BCR>; - reset-names = "phy", "common"; - -- usb1_ssphy: lane@c010200 { -+ usb1_ssphy: phy@c010200 { - reg = <0xc010200 0x128>, - <0xc010400 0x200>, - <0xc010c00 0x20c>, -diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi -index e847d7209afc6..affc736d154ad 100644 ---- a/arch/arm64/boot/dts/qcom/pm660.dtsi -+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi -@@ -152,7 +152,7 @@ - qcom,pre-scaling = <1 3>; - }; - -- vcoin: vcoin@83 { -+ vcoin: vcoin@85 { - reg = ; - qcom,decimation = <1024>; - qcom,pre-scaling = <1 3>; -diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi -index 05086cbe573be..902e15d05a95b 100644 ---- a/arch/arm64/boot/dts/qcom/pm660l.dtsi -+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi -@@ -67,9 +67,10 @@ - - pm660l_wled: leds@d800 { - compatible = "qcom,pm660l-wled"; -- reg = <0xd800 0xd900>; -- interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>; -- interrupt-names = "ovp"; -+ reg = <0xd800>, <0xd900>; -+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, -+ <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; -+ interrupt-names = "ovp", "short"; - label = "backlight"; - - qcom,switching-freq = <800>; -diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi -index f931cb0de231f..42180f1b5dbbb 100644 ---- a/arch/arm64/boot/dts/qcom/pm8916.dtsi -+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi -@@ -86,7 +86,6 @@ - rtc@6000 { - compatible = "qcom,pm8941-rtc"; - reg = <0x6000>; -- reg-names = "rtc", "alarm"; - interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>; - }; - -diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi -index b4ac900ab115f..38cf0f14e8798 100644 ---- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi -+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi -@@ -35,14 +35,12 @@ - - pmi8994_wled: wled@d800 { - compatible = "qcom,pmi8994-wled"; -- reg = <0xd800 0xd900>; -- interrupts = <3 0xd8 0x02 IRQ_TYPE_EDGE_RISING>; -- interrupt-names = "short"; -- qcom,num-strings = <3>; -- /* Yes, all four strings *have to* be defined or things won't work. 
*/ -- qcom,enabled-strings = <0 1 2 3>; -+ reg = <0xd800>, <0xd900>; -+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, -+ <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; -+ interrupt-names = "ovp", "short"; - qcom,cabc; -- qcom,eternal-pfet; -+ qcom,external-pfet; - status = "disabled"; - }; - }; -diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi -index d230c510d4b7d..ef29e80c442c7 100644 ---- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi -+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi -@@ -41,5 +41,17 @@ - interrupt-names = "sc-err", "ocp"; - }; - }; -+ -+ pmi8998_wled: leds@d800 { -+ compatible = "qcom,pmi8998-wled"; -+ reg = <0xd800>, <0xd900>; -+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, -+ <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; -+ interrupt-names = "ovp", "short"; -+ label = "backlight"; -+ -+ status = "disabled"; -+ }; -+ - }; - }; -diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi -index 04fc2632a0b20..9e99fcf269dfd 100644 ---- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi -+++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi -@@ -16,8 +16,9 @@ - #size-cells = <0>; - - pmk8350_pon: pon@1300 { -- compatible = "qcom,pm8998-pon"; -- reg = <0x1300>; -+ compatible = "qcom,pmk8350-pon"; -+ reg = <0x1300>, <0x800>; -+ reg-names = "hlos", "pbs"; - - pwrkey { - compatible = "qcom,pmk8350-pwrkey"; -@@ -44,7 +45,7 @@ - }; - - pmk8350_adc_tm: adc-tm@3400 { -- compatible = "qcom,adc-tm7"; -+ compatible = "qcom,spmi-adc-tm5-gen2"; - reg = <0x3400>; - interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>; - interrupt-names = "threshold"; -diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi -index 339790ba585de..fd0d634a373fc 100644 ---- a/arch/arm64/boot/dts/qcom/qcs404.dtsi -+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi -@@ -318,7 +318,7 @@ - status = "disabled"; - }; - -- rpm_msg_ram: memory@60000 { -+ rpm_msg_ram: sram@60000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x00060000 0x6000>; - }; -@@ -548,7 +548,7 @@ - compatible = "snps,dwc3"; - reg = <0x07580000 0xcd00>; - interrupts = ; -- phys = <&usb2_phy_sec>, <&usb3_phy>; -+ phys = <&usb2_phy_prim>, <&usb3_phy>; - phy-names = "usb2-phy", "usb3-phy"; - snps,has-lpm-erratum; - snps,hird-threshold = /bits/ 8 <0x10>; -@@ -577,7 +577,7 @@ - compatible = "snps,dwc3"; - reg = <0x078c0000 0xcc00>; - interrupts = ; -- phys = <&usb2_phy_prim>; -+ phys = <&usb2_phy_sec>; - phy-names = "usb2-phy"; - snps,has-lpm-erratum; - snps,hird-threshold = /bits/ 8 <0x10>; -@@ -775,7 +775,7 @@ - - clocks = <&gcc GCC_PCIE_0_PIPE_CLK>; - resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>, -- <&gcc 21>; -+ <&gcc GCC_PCIE_0_PIPE_ARES>; - reset-names = "phy", "pipe"; - - clock-output-names = "pcie_0_pipe_clk"; -@@ -1305,12 +1305,12 @@ - <&gcc GCC_PCIE_0_SLV_AXI_CLK>; - clock-names = "iface", "aux", "master_bus", "slave_bus"; - -- resets = <&gcc 18>, -- <&gcc 17>, -- <&gcc 15>, -- <&gcc 19>, -+ resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>, -+ <&gcc GCC_PCIE_0_AXI_SLAVE_ARES>, -+ <&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>, -+ <&gcc GCC_PCIE_0_CORE_STICKY_ARES>, - <&gcc GCC_PCIE_0_BCR>, -- <&gcc 16>; -+ <&gcc GCC_PCIE_0_AHB_ARES>; - reset-names = "axi_m", - "axi_s", - "axi_m_sticky", -diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts -index 28d5b5528516b..d3449cb52defe 100644 ---- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts -+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts -@@ -27,7 +27,7 @@ - }; - - /* Fixed crystal oscillator dedicated to MCP2518FD */ -- 
clk40M: can_clock { -+ clk40M: can-clock { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <40000000>; -@@ -113,7 +113,7 @@ - }; - }; - -- pm8150l-thermal { -+ pm8150l-pcb-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&pm8150l_adc_tm 1>; -diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts -index 5ae2ddc65f7e4..56a789a5789e6 100644 ---- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts -+++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts -@@ -43,7 +43,6 @@ - - regulator-always-on; - regulator-boot-on; -- regulator-allow-set-load; - - vin-supply = <&vreg_3p3>; - }; -@@ -114,6 +113,9 @@ - regulator-max-microvolt = <880000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l7a_1p8: ldo7 { -@@ -129,6 +131,9 @@ - regulator-max-microvolt = <2960000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l11a_0p8: ldo11 { -@@ -235,6 +240,9 @@ - regulator-max-microvolt = <1200000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l7c_1p8: ldo7 { -@@ -250,6 +258,9 @@ - regulator-max-microvolt = <1200000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l10c_3p3: ldo10 { -diff --git a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi -index d8ed1d7b4ec76..4b306a59d9bec 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi -@@ -16,3 +16,11 @@ - &cpu6_opp12 { - opp-peak-kBps = <8532000 23347200>; - }; -+ -+&cpu6_opp13 { -+ opp-peak-kBps = <8532000 23347200>; -+}; -+ -+&cpu6_opp14 { -+ opp-peak-kBps = <8532000 23347200>; -+}; -diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi -index a758e4d226122..81098aa9687ba 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi -@@ -33,7 +33,7 @@ ap_h1_spi: &spi0 {}; - polling-delay = <0>; - - thermal-sensors = <&pm6150_adc_tm 1>; -- sustainable-power = <814>; -+ sustainable-power = <965>; - - trips { - skin_temp_alert0: trip-point0 { -diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts -index 6ebde0828550c..8a98a6f849c4f 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts -+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts -@@ -26,7 +26,7 @@ - interrupt-parent = <&tlmm>; - interrupts = <58 IRQ_TYPE_EDGE_FALLING>; - -- vcc-supply = <&pp3300_fp_tp>; -+ vdd-supply = <&pp3300_fp_tp>; - hid-descr-addr = <0x20>; - - wakeup-source; -diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi -index a246dbd74cc11..b7b5264888b7c 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi -@@ -44,7 +44,7 @@ ap_h1_spi: &spi0 {}; - }; - - &cpu6_thermal { -- sustainable-power = <948>; -+ sustainable-power = <1124>; - }; - - &cpu7_alert0 { -@@ -56,7 +56,7 @@ ap_h1_spi: &spi0 {}; - }; - - &cpu7_thermal { -- sustainable-power = <948>; -+ sustainable-power = <1124>; - }; - - &cpu8_alert0 { -@@ -68,7 +68,7 @@ ap_h1_spi: &spi0 {}; - }; - - &cpu8_thermal { -- sustainable-power = <948>; 
-+ sustainable-power = <1124>; - }; - - &cpu9_alert0 { -@@ -80,7 +80,7 @@ ap_h1_spi: &spi0 {}; - }; - - &cpu9_thermal { -- sustainable-power = <948>; -+ sustainable-power = <1124>; - }; - - &gpio_keys { -diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi -index 70c88c37de321..a9d36ac6cb90e 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi -@@ -42,6 +42,7 @@ - */ - - /delete-node/ &hyp_mem; -+/delete-node/ &ipa_fw_mem; - /delete-node/ &xbl_mem; - /delete-node/ &aop_mem; - /delete-node/ &sec_apps_mem; -diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi -index c8921e2d6480f..12816d60e2494 100644 ---- a/arch/arm64/boot/dts/qcom/sc7180.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi -@@ -137,8 +137,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, - <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; -@@ -162,8 +162,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - next-level-cache = <&L2_100>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -184,8 +184,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - next-level-cache = <&L2_200>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -206,8 +206,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - next-level-cache = <&L2_300>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -228,8 +228,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - next-level-cache = <&L2_400>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -250,8 +250,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1024>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <415>; -+ dynamic-power-coefficient = <137>; - next-level-cache = <&L2_500>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -272,8 +272,8 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1740>; -- dynamic-power-coefficient = <405>; -+ capacity-dmips-mhz = <1024>; -+ dynamic-power-coefficient = <480>; - next-level-cache = <&L2_600>; - operating-points-v2 = <&cpu6_opp_table>; - interconnects = <&gem_noc 
MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -294,8 +294,8 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <1740>; -- dynamic-power-coefficient = <405>; -+ capacity-dmips-mhz = <1024>; -+ dynamic-power-coefficient = <480>; - next-level-cache = <&L2_700>; - operating-points-v2 = <&cpu6_opp_table>; - interconnects = <&gem_noc MASTER_APPSS_PROC 3 &mc_virt SLAVE_EBI1 3>, -@@ -1460,6 +1460,8 @@ - "imem", - "config"; - -+ qcom,qmp = <&aoss_qmp>; -+ - qcom,smem-states = <&ipa_smp2p_out 0>, - <&ipa_smp2p_out 1>; - qcom,smem-state-names = "ipa-clock-enabled-valid", -@@ -3239,8 +3241,8 @@ - interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>; - qcom,ee = <0>; - qcom,channel = <0>; -- #address-cells = <1>; -- #size-cells = <1>; -+ #address-cells = <2>; -+ #size-cells = <0>; - interrupt-controller; - #interrupt-cells = <4>; - cell-index = <0>; -@@ -3616,7 +3618,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 1>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu0_alert0: trip-point0 { -@@ -3665,7 +3667,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 2>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu1_alert0: trip-point0 { -@@ -3714,7 +3716,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 3>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu2_alert0: trip-point0 { -@@ -3763,7 +3765,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 4>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu3_alert0: trip-point0 { -@@ -3812,7 +3814,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 5>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu4_alert0: trip-point0 { -@@ -3861,7 +3863,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 6>; -- sustainable-power = <768>; -+ sustainable-power = <1052>; - - trips { - cpu5_alert0: trip-point0 { -@@ -3910,7 +3912,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 9>; -- sustainable-power = <1202>; -+ sustainable-power = <1425>; - - trips { - cpu6_alert0: trip-point0 { -@@ -3951,7 +3953,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 10>; -- sustainable-power = <1202>; -+ sustainable-power = <1425>; - - trips { - cpu7_alert0: trip-point0 { -@@ -3992,7 +3994,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 11>; -- sustainable-power = <1202>; -+ sustainable-power = <1425>; - - trips { - cpu8_alert0: trip-point0 { -@@ -4033,7 +4035,7 @@ - polling-delay = <0>; - - thermal-sensors = <&tsens0 12>; -- sustainable-power = <1202>; -+ sustainable-power = <1425>; - - trips { - cpu9_alert0: trip-point0 { -diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi -index fd78f16181ddd..fb6473a0aa4b3 100644 ---- a/arch/arm64/boot/dts/qcom/sc7280.dtsi -+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi -@@ -429,7 +429,7 @@ - <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>, - <0>, <0>, <0>, <0>, <0>, <0>; - clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk", -- "pcie_0_pipe_clk", "pcie_1_pipe-clk", -+ "pcie_0_pipe_clk", "pcie_1_pipe_clk", - "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk", - "ufs_phy_tx_symbol_0_clk", - "usb3_phy_wrapper_gcc_usb30_pipe_clk"; -@@ -615,6 +615,8 @@ - interconnect-names = "memory", - "config"; - -+ qcom,qmp = <&aoss_qmp>; -+ - qcom,smem-states = <&ipa_smp2p_out 0>, - <&ipa_smp2p_out 1>; - qcom,smem-state-names = "ipa-clock-enabled-valid", -@@ -1258,15 +1260,11 
@@ - dp_phy: dp-phy@88ea200 { - reg = <0 0x088ea200 0 0x200>, - <0 0x088ea400 0 0x200>, -- <0 0x088eac00 0 0x400>, -+ <0 0x088eaa00 0 0x200>, - <0 0x088ea600 0 0x200>, -- <0 0x088ea800 0 0x200>, -- <0 0x088eaa00 0 0x100>; -+ <0 0x088ea800 0 0x200>; - #phy-cells = <0>; - #clock-cells = <1>; -- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; -- clock-names = "pipe0"; -- clock-output-names = "usb3_phy_pipe_clk_src"; - }; - }; - -@@ -1496,8 +1494,8 @@ - interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>; - qcom,ee = <0>; - qcom,channel = <0>; -- #address-cells = <1>; -- #size-cells = <1>; -+ #address-cells = <2>; -+ #size-cells = <0>; - interrupt-controller; - #interrupt-cells = <4>; - }; -diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi b/arch/arm64/boot/dts/qcom/sdm630.dtsi -index 9c7f87e42fccd..e00c0577cef70 100644 ---- a/arch/arm64/boot/dts/qcom/sdm630.dtsi -+++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -541,7 +542,7 @@ - <&sleep_clk>; - }; - -- rpm_msg_ram: memory@778000 { -+ rpm_msg_ram: sram@778000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x00778000 0x7000>; - }; -@@ -767,7 +768,7 @@ - pins = "gpio17", "gpio18", "gpio19"; - function = "gpio"; - drive-strength = <2>; -- bias-no-pull; -+ bias-disable; - }; - }; - -@@ -1041,11 +1042,13 @@ - nvmem-cells = <&gpu_speed_bin>; - nvmem-cell-names = "speed_bin"; - -- interconnects = <&gnoc 1 &bimc 5>; -+ interconnects = <&bimc MASTER_OXILI &bimc SLAVE_EBI>; - interconnect-names = "gfx-mem"; - - operating-points-v2 = <&gpu_sdm630_opp_table>; - -+ status = "disabled"; -+ - gpu_sdm630_opp_table: opp-table { - compatible = "operating-points-v2"; - opp-775000000 { -@@ -1251,7 +1254,7 @@ - #phy-cells = <0>; - - clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, -- <&gcc GCC_RX1_USB2_CLKREF_CLK>; -+ <&gcc GCC_RX0_USB2_CLKREF_CLK>; - clock-names = "cfg_ahb", "ref"; - - resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; -@@ -1831,7 +1834,7 @@ - }; - }; - -- camss: camss@ca00000 { -+ camss: camss@ca00020 { - compatible = "qcom,sdm660-camss"; - reg = <0x0c824000 0x1000>, - <0x0ca00120 0x4>, -diff --git a/arch/arm64/boot/dts/qcom/sdm636-sony-xperia-ganges-mermaid.dts b/arch/arm64/boot/dts/qcom/sdm636-sony-xperia-ganges-mermaid.dts -index bba1c2bce2131..0afe9eee025e1 100644 ---- a/arch/arm64/boot/dts/qcom/sdm636-sony-xperia-ganges-mermaid.dts -+++ b/arch/arm64/boot/dts/qcom/sdm636-sony-xperia-ganges-mermaid.dts -@@ -18,7 +18,7 @@ - }; - - &sdc2_state_on { -- pinconf-clk { -+ clk { - drive-strength = <14>; - }; - }; -diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi -index dfd1b42c07fd5..3566db1d7357e 100644 ---- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi -+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi -@@ -1299,7 +1299,7 @@ ap_ts_i2c: &i2c14 { - config { - pins = "gpio126"; - function = "gpio"; -- bias-no-pull; -+ bias-disable; - drive-strength = <2>; - output-low; - }; -@@ -1309,7 +1309,7 @@ ap_ts_i2c: &i2c14 { - config { - pins = "gpio126"; - function = "gpio"; -- bias-no-pull; -+ bias-disable; - drive-strength = <2>; - output-high; - }; -diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts -index 2d5533dd4ec2d..5ce270f0b2ec1 100644 ---- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts -+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts -@@ -896,7 +896,7 @@ - }; - - wcd_intr_default: wcd_intr_default { -- pins = <54>; -+ pins = "gpio54"; - function = "gpio"; - - input-enable; -@@ 
-1045,7 +1045,10 @@ - - /* PINCTRL - additions to nodes defined in sdm845.dtsi */ - &qup_spi2_default { -- drive-strength = <16>; -+ pinconf { -+ pins = "gpio27", "gpio28", "gpio29", "gpio30"; -+ drive-strength = <16>; -+ }; - }; - - &qup_uart3_default{ -diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts -index c60c8c640e17f..736951fabb7a9 100644 ---- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts -+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts -@@ -221,7 +221,7 @@ - panel@0 { - compatible = "tianma,fhd-video"; - reg = <0>; -- vddi0-supply = <&vreg_l14a_1p8>; -+ vddio-supply = <&vreg_l14a_1p8>; - vddpos-supply = <&lab>; - vddneg-supply = <&ibb>; - -diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi -index b3b9119261844..6a0e30cbf88f1 100644 ---- a/arch/arm64/boot/dts/qcom/sdm845.dtsi -+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi -@@ -196,8 +196,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <607>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <611>; -+ dynamic-power-coefficient = <154>; - qcom,freq-domain = <&cpufreq_hw 0>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -221,8 +221,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <607>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <611>; -+ dynamic-power-coefficient = <154>; - qcom,freq-domain = <&cpufreq_hw 0>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -243,8 +243,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <607>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <611>; -+ dynamic-power-coefficient = <154>; - qcom,freq-domain = <&cpufreq_hw 0>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -265,8 +265,8 @@ - cpu-idle-states = <&LITTLE_CPU_SLEEP_0 - &LITTLE_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- capacity-dmips-mhz = <607>; -- dynamic-power-coefficient = <100>; -+ capacity-dmips-mhz = <611>; -+ dynamic-power-coefficient = <154>; - qcom,freq-domain = <&cpufreq_hw 0>; - operating-points-v2 = <&cpu0_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -288,7 +288,7 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- dynamic-power-coefficient = <396>; -+ dynamic-power-coefficient = <442>; - qcom,freq-domain = <&cpufreq_hw 1>; - operating-points-v2 = <&cpu4_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -310,7 +310,7 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- dynamic-power-coefficient = <396>; -+ dynamic-power-coefficient = <442>; - qcom,freq-domain = <&cpufreq_hw 1>; - operating-points-v2 = <&cpu4_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -332,7 +332,7 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- dynamic-power-coefficient = <396>; -+ dynamic-power-coefficient = <442>; - qcom,freq-domain = <&cpufreq_hw 1>; - operating-points-v2 = <&cpu4_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 
&mem_noc SLAVE_EBI1 3>, -@@ -354,7 +354,7 @@ - cpu-idle-states = <&BIG_CPU_SLEEP_0 - &BIG_CPU_SLEEP_1 - &CLUSTER_SLEEP_0>; -- dynamic-power-coefficient = <396>; -+ dynamic-power-coefficient = <442>; - qcom,freq-domain = <&cpufreq_hw 1>; - operating-points-v2 = <&cpu4_opp_table>; - interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>, -@@ -1074,6 +1074,7 @@ - #clock-cells = <1>; - #reset-cells = <1>; - #power-domain-cells = <1>; -+ power-domains = <&rpmhpd SDM845_CX>; - }; - - qfprom@784000 { -@@ -1988,8 +1989,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>, -- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0xd00000>; -+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>, -+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0xd00000>; - - interrupts = ; - interrupt-names = "msi"; -@@ -2064,7 +2065,7 @@ - - status = "disabled"; - -- pcie0_lane: lanes@1c06200 { -+ pcie0_lane: phy@1c06200 { - reg = <0 0x01c06200 0 0x128>, - <0 0x01c06400 0 0x1fc>, - <0 0x01c06800 0 0x218>, -@@ -2093,7 +2094,7 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>, -+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>, - <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>; - - interrupts = ; -@@ -2174,7 +2175,7 @@ - - status = "disabled"; - -- pcie1_lane: lanes@1c06200 { -+ pcie1_lane: phy@1c06200 { - reg = <0 0x01c0a800 0 0x800>, - <0 0x01c0a800 0 0x800>, - <0 0x01c0b800 0 0x400>; -@@ -2282,7 +2283,7 @@ - <0 0>, - <0 0>, - <0 0>, -- <0 300000000>; -+ <75000000 300000000>; - - status = "disabled"; - }; -@@ -2302,7 +2303,7 @@ - reset-names = "ufsphy"; - status = "disabled"; - -- ufs_mem_phy_lanes: lanes@1d87400 { -+ ufs_mem_phy_lanes: phy@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, -@@ -2316,11 +2317,11 @@ - compatible = "qcom,bam-v1.7.0"; - reg = <0 0x01dc4000 0 0x24000>; - interrupts = ; -- clocks = <&rpmhcc 15>; -+ clocks = <&rpmhcc RPMH_CE_CLK>; - clock-names = "bam_clk"; - #dma-cells = <1>; - qcom,ee = <0>; -- qcom,controlled-remotely = <1>; -+ qcom,controlled-remotely; - iommus = <&apps_smmu 0x704 0x1>, - <&apps_smmu 0x706 0x1>, - <&apps_smmu 0x714 0x1>, -@@ -2331,8 +2332,8 @@ - compatible = "qcom,crypto-v5.4"; - reg = <0 0x01dfa000 0 0x6000>; - clocks = <&gcc GCC_CE1_AHB_CLK>, -- <&gcc GCC_CE1_AHB_CLK>, -- <&rpmhcc 15>; -+ <&gcc GCC_CE1_AXI_CLK>, -+ <&rpmhcc RPMH_CE_CLK>; - clock-names = "iface", "bus", "core"; - dmas = <&cryptobam 6>, <&cryptobam 7>; - dma-names = "rx", "tx"; -@@ -3608,10 +3609,10 @@ - #clock-cells = <0>; - clock-frequency = <9600000>; - clock-output-names = "mclk"; -- qcom,micbias1-millivolt = <1800>; -- qcom,micbias2-millivolt = <1800>; -- qcom,micbias3-millivolt = <1800>; -- qcom,micbias4-millivolt = <1800>; -+ qcom,micbias1-microvolt = <1800000>; -+ qcom,micbias2-microvolt = <1800000>; -+ qcom,micbias3-microvolt = <1800000>; -+ qcom,micbias4-microvolt = <1800000>; - - #address-cells = <1>; - #size-cells = <1>; -@@ -3699,7 +3700,7 @@ - <&gcc GCC_USB3_PHY_PRIM_BCR>; - reset-names = "phy", "common"; - -- usb_1_ssphy: lanes@88e9200 { -+ usb_1_ssphy: phy@88e9200 { - reg = <0 0x088e9200 0 0x128>, - <0 0x088e9400 0 0x200>, - <0 0x088e9c00 0 0x218>, -@@ -3732,7 +3733,7 @@ - <&gcc GCC_USB3_PHY_SEC_BCR>; - reset-names = "phy", "common"; - -- usb_2_ssphy: lane@88eb200 { -+ usb_2_ssphy: phy@88eb200 { - reg = <0 0x088eb200 0 0x128>, - <0 0x088eb400 0 0x1fc>, - <0 
0x088eb800 0 0x218>, -@@ -3921,7 +3922,7 @@ - #reset-cells = <1>; - }; - -- camss: camss@a00000 { -+ camss: camss@acb3000 { - compatible = "qcom,sdm845-camss"; - - reg = <0 0xacb3000 0 0x1000>, -@@ -4147,7 +4148,7 @@ - - power-domains = <&dispcc MDSS_GDSC>; - -- clocks = <&gcc GCC_DISP_AHB_CLK>, -+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, - <&dispcc DISP_CC_MDSS_MDP_CLK>; - clock-names = "iface", "core"; - -diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts -index 2ba23aa582a18..834fb463f99ec 100644 ---- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts -+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts -@@ -475,8 +475,10 @@ - }; - - &qup_i2c12_default { -- drive-strength = <2>; -- bias-disable; -+ pinmux { -+ drive-strength = <2>; -+ bias-disable; -+ }; - }; - - &qup_uart6_default { -@@ -518,6 +520,10 @@ - dai@1 { - reg = <1>; - }; -+ -+ dai@2 { -+ reg = <2>; -+ }; - }; - - &sound { -@@ -530,6 +536,7 @@ - "SpkrLeft IN", "SPK1 OUT", - "SpkrRight IN", "SPK2 OUT", - "MM_DL1", "MultiMedia1 Playback", -+ "MM_DL3", "MultiMedia3 Playback", - "MultiMedia2 Capture", "MM_UL2"; - - mm1-dai-link { -@@ -546,6 +553,13 @@ - }; - }; - -+ mm3-dai-link { -+ link-name = "MultiMedia3"; -+ cpu { -+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>; -+ }; -+ }; -+ - slim-dai-link { - link-name = "SLIM Playback"; - cpu { -@@ -575,6 +589,21 @@ - sound-dai = <&wcd9340 1>; - }; - }; -+ -+ slim-wcd-dai-link { -+ link-name = "SLIM WCD Playback"; -+ cpu { -+ sound-dai = <&q6afedai SLIMBUS_1_RX>; -+ }; -+ -+ platform { -+ sound-dai = <&q6routing>; -+ }; -+ -+ codec { -+ sound-dai = <&wcd9340 2>; -+ }; -+ }; - }; - - &tlmm { -diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts -index 58b6b2742d3f9..47f8e5397ebba 100644 ---- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts -+++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts -@@ -88,11 +88,19 @@ - status = "okay"; - }; - --&sdc2_state_off { -+&sdc2_off_state { - sd-cd { - pins = "gpio98"; -+ drive-strength = <2>; - bias-disable; -+ }; -+}; -+ -+&sdc2_on_state { -+ sd-cd { -+ pins = "gpio98"; - drive-strength = <2>; -+ bias-pull-up; - }; - }; - -@@ -102,32 +110,6 @@ - - &tlmm { - gpio-reserved-ranges = <22 2>, <28 6>; -- -- sdc2_state_on: sdc2-on { -- clk { -- pins = "sdc2_clk"; -- bias-disable; -- drive-strength = <16>; -- }; -- -- cmd { -- pins = "sdc2_cmd"; -- bias-pull-up; -- drive-strength = <10>; -- }; -- -- data { -- pins = "sdc2_data"; -- bias-pull-up; -- drive-strength = <10>; -- }; -- -- sd-cd { -- pins = "gpio98"; -- bias-pull-up; -- drive-strength = <2>; -- }; -- }; - }; - - &usb3 { -diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi -index 2b37ce6a9f9c5..2e4fe2bc1e0a8 100644 ---- a/arch/arm64/boot/dts/qcom/sm6125.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi -@@ -336,23 +336,43 @@ - interrupt-controller; - #interrupt-cells = <2>; - -- sdc2_state_off: sdc2-off { -+ sdc2_off_state: sdc2-off-state { - clk { - pins = "sdc2_clk"; -- bias-disable; - drive-strength = <2>; -+ bias-disable; - }; - - cmd { - pins = "sdc2_cmd"; -+ drive-strength = <2>; - bias-pull-up; -+ }; -+ -+ data { -+ pins = "sdc2_data"; - drive-strength = <2>; -+ bias-pull-up; -+ }; -+ }; -+ -+ sdc2_on_state: sdc2-on-state { -+ clk { -+ pins = "sdc2_clk"; -+ drive-strength = <16>; -+ bias-disable; -+ }; -+ -+ cmd { -+ pins = "sdc2_cmd"; -+ drive-strength = 
<10>; -+ bias-pull-up; - }; - - data { - pins = "sdc2_data"; -+ drive-strength = <10>; - bias-pull-up; -- drive-strength = <2>; - }; - }; - }; -@@ -372,15 +392,15 @@ - reg = <0x01613000 0x180>; - #phy-cells = <0>; - -- clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>, -- <&gcc GCC_AHB2PHY_USB_CLK>; -- clock-names = "ref", "cfg_ahb"; -+ clocks = <&gcc GCC_AHB2PHY_USB_CLK>, -+ <&rpmcc RPM_SMD_XO_CLK_SRC>; -+ clock-names = "cfg_ahb", "ref"; - - resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; - status = "disabled"; - }; - -- rpm_msg_ram: memory@45f0000 { -+ rpm_msg_ram: sram@45f0000 { - compatible = "qcom,rpm-msg-ram"; - reg = <0x045f0000 0x7000>; - }; -@@ -388,7 +408,7 @@ - sdhc_1: sdhci@4744000 { - compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5"; - reg = <0x04744000 0x1000>, <0x04745000 0x1000>; -- reg-names = "hc", "core"; -+ reg-names = "hc", "cqhci"; - - interrupts = , - ; -@@ -417,8 +437,8 @@ - <&xo_board>; - clock-names = "iface", "core", "xo"; - -- pinctrl-0 = <&sdc2_state_on>; -- pinctrl-1 = <&sdc2_state_off>; -+ pinctrl-0 = <&sdc2_on_state>; -+ pinctrl-1 = <&sdc2_off_state>; - pinctrl-names = "default", "sleep"; - - bus-width = <4>; -diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi -index 014fe3a315489..04c71f74ab72d 100644 ---- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi -@@ -33,9 +33,10 @@ - framebuffer: framebuffer@9c000000 { - compatible = "simple-framebuffer"; - reg = <0 0x9c000000 0 0x2300000>; -- width = <1644>; -- height = <3840>; -- stride = <(1644 * 4)>; -+ /* Griffin BL initializes in 2.5k mode, not 4k */ -+ width = <1096>; -+ height = <2560>; -+ stride = <(1096 * 4)>; - format = "a8r8g8b8"; - /* - * That's (going to be) a lot of clocks, but it's necessary due -@@ -348,6 +349,9 @@ - regulator-max-microvolt = <2960000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l7c_3p0: ldo7 { -@@ -367,6 +371,9 @@ - regulator-max-microvolt = <2960000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l10c_3p3: ldo10 { -diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi -index ef0232c2cf45b..e8cb20c4cbf22 100644 ---- a/arch/arm64/boot/dts/qcom/sm8150.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi -@@ -1131,7 +1131,7 @@ - clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; - pinctrl-names = "default"; - pinctrl-0 = <&qup_i2c7_default>; -- interrupts = ; -+ interrupts = ; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; -@@ -1692,12 +1692,12 @@ - reset-names = "ufsphy"; - status = "disabled"; - -- ufs_mem_phy_lanes: lanes@1d87400 { -- reg = <0 0x01d87400 0 0x108>, -- <0 0x01d87600 0 0x1e0>, -- <0 0x01d87c00 0 0x1dc>, -- <0 0x01d87800 0 0x108>, -- <0 0x01d87a00 0 0x1e0>; -+ ufs_mem_phy_lanes: phy@1d87400 { -+ reg = <0 0x01d87400 0 0x16c>, -+ <0 0x01d87600 0 0x200>, -+ <0 0x01d87c00 0 0x200>, -+ <0 0x01d87800 0 0x16c>, -+ <0 0x01d87a00 0 0x200>; - #phy-cells = <0>; - }; - }; -@@ -3010,7 +3010,7 @@ - <&gcc GCC_USB3_PHY_PRIM_BCR>; - reset-names = "phy", "common"; - -- usb_1_ssphy: lanes@88e9200 { -+ usb_1_ssphy: phy@88e9200 { - reg = <0 0x088e9200 0 0x200>, - <0 0x088e9400 0 0x200>, - <0 0x088e9c00 0 0x218>, -@@ -3043,7 +3043,7 @@ - <&gcc GCC_USB3_PHY_SEC_BCR>; - reset-names = "phy", "common"; - -- usb_2_ssphy: lane@88eb200 { -+ usb_2_ssphy: phy@88eb200 { - reg = <0 0x088eb200 0 0x200>, - <0 
0x088eb400 0 0x200>, - <0 0x088eb800 0 0x800>, -@@ -3434,9 +3434,9 @@ - qcom,tcs-offset = <0xd00>; - qcom,drv-id = <2>; - qcom,tcs-config = , -- , -- , -- ; -+ , -+ , -+ ; - - rpmhcc: clock-controller { - compatible = "qcom,sm8150-rpmh-clk"; -diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts -index 79afeb07f4a24..792911af1637b 100644 ---- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts -+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx203.dts -@@ -13,3 +13,236 @@ - }; - - /delete-node/ &vreg_l7f_1p8; -+ -+&pm8009_gpios { -+ gpio-line-names = "NC", /* GPIO_1 */ -+ "CAM_PWR_LD_EN", -+ "WIDEC_PWR_EN", -+ "NC"; -+}; -+ -+&pm8150_gpios { -+ gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */ -+ "OPTION_2", -+ "NC", -+ "PM_SLP_CLK_IN", -+ "OPTION_1", -+ "NC", -+ "NC", -+ "SP_ARI_PWR_ALARM", -+ "NC", -+ "NC"; /* GPIO_10 */ -+}; -+ -+&pm8150b_gpios { -+ gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */ -+ "FOCUS_N", -+ "NC", -+ "NC", -+ "RF_LCD_ID_EN", -+ "NC", -+ "NC", -+ "LCD_ID", -+ "NC", -+ "WLC_EN_N", /* GPIO_10 */ -+ "NC", -+ "RF_ID"; -+}; -+ -+&pm8150l_gpios { -+ gpio-line-names = "NC", /* GPIO_1 */ -+ "PM3003A_EN", -+ "NC", -+ "NC", -+ "NC", -+ "AUX2_THERM", -+ "BB_HP_EN", -+ "FP_LDO_EN", -+ "PMX_RESET_N", -+ "AUX3_THERM", /* GPIO_10 */ -+ "DTV_PWR_EN", -+ "PM3003A_MODE"; -+}; -+ -+&tlmm { -+ gpio-line-names = "AP_CTI_IN", /* GPIO_0 */ -+ "MDM2AP_ERR_FATAL", -+ "AP_CTI_OUT", -+ "MDM2AP_STATUS", -+ "NFC_I2C_SDA", -+ "NFC_I2C_SCL", -+ "NFC_EN", -+ "NFC_CLK_REQ", -+ "NFC_ESE_PWR_REQ", -+ "DVDT_WRT_DET_AND", -+ "SPK_AMP_RESET_N", /* GPIO_10 */ -+ "SPK_AMP_INT_N", -+ "APPS_I2C_1_SDA", -+ "APPS_I2C_1_SCL", -+ "NC", -+ "TX_GTR_THRES_IN", -+ "HST_BT_UART_CTS", -+ "HST_BT_UART_RFR", -+ "HST_BT_UART_TX", -+ "HST_BT_UART_RX", -+ "HST_WLAN_EN", /* GPIO_20 */ -+ "HST_BT_EN", -+ "RGBC_IR_PWR_EN", -+ "FP_INT_N", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NFC_ESE_SPI_MISO", -+ "NFC_ESE_SPI_MOSI", -+ "NFC_ESE_SPI_SCLK", /* GPIO_30 */ -+ "NFC_ESE_SPI_CS_N", -+ "WCD_RST_N", -+ "NC", -+ "SDM_DEBUG_UART_TX", -+ "SDM_DEBUG_UART_RX", -+ "TS_I2C_SDA", -+ "TS_I2C_SCL", -+ "TS_INT_N", -+ "FP_SPI_MISO", /* GPIO_40 */ -+ "FP_SPI_MOSI", -+ "FP_SPI_SCLK", -+ "FP_SPI_CS_N", -+ "APPS_I2C_0_SDA", -+ "APPS_I2C_0_SCL", -+ "DISP_ERR_FG", -+ "UIM2_DETECT_EN", -+ "NC", -+ "NC", -+ "NC", /* GPIO_50 */ -+ "NC", -+ "MDM_UART_CTS", -+ "MDM_UART_RFR", -+ "MDM_UART_TX", -+ "MDM_UART_RX", -+ "AP2MDM_STATUS", -+ "AP2MDM_ERR_FATAL", -+ "MDM_IPC_HS_UART_TX", -+ "MDM_IPC_HS_UART_RX", -+ "NC", /* GPIO_60 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "USB_CC_DIR", -+ "DISP_VSYNC", -+ "NC", -+ "NC", -+ "CAM_PWR_B_CS", -+ "NC", /* GPIO_70 */ -+ "CAM_PWR_A_CS", -+ "SBU_SW_SEL", -+ "SBU_SW_OE", -+ "FP_RESET_N", -+ "FP_RESET_N", -+ "DISP_RESET_N", -+ "DEBUG_GPIO0", -+ "TRAY_DET", -+ "CAM2_RST_N", -+ "PCIE0_RST_N", -+ "PCIE0_CLK_REQ_N", /* GPIO_80 */ -+ "PCIE0_WAKE_N", -+ "DVDT_ENABLE", -+ "DVDT_WRT_DET_OR", -+ "NC", -+ "PCIE2_RST_N", -+ "PCIE2_CLK_REQ_N", -+ "PCIE2_WAKE_N", -+ "MDM_VFR_IRQ0", -+ "MDM_VFR_IRQ1", -+ "SW_SERVICE", /* GPIO_90 */ -+ "CAM_SOF", -+ "CAM1_RST_N", -+ "CAM0_RST_N", -+ "CAM0_MCLK", -+ "CAM1_MCLK", -+ "CAM2_MCLK", -+ "CAM3_MCLK", -+ "CAM4_MCLK", -+ "TOF_RST_N", -+ "NC", /* GPIO_100 */ -+ "CCI0_I2C_SDA", -+ "CCI0_I2C_SCL", -+ "CCI1_I2C_SDA", -+ "CCI1_I2C_SCL_", -+ "CCI2_I2C_SDA", -+ "CCI2_I2C_SCL", -+ "CCI3_I2C_SDA", -+ "CCI3_I2C_SCL", -+ "CAM3_RST_N", -+ "NFC_DWL_REQ", /* GPIO_110 */ -+ "NFC_IRQ", -+ "XVS", -+ "NC", -+ "RF_ID_EXTENSION", 
-+ "SPK_AMP_I2C_SDA", -+ "SPK_AMP_I2C_SCL", -+ "NC", -+ "NC", -+ "WLC_I2C_SDA", -+ "WLC_I2C_SCL", /* GPIO_120 */ -+ "ACC_COVER_OPEN", -+ "ALS_PROX_INT_N", -+ "ACCEL_INT", -+ "WLAN_SW_CTRL", -+ "CAMSENSOR_I2C_SDA", -+ "CAMSENSOR_I2C_SCL", -+ "UDON_SWITCH_SEL", -+ "WDOG_DISABLE", -+ "BAROMETER_INT", -+ "NC", /* GPIO_130 */ -+ "NC", -+ "FORCED_USB_BOOT", -+ "NC", -+ "NC", -+ "WLC_INT_N", -+ "NC", -+ "NC", -+ "RGBC_IR_INT", -+ "NC", -+ "NC", /* GPIO_140 */ -+ "NC", -+ "BT_SLIMBUS_CLK", -+ "BT_SLIMBUS_DATA", -+ "HW_ID_0", -+ "HW_ID_1", -+ "WCD_SWR_TX_CLK", -+ "WCD_SWR_TX_DATA0", -+ "WCD_SWR_TX_DATA1", -+ "WCD_SWR_RX_CLK", -+ "WCD_SWR_RX_DATA0", /* GPIO_150 */ -+ "WCD_SWR_RX_DATA1", -+ "SDM_DMIC_CLK1", -+ "SDM_DMIC_DATA1", -+ "SDM_DMIC_CLK2", -+ "SDM_DMIC_DATA2", -+ "SPK_AMP_I2S_CLK", -+ "SPK_AMP_I2S_WS", -+ "SPK_AMP_I2S_ASP_DIN", -+ "SPK_AMP_I2S_ASP_DOUT", -+ "COMPASS_I2C_SDA", /* GPIO_160 */ -+ "COMPASS_I2C_SCL", -+ "NC", -+ "NC", -+ "SSC_SPI_1_MISO", -+ "SSC_SPI_1_MOSI", -+ "SSC_SPI_1_CLK", -+ "SSC_SPI_1_CS_N", -+ "NC", -+ "NC", -+ "SSC_SENSOR_I2C_SDA", /* GPIO_170 */ -+ "SSC_SENSOR_I2C_SCL", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "HST_BLE_SNS_UART6_TX", -+ "HST_BLE_SNS_UART6_RX", -+ "HST_WLAN_UART_TX", -+ "HST_WLAN_UART_RX"; -+}; -diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts -index 16c96e8385348..b0f4ecc911144 100644 ---- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts -+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo-pdx206.dts -@@ -19,6 +19,8 @@ - }; - - &gpio_keys { -+ pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &g_assist_n>; -+ - g-assist-key { - label = "Google Assistant Key"; - linux,code = ; -@@ -29,6 +31,247 @@ - }; - }; - -+&pm8009_gpios { -+ gpio-line-names = "NC", /* GPIO_1 */ -+ "NC", -+ "WIDEC_PWR_EN", -+ "NC"; -+}; -+ -+&pm8150_gpios { -+ gpio-line-names = "VOL_DOWN_N", /* GPIO_1 */ -+ "OPTION_2", -+ "NC", -+ "PM_SLP_CLK_IN", -+ "OPTION_1", -+ "G_ASSIST_N", -+ "NC", -+ "SP_ARI_PWR_ALARM", -+ "NC", -+ "NC"; /* GPIO_10 */ -+ -+ g_assist_n: g-assist-n-state { -+ pins = "gpio6"; -+ function = "normal"; -+ power-source = <1>; -+ bias-pull-up; -+ input-enable; -+ }; -+}; -+ -+&pm8150b_gpios { -+ gpio-line-names = "SNAPSHOT_N", /* GPIO_1 */ -+ "FOCUS_N", -+ "NC", -+ "NC", -+ "RF_LCD_ID_EN", -+ "NC", -+ "NC", -+ "LCD_ID", -+ "NC", -+ "NC", /* GPIO_10 */ -+ "NC", -+ "RF_ID"; -+}; -+ -+&pm8150l_gpios { -+ gpio-line-names = "NC", /* GPIO_1 */ -+ "PM3003A_EN", -+ "NC", -+ "NC", -+ "NC", -+ "AUX2_THERM", -+ "BB_HP_EN", -+ "FP_LDO_EN", -+ "PMX_RESET_N", -+ "NC", /* GPIO_10 */ -+ "NC", -+ "PM3003A_MODE"; -+}; -+ -+&tlmm { -+ gpio-line-names = "AP_CTI_IN", /* GPIO_0 */ -+ "MDM2AP_ERR_FATAL", -+ "AP_CTI_OUT", -+ "MDM2AP_STATUS", -+ "NFC_I2C_SDA", -+ "NFC_I2C_SCL", -+ "NFC_EN", -+ "NFC_CLK_REQ", -+ "NFC_ESE_PWR_REQ", -+ "DVDT_WRT_DET_AND", -+ "SPK_AMP_RESET_N", /* GPIO_10 */ -+ "SPK_AMP_INT_N", -+ "APPS_I2C_1_SDA", -+ "APPS_I2C_1_SCL", -+ "NC", -+ "TX_GTR_THRES_IN", -+ "HST_BT_UART_CTS", -+ "HST_BT_UART_RFR", -+ "HST_BT_UART_TX", -+ "HST_BT_UART_RX", -+ "HST_WLAN_EN", /* GPIO_20 */ -+ "HST_BT_EN", -+ "RGBC_IR_PWR_EN", -+ "FP_INT_N", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NFC_ESE_SPI_MISO", -+ "NFC_ESE_SPI_MOSI", -+ "NFC_ESE_SPI_SCLK", /* GPIO_30 */ -+ "NFC_ESE_SPI_CS_N", -+ "WCD_RST_N", -+ "NC", -+ "SDM_DEBUG_UART_TX", -+ "SDM_DEBUG_UART_RX", -+ "TS_I2C_SDA", -+ "TS_I2C_SCL", -+ "TS_INT_N", -+ "FP_SPI_MISO", /* GPIO_40 */ -+ "FP_SPI_MOSI", -+ "FP_SPI_SCLK", -+ "FP_SPI_CS_N", -+ 
"APPS_I2C_0_SDA", -+ "APPS_I2C_0_SCL", -+ "DISP_ERR_FG", -+ "UIM2_DETECT_EN", -+ "NC", -+ "NC", -+ "NC", /* GPIO_50 */ -+ "NC", -+ "MDM_UART_CTS", -+ "MDM_UART_RFR", -+ "MDM_UART_TX", -+ "MDM_UART_RX", -+ "AP2MDM_STATUS", -+ "AP2MDM_ERR_FATAL", -+ "MDM_IPC_HS_UART_TX", -+ "MDM_IPC_HS_UART_RX", -+ "NC", /* GPIO_60 */ -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "USB_CC_DIR", -+ "DISP_VSYNC", -+ "NC", -+ "NC", -+ "CAM_PWR_B_CS", -+ "NC", /* GPIO_70 */ -+ "FRONTC_PWR_EN", -+ "SBU_SW_SEL", -+ "SBU_SW_OE", -+ "FP_RESET_N", -+ "FP_RESET_N", -+ "DISP_RESET_N", -+ "DEBUG_GPIO0", -+ "TRAY_DET", -+ "CAM2_RST_N", -+ "PCIE0_RST_N", -+ "PCIE0_CLK_REQ_N", /* GPIO_80 */ -+ "PCIE0_WAKE_N", -+ "DVDT_ENABLE", -+ "DVDT_WRT_DET_OR", -+ "NC", -+ "PCIE2_RST_N", -+ "PCIE2_CLK_REQ_N", -+ "PCIE2_WAKE_N", -+ "MDM_VFR_IRQ0", -+ "MDM_VFR_IRQ1", -+ "SW_SERVICE", /* GPIO_90 */ -+ "CAM_SOF", -+ "CAM1_RST_N", -+ "CAM0_RST_N", -+ "CAM0_MCLK", -+ "CAM1_MCLK", -+ "CAM2_MCLK", -+ "CAM3_MCLK", -+ "NC", -+ "NC", -+ "NC", /* GPIO_100 */ -+ "CCI0_I2C_SDA", -+ "CCI0_I2C_SCL", -+ "CCI1_I2C_SDA", -+ "CCI1_I2C_SCL_", -+ "CCI2_I2C_SDA", -+ "CCI2_I2C_SCL", -+ "CCI3_I2C_SDA", -+ "CCI3_I2C_SCL", -+ "CAM3_RST_N", -+ "NFC_DWL_REQ", /* GPIO_110 */ -+ "NFC_IRQ", -+ "XVS", -+ "NC", -+ "RF_ID_EXTENSION", -+ "SPK_AMP_I2C_SDA", -+ "SPK_AMP_I2C_SCL", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "ACC_COVER_OPEN", -+ "ALS_PROX_INT_N", -+ "ACCEL_INT", -+ "WLAN_SW_CTRL", -+ "CAMSENSOR_I2C_SDA", -+ "CAMSENSOR_I2C_SCL", -+ "UDON_SWITCH_SEL", -+ "WDOG_DISABLE", -+ "BAROMETER_INT", -+ "NC", /* GPIO_130 */ -+ "NC", -+ "FORCED_USB_BOOT", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "RGBC_IR_INT", -+ "NC", -+ "NC", /* GPIO_140 */ -+ "NC", -+ "BT_SLIMBUS_CLK", -+ "BT_SLIMBUS_DATA", -+ "HW_ID_0", -+ "HW_ID_1", -+ "WCD_SWR_TX_CLK", -+ "WCD_SWR_TX_DATA0", -+ "WCD_SWR_TX_DATA1", -+ "WCD_SWR_RX_CLK", -+ "WCD_SWR_RX_DATA0", /* GPIO_150 */ -+ "WCD_SWR_RX_DATA1", -+ "SDM_DMIC_CLK1", -+ "SDM_DMIC_DATA1", -+ "SDM_DMIC_CLK2", -+ "SDM_DMIC_DATA2", -+ "SPK_AMP_I2S_CLK", -+ "SPK_AMP_I2S_WS", -+ "SPK_AMP_I2S_ASP_DIN", -+ "SPK_AMP_I2S_ASP_DOUT", -+ "COMPASS_I2C_SDA", /* GPIO_160 */ -+ "COMPASS_I2C_SCL", -+ "NC", -+ "NC", -+ "SSC_SPI_1_MISO", -+ "SSC_SPI_1_MOSI", -+ "SSC_SPI_1_CLK", -+ "SSC_SPI_1_CS_N", -+ "NC", -+ "NC", -+ "SSC_SENSOR_I2C_SDA", /* GPIO_170 */ -+ "SSC_SENSOR_I2C_SCL", -+ "NC", -+ "NC", -+ "NC", -+ "NC", -+ "HST_BLE_SNS_UART6_TX", -+ "HST_BLE_SNS_UART6_RX", -+ "HST_WLAN_UART_TX", -+ "HST_WLAN_UART_RX"; -+}; -+ - &vreg_l2f_1p3 { - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; -diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi -index d63f7a9bc4e9a..e622cbe167b0d 100644 ---- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi -@@ -26,9 +26,10 @@ - framebuffer: framebuffer@9c000000 { - compatible = "simple-framebuffer"; - reg = <0 0x9c000000 0 0x2300000>; -- width = <1644>; -- height = <3840>; -- stride = <(1644 * 4)>; -+ /* pdx203 BL initializes in 2.5k mode, not 4k */ -+ width = <1096>; -+ height = <2560>; -+ stride = <(1096 * 4)>; - format = "a8r8g8b8"; - /* - * That's a lot of clocks, but it's necessary due -@@ -50,12 +51,26 @@ - gpio_keys: gpio-keys { - compatible = "gpio-keys"; - -- /* -- * Camera focus (light press) and camera snapshot (full press) -- * seem not to work properly.. Adding the former one stalls the CPU -- * and the latter kills the volume down key for whatever reason. 
In any -- * case, they are both on &pm8150b_gpios: camera focus(2), camera snapshot(1). -- */ -+ pinctrl-0 = <&focus_n &snapshot_n &vol_down_n>; -+ pinctrl-names = "default"; -+ -+ key-camera-focus { -+ label = "Camera Focus"; -+ linux,code = ; -+ gpios = <&pm8150b_gpios 2 GPIO_ACTIVE_LOW>; -+ debounce-interval = <15>; -+ linux,can-disable; -+ wakeup-source; -+ }; -+ -+ key-camera-snapshot { -+ label = "Camera Snapshot"; -+ linux,code = ; -+ gpios = <&pm8150b_gpios 1 GPIO_ACTIVE_LOW>; -+ debounce-interval = <15>; -+ linux,can-disable; -+ wakeup-source; -+ }; - - vol-down { - label = "Volume Down"; -@@ -317,6 +332,9 @@ - regulator-max-microvolt = <2960000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l7c_2p85: ldo7 { -@@ -339,6 +357,9 @@ - regulator-max-microvolt = <2960000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l10c_3p3: ldo10 { -@@ -511,6 +532,34 @@ - vdda-pll-supply = <&vreg_l9a_1p2>; - }; - -+&pm8150_gpios { -+ vol_down_n: vol-down-n-state { -+ pins = "gpio1"; -+ function = "normal"; -+ power-source = <0>; -+ bias-pull-up; -+ input-enable; -+ }; -+}; -+ -+&pm8150b_gpios { -+ snapshot_n: snapshot-n-state { -+ pins = "gpio1"; -+ function = "normal"; -+ power-source = <0>; -+ bias-pull-up; -+ input-enable; -+ }; -+ -+ focus_n: focus-n-state { -+ pins = "gpio2"; -+ function = "normal"; -+ power-source = <0>; -+ bias-pull-up; -+ input-enable; -+ }; -+}; -+ - &pon_pwrkey { - status = "okay"; - }; -@@ -585,7 +634,7 @@ - pins = "gpio39"; - function = "gpio"; - drive-strength = <2>; -- bias-disabled; -+ bias-disable; - input-enable; - }; - -diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi -index d12e4cbfc8527..5d6551e1fcd8d 100644 ---- a/arch/arm64/boot/dts/qcom/sm8250.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi -@@ -97,7 +97,7 @@ - reg = <0x0 0x0>; - enable-method = "psci"; - capacity-dmips-mhz = <448>; -- dynamic-power-coefficient = <205>; -+ dynamic-power-coefficient = <105>; - next-level-cache = <&L2_0>; - qcom,freq-domain = <&cpufreq_hw 0>; - #cooling-cells = <2>; -@@ -116,7 +116,7 @@ - reg = <0x0 0x100>; - enable-method = "psci"; - capacity-dmips-mhz = <448>; -- dynamic-power-coefficient = <205>; -+ dynamic-power-coefficient = <105>; - next-level-cache = <&L2_100>; - qcom,freq-domain = <&cpufreq_hw 0>; - #cooling-cells = <2>; -@@ -132,7 +132,7 @@ - reg = <0x0 0x200>; - enable-method = "psci"; - capacity-dmips-mhz = <448>; -- dynamic-power-coefficient = <205>; -+ dynamic-power-coefficient = <105>; - next-level-cache = <&L2_200>; - qcom,freq-domain = <&cpufreq_hw 0>; - #cooling-cells = <2>; -@@ -148,7 +148,7 @@ - reg = <0x0 0x300>; - enable-method = "psci"; - capacity-dmips-mhz = <448>; -- dynamic-power-coefficient = <205>; -+ dynamic-power-coefficient = <105>; - next-level-cache = <&L2_300>; - qcom,freq-domain = <&cpufreq_hw 0>; - #cooling-cells = <2>; -@@ -1393,8 +1393,8 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>, -- <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>; -+ ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>, -+ <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>; - - interrupts = ; - interrupt-names = "msi"; -@@ -1434,11 +1434,12 @@ - phys = <&pcie0_lane>; - phy-names = "pciephy"; - -- perst-gpio = <&tlmm 79 GPIO_ACTIVE_LOW>; -- enable-gpio = <&tlmm 81 GPIO_ACTIVE_HIGH>; -+ perst-gpios = <&tlmm 79 
GPIO_ACTIVE_LOW>; -+ wake-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; - - pinctrl-names = "default"; - pinctrl-0 = <&pcie0_default_state>; -+ dma-coherent; - - status = "disabled"; - }; -@@ -1463,7 +1464,7 @@ - - status = "disabled"; - -- pcie0_lane: lanes@1c06200 { -+ pcie0_lane: phy@1c06200 { - reg = <0 0x1c06200 0 0x170>, /* tx */ - <0 0x1c06400 0 0x200>, /* rx */ - <0 0x1c06800 0 0x1f0>, /* pcs */ -@@ -1472,6 +1473,8 @@ - clock-names = "pipe0"; - - #phy-cells = <0>; -+ -+ #clock-cells = <0>; - clock-output-names = "pcie_0_pipe_clk"; - }; - }; -@@ -1492,10 +1495,10 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>, -+ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>, - <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>; - -- interrupts = ; -+ interrupts = ; - interrupt-names = "msi"; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0x7>; -@@ -1538,11 +1541,12 @@ - phys = <&pcie1_lane>; - phy-names = "pciephy"; - -- perst-gpio = <&tlmm 82 GPIO_ACTIVE_LOW>; -- enable-gpio = <&tlmm 84 GPIO_ACTIVE_HIGH>; -+ perst-gpios = <&tlmm 82 GPIO_ACTIVE_LOW>; -+ wake-gpios = <&tlmm 84 GPIO_ACTIVE_HIGH>; - - pinctrl-names = "default"; - pinctrl-0 = <&pcie1_default_state>; -+ dma-coherent; - - status = "disabled"; - }; -@@ -1567,7 +1571,7 @@ - - status = "disabled"; - -- pcie1_lane: lanes@1c0e200 { -+ pcie1_lane: phy@1c0e200 { - reg = <0 0x1c0e200 0 0x170>, /* tx0 */ - <0 0x1c0e400 0 0x200>, /* rx0 */ - <0 0x1c0ea00 0 0x1f0>, /* pcs */ -@@ -1578,6 +1582,8 @@ - clock-names = "pipe0"; - - #phy-cells = <0>; -+ -+ #clock-cells = <0>; - clock-output-names = "pcie_1_pipe_clk"; - }; - }; -@@ -1598,10 +1604,10 @@ - #address-cells = <3>; - #size-cells = <2>; - -- ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>, -+ ranges = <0x01000000 0x0 0x00000000 0x0 0x64200000 0x0 0x100000>, - <0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>; - -- interrupts = ; -+ interrupts = ; - interrupt-names = "msi"; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0x7>; -@@ -1644,11 +1650,12 @@ - phys = <&pcie2_lane>; - phy-names = "pciephy"; - -- perst-gpio = <&tlmm 85 GPIO_ACTIVE_LOW>; -- enable-gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>; -+ perst-gpios = <&tlmm 85 GPIO_ACTIVE_LOW>; -+ wake-gpios = <&tlmm 87 GPIO_ACTIVE_HIGH>; - - pinctrl-names = "default"; - pinctrl-0 = <&pcie2_default_state>; -+ dma-coherent; - - status = "disabled"; - }; -@@ -1673,7 +1680,7 @@ - - status = "disabled"; - -- pcie2_lane: lanes@1c16200 { -+ pcie2_lane: phy@1c16200 { - reg = <0 0x1c16200 0 0x170>, /* tx0 */ - <0 0x1c16400 0 0x200>, /* rx0 */ - <0 0x1c16a00 0 0x1f0>, /* pcs */ -@@ -1684,6 +1691,8 @@ - clock-names = "pipe0"; - - #phy-cells = <0>; -+ -+ #clock-cells = <0>; - clock-output-names = "pcie_2_pipe_clk"; - }; - }; -@@ -1750,12 +1759,12 @@ - reset-names = "ufsphy"; - status = "disabled"; - -- ufs_mem_phy_lanes: lanes@1d87400 { -- reg = <0 0x01d87400 0 0x108>, -- <0 0x01d87600 0 0x1e0>, -- <0 0x01d87c00 0 0x1dc>, -- <0 0x01d87800 0 0x108>, -- <0 0x01d87a00 0 0x1e0>; -+ ufs_mem_phy_lanes: phy@1d87400 { -+ reg = <0 0x01d87400 0 0x16c>, -+ <0 0x01d87600 0 0x200>, -+ <0 0x01d87c00 0 0x200>, -+ <0 0x01d87800 0 0x16c>, -+ <0 0x01d87a00 0 0x200>; - #phy-cells = <0>; - }; - }; -@@ -1927,7 +1936,7 @@ - pins = "gpio7"; - function = "dmic1_data"; - drive-strength = <2>; -- pull-down; -+ bias-pull-down; - input-enable; - }; - }; -@@ -2300,15 +2309,11 @@ - dp_phy: dp-phy@88ea200 { - reg = <0 0x088ea200 0 0x200>, - <0 0x088ea400 0 0x200>, -- <0 
0x088eac00 0 0x400>, -+ <0 0x088eaa00 0 0x200>, - <0 0x088ea600 0 0x200>, -- <0 0x088ea800 0 0x200>, -- <0 0x088eaa00 0 0x100>; -+ <0 0x088ea800 0 0x200>; - #phy-cells = <0>; - #clock-cells = <1>; -- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; -- clock-names = "pipe0"; -- clock-output-names = "usb3_phy_pipe_clk_src"; - }; - }; - -@@ -2330,7 +2335,7 @@ - <&gcc GCC_USB3_PHY_SEC_BCR>; - reset-names = "phy", "common"; - -- usb_2_ssphy: lanes@88eb200 { -+ usb_2_ssphy: phy@88eb200 { - reg = <0 0x088eb200 0 0x200>, - <0 0x088eb400 0 0x200>, - <0 0x088eb800 0 0x800>; -diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts -index 56093e260ddfd..9ea0d7233add0 100644 ---- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts -+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts -@@ -108,6 +108,9 @@ - regulator-max-microvolt = <888000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l6b_1p2: ldo6 { -@@ -116,6 +119,9 @@ - regulator-max-microvolt = <1208000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l7b_2p96: ldo7 { -@@ -124,6 +130,9 @@ - regulator-max-microvolt = <2504000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - - vreg_l9b_1p2: ldo9 { -@@ -132,6 +141,9 @@ - regulator-max-microvolt = <1200000>; - regulator-initial-mode = ; - regulator-allow-set-load; -+ regulator-allowed-modes = -+ ; - }; - }; - -diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi -index e91cd8a5e5356..b0ba63b5869d2 100644 ---- a/arch/arm64/boot/dts/qcom/sm8350.dtsi -+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi -@@ -35,6 +35,24 @@ - clock-frequency = <32000>; - #clock-cells = <0>; - }; -+ -+ ufs_phy_rx_symbol_0_clk: ufs-phy-rx-symbol-0 { -+ compatible = "fixed-clock"; -+ clock-frequency = <1000>; -+ #clock-cells = <0>; -+ }; -+ -+ ufs_phy_rx_symbol_1_clk: ufs-phy-rx-symbol-1 { -+ compatible = "fixed-clock"; -+ clock-frequency = <1000>; -+ #clock-cells = <0>; -+ }; -+ -+ ufs_phy_tx_symbol_0_clk: ufs-phy-tx-symbol-0 { -+ compatible = "fixed-clock"; -+ clock-frequency = <1000>; -+ #clock-cells = <0>; -+ }; - }; - - cpus { -@@ -43,7 +61,7 @@ - - CPU0: cpu@0 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a55"; - reg = <0x0 0x0>; - enable-method = "psci"; - next-level-cache = <&L2_0>; -@@ -60,7 +78,7 @@ - - CPU1: cpu@100 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a55"; - reg = <0x0 0x100>; - enable-method = "psci"; - next-level-cache = <&L2_100>; -@@ -74,7 +92,7 @@ - - CPU2: cpu@200 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a55"; - reg = <0x0 0x200>; - enable-method = "psci"; - next-level-cache = <&L2_200>; -@@ -88,7 +106,7 @@ - - CPU3: cpu@300 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a55"; - reg = <0x0 0x300>; - enable-method = "psci"; - next-level-cache = <&L2_300>; -@@ -102,7 +120,7 @@ - - CPU4: cpu@400 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a78"; - reg = <0x0 0x400>; - enable-method = "psci"; - next-level-cache = <&L2_400>; -@@ -116,7 +134,7 @@ - - CPU5: cpu@500 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a78"; - reg = <0x0 0x500>; - enable-method = "psci"; - next-level-cache = <&L2_500>; -@@ -131,7 +149,7 @@ - - CPU6: cpu@600 { - device_type = "cpu"; -- 
compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-a78"; - reg = <0x0 0x600>; - enable-method = "psci"; - next-level-cache = <&L2_600>; -@@ -145,7 +163,7 @@ - - CPU7: cpu@700 { - device_type = "cpu"; -- compatible = "qcom,kryo685"; -+ compatible = "arm,cortex-x1"; - reg = <0x0 0x700>; - enable-method = "psci"; - next-level-cache = <&L2_700>; -@@ -443,8 +461,30 @@ - #clock-cells = <1>; - #reset-cells = <1>; - #power-domain-cells = <1>; -- clock-names = "bi_tcxo", "sleep_clk"; -- clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>; -+ clock-names = "bi_tcxo", -+ "sleep_clk", -+ "pcie_0_pipe_clk", -+ "pcie_1_pipe_clk", -+ "ufs_card_rx_symbol_0_clk", -+ "ufs_card_rx_symbol_1_clk", -+ "ufs_card_tx_symbol_0_clk", -+ "ufs_phy_rx_symbol_0_clk", -+ "ufs_phy_rx_symbol_1_clk", -+ "ufs_phy_tx_symbol_0_clk", -+ "usb3_phy_wrapper_gcc_usb30_pipe_clk", -+ "usb3_uni_phy_sec_gcc_usb30_pipe_clk"; -+ clocks = <&rpmhcc RPMH_CXO_CLK>, -+ <&sleep_clk>, -+ <0>, -+ <0>, -+ <0>, -+ <0>, -+ <0>, -+ <&ufs_phy_rx_symbol_0_clk>, -+ <&ufs_phy_rx_symbol_1_clk>, -+ <&ufs_phy_tx_symbol_0_clk>, -+ <0>, -+ <0>; - }; - - ipcc: mailbox@408000 { -@@ -696,6 +736,8 @@ - interconnect-names = "memory", - "config"; - -+ qcom,qmp = <&aoss_qmp>; -+ - qcom,smem-states = <&ipa_smp2p_out 0>, - <&ipa_smp2p_out 1>; - qcom,smem-state-names = "ipa-clock-enabled-valid", -@@ -939,7 +981,7 @@ - qcom,tcs-offset = <0xd00>; - qcom,drv-id = <2>; - qcom,tcs-config = , , -- , ; -+ , ; - - rpmhcc: clock-controller { - compatible = "qcom,sm8350-rpmh-clk"; -@@ -1010,6 +1052,13 @@ - <0 0x18593000 0 0x1000>; - reg-names = "freq-domain0", "freq-domain1", "freq-domain2"; - -+ interrupts = , -+ , -+ ; -+ interrupt-names = "dcvsh-irq-0", -+ "dcvsh-irq-1", -+ "dcvsh-irq-2"; -+ - clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>; - clock-names = "xo", "alternate"; - -@@ -1060,14 +1109,14 @@ - <75000000 300000000>, - <0 0>, - <0 0>, -- <75000000 300000000>, -- <75000000 300000000>; -+ <0 0>, -+ <0 0>; - status = "disabled"; - }; - - ufs_mem_phy: phy@1d87000 { - compatible = "qcom,sm8350-qmp-ufs-phy"; -- reg = <0 0x01d87000 0 0xe10>; -+ reg = <0 0x01d87000 0 0x1c4>; - #address-cells = <2>; - #size-cells = <2>; - #clock-cells = <1>; -@@ -1081,12 +1130,12 @@ - reset-names = "ufsphy"; - status = "disabled"; - -- ufs_mem_phy_lanes: lanes@1d87400 { -- reg = <0 0x01d87400 0 0x108>, -- <0 0x01d87600 0 0x1e0>, -- <0 0x01d87c00 0 0x1dc>, -- <0 0x01d87800 0 0x108>, -- <0 0x01d87a00 0 0x1e0>; -+ ufs_mem_phy_lanes: phy@1d87400 { -+ reg = <0 0x01d87400 0 0x188>, -+ <0 0x01d87600 0 0x200>, -+ <0 0x01d87c00 0 0x200>, -+ <0 0x01d87800 0 0x188>, -+ <0 0x01d87a00 0 0x200>; - #phy-cells = <0>; - #clock-cells = <0>; - }; -@@ -2185,7 +2234,7 @@ - }; - }; - -- camera-thermal-bottom { -+ cam-thermal-bottom { - polling-delay-passive = <250>; - polling-delay = <1000>; - -diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi -index 2692cc64bff61..f1ab4943c295c 100644 ---- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi -+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi -@@ -146,7 +146,7 @@ - }; - }; - -- reg_audio: regulator_audio { -+ reg_audio: regulator-audio { - compatible = "regulator-fixed"; - regulator-name = "audio-1.8V"; - regulator-min-microvolt = <1800000>; -@@ -174,7 +174,7 @@ - vin-supply = <®_lcd>; - }; - -- reg_cam0: regulator_camera { -+ reg_cam0: regulator-cam0 { - compatible = "regulator-fixed"; - regulator-name = "reg_cam0"; - regulator-min-microvolt = <1800000>; -@@ 
-183,7 +183,7 @@ - enable-active-high; - }; - -- reg_cam1: regulator_camera { -+ reg_cam1: regulator-cam1 { - compatible = "regulator-fixed"; - regulator-name = "reg_cam1"; - regulator-min-microvolt = <1800000>; -@@ -432,20 +432,6 @@ - }; - }; - -- /* 0 - lcd_reset */ -- /* 1 - lcd_pwr */ -- /* 2 - lcd_select */ -- /* 3 - backlight-enable */ -- /* 4 - Touch_shdwn */ -- /* 5 - LCD_H_pol */ -- /* 6 - lcd_V_pol */ -- gpio_exp1: gpio@20 { -- compatible = "onnn,pca9654"; -- reg = <0x20>; -- gpio-controller; -- #gpio-cells = <2>; -- }; -- - touchscreen@26 { - compatible = "ilitek,ili2117"; - reg = <0x26>; -@@ -477,6 +463,16 @@ - }; - }; - }; -+ -+ gpio_exp1: gpio@70 { -+ compatible = "nxp,pca9538"; -+ reg = <0x70>; -+ gpio-controller; -+ #gpio-cells = <2>; -+ gpio-line-names = "lcd_reset", "lcd_pwr", "lcd_select", -+ "backlight-enable", "Touch_shdwn", -+ "LCD_H_pol", "lcd_V_pol"; -+ }; - }; - - &lvds0 { -diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi -index 090dc9c4f57b5..937d17a426b66 100644 ---- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi -+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi -@@ -50,6 +50,7 @@ - &avb { - pinctrl-0 = <&avb_pins>; - pinctrl-names = "default"; -+ phy-mode = "rgmii-rxid"; - phy-handle = <&phy0>; - rx-internal-delay-ps = <1800>; - tx-internal-delay-ps = <2000>; -diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi -index 801ea54b027c4..20f8adc635e72 100644 ---- a/arch/arm64/boot/dts/renesas/cat875.dtsi -+++ b/arch/arm64/boot/dts/renesas/cat875.dtsi -@@ -18,6 +18,7 @@ - pinctrl-names = "default"; - renesas,no-ether-link; - phy-handle = <&phy0>; -+ phy-mode = "rgmii-id"; - status = "okay"; - - phy0: ethernet-phy@0 { -diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi -index 6f4fffacfca21..e70aa5a087402 100644 ---- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi -@@ -2784,7 +2784,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2799,7 +2799,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -2814,7 +2814,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi -index 0f7bdfc90a0dc..6c5694fa66900 100644 ---- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi -@@ -2629,7 +2629,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2644,7 +2644,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -2659,7 +2659,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git 
a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi -index d597772c4c37e..50189209b6605 100644 ---- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi -@@ -49,17 +49,14 @@ - opp-shared; - opp-800000000 { - opp-hz = /bits/ 64 <800000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - }; - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - }; - opp-1200000000 { - opp-hz = /bits/ 64 <1200000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - opp-suspend; - }; -@@ -1953,7 +1950,7 @@ - cpu-thermal { - polling-delay-passive = <250>; - polling-delay = <0>; -- thermal-sensors = <&thermal 0>; -+ thermal-sensors = <&thermal>; - sustainable-power = <717>; - - cooling-maps { -diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi -index 379a1300272ba..62209ab6deb9a 100644 ---- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi -@@ -2904,7 +2904,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2919,7 +2919,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -2934,7 +2934,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi -index 1768a3e6bb8da..193d81be40fc4 100644 ---- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi -@@ -3375,7 +3375,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -3390,7 +3390,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -3405,7 +3405,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi -index 2bd8169735d35..b526e4f0ee6a8 100644 ---- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi -@@ -2972,7 +2972,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2987,7 +2987,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -3002,7 +3002,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi -index 041473aa5cd09..21fc95397c3c2 100644 ---- 
a/arch/arm64/boot/dts/renesas/r8a77961.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi -@@ -2719,7 +2719,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2734,7 +2734,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -2749,7 +2749,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi -index 08df75606430b..f9679a4dd85fa 100644 ---- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi -@@ -2784,7 +2784,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -2799,7 +2799,7 @@ - }; - }; - -- sensor_thermal2: sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -2814,7 +2814,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi -index 6347d15e66b64..21fe602bd25af 100644 ---- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi -@@ -1580,7 +1580,7 @@ - }; - - thermal-zones { -- thermal-sensor-1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -1599,7 +1599,7 @@ - }; - }; - -- thermal-sensor-2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi -index 0ea300a8147d0..adcb03fa23148 100644 ---- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi -@@ -60,17 +60,14 @@ - opp-shared; - opp-800000000 { - opp-hz = /bits/ 64 <800000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - }; - opp-1000000000 { - opp-hz = /bits/ 64 <1000000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - }; - opp-1200000000 { - opp-hz = /bits/ 64 <1200000000>; -- opp-microvolt = <820000>; - clock-latency-ns = <300000>; - opp-suspend; - }; -@@ -2102,7 +2099,7 @@ - cpu-thermal { - polling-delay-passive = <250>; - polling-delay = <0>; -- thermal-sensors = <&thermal 0>; -+ thermal-sensors = <&thermal>; - sustainable-power = <717>; - - cooling-maps { -diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi -index 631d520cebee5..26899fb768a73 100644 ---- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi -@@ -1149,7 +1149,7 @@ - }; - - thermal-zones { -- sensor_thermal1: sensor-thermal1 { -+ sensor1_thermal: sensor1-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 0>; -@@ -1163,7 +1163,7 @@ - }; - }; - -- sensor_thermal2: 
sensor-thermal2 { -+ sensor2_thermal: sensor2-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 1>; -@@ -1177,7 +1177,7 @@ - }; - }; - -- sensor_thermal3: sensor-thermal3 { -+ sensor3_thermal: sensor3-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 2>; -@@ -1191,7 +1191,7 @@ - }; - }; - -- sensor_thermal4: sensor-thermal4 { -+ sensor4_thermal: sensor4-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 3>; -@@ -1205,7 +1205,7 @@ - }; - }; - -- sensor_thermal5: sensor-thermal5 { -+ sensor5_thermal: sensor5-thermal { - polling-delay-passive = <250>; - polling-delay = <1000>; - thermal-sensors = <&tsc 4>; -diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi -index 61bd4df09df0d..26cb5f14f9c7a 100644 ---- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi -+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi -@@ -270,7 +270,7 @@ - }; - - scif1_pins: scif1 { -- groups = "scif1_data_b", "scif1_ctrl"; -+ groups = "scif1_data_b"; - function = "scif1"; - }; - -@@ -330,7 +330,6 @@ - &scif1 { - pinctrl-0 = <&scif1_pins>; - pinctrl-names = "default"; -- uart-has-rtscts; - - status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -index 7249871530ab9..5eecbefa8a336 100644 ---- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -@@ -2,8 +2,8 @@ - /* - * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd - * Copyright (c) 2020 Engicam srl -- * Copyright (c) 2020 Amarula Solutons -- * Copyright (c) 2020 Amarula Solutons(India) -+ * Copyright (c) 2020 Amarula Solutions -+ * Copyright (c) 2020 Amarula Solutions(India) - */ - - #include -diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi -index 248ebb61aa790..5200d0bbd9e9c 100644 ---- a/arch/arm64/boot/dts/rockchip/px30.dtsi -+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi -@@ -711,7 +711,7 @@ - clock-names = "pclk", "timer"; - }; - -- dmac: dmac@ff240000 { -+ dmac: dma-controller@ff240000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0x0 0xff240000 0x0 0x4000>; - interrupts = , -diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts -index 665b2e69455dd..7ea48167747c6 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts -@@ -19,7 +19,7 @@ - stdout-path = "serial2:1500000n8"; - }; - -- ir_rx { -+ ir-receiver { - compatible = "gpio-ir-receiver"; - gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; -@@ -97,7 +97,7 @@ - regulator-max-microvolt = <3300000>; - regulator-always-on; - regulator-boot-on; -- vim-supply = <&vcc_io>; -+ vin-supply = <&vcc_io>; - }; - - vdd_core: vdd-core { -diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts -index aa22a0c222655..5d5d9574088ca 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts -@@ -96,7 +96,6 @@ - linux,default-trigger = "heartbeat"; - gpios = <&rk805 1 GPIO_ACTIVE_LOW>; - default-state = "on"; -- mode = <0x23>; - }; - - user_led: led-1 { -@@ -104,7 +103,6 @@ - linux,default-trigger = "mmc1"; - gpios = <&rk805 0 GPIO_ACTIVE_LOW>; - default-state = "off"; -- mode = <0x05>; - }; - }; - }; 
-diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi -index 8c821acb21ffb..3cbe83e6fb9a4 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi -@@ -489,7 +489,7 @@ - status = "disabled"; - }; - -- dmac: dmac@ff1f0000 { -+ dmac: dma-controller@ff1f0000 { - compatible = "arm,pl330", "arm,primecell"; - reg = <0x0 0xff1f0000 0x0 0x4000>; - interrupts = , -@@ -599,7 +599,7 @@ - - gpu: gpu@ff300000 { - compatible = "rockchip,rk3328-mali", "arm,mali-450"; -- reg = <0x0 0xff300000 0x0 0x40000>; -+ reg = <0x0 0xff300000 0x0 0x30000>; - interrupts = , - , - , -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts -index c4dd2a6b48368..f81ce3240342c 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts -@@ -770,8 +770,8 @@ - sd-uhs-sdr104; - - /* Power supply */ -- vqmmc-supply = &vcc1v8_s3; /* IO line */ -- vmmc-supply = &vcc_sdio; /* card's power */ -+ vqmmc-supply = <&vcc1v8_s3>; /* IO line */ -+ vmmc-supply = <&vcc_sdio>; /* card's power */ - - #address-cells = <1>; - #size-cells = <0>; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -index e6c1c94c8d69c..07737b65d7a3d 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -@@ -87,3 +87,8 @@ - }; - }; - }; -+ -+&wlan_host_wake_l { -+ /* Kevin has an external pull up, but Bob does not. */ -+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; -+}; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -index 1384dabbdf406..739937f70f8d0 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -@@ -237,6 +237,14 @@ - &edp { - status = "okay"; - -+ /* -+ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only -+ * set this here, because rk3399-gru.dtsi ensures we can generate this -+ * off GPLL=600MHz, whereas some other RK3399 boards may not. -+ */ -+ assigned-clocks = <&cru PCLK_EDP>; -+ assigned-clock-rates = <24000000>; -+ - ports { - edp_out: port@1 { - reg = <1>; -@@ -395,6 +403,7 @@ ap_i2c_tp: &i2c5 { - }; - - wlan_host_wake_l: wlan-host-wake-l { -+ /* Kevin has an external pull up, but Bob does not */ - rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi -index c1bcc8ca3769d..2f8e117109699 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi -@@ -286,7 +286,7 @@ - - sound: sound { - compatible = "rockchip,rk3399-gru-sound"; -- rockchip,cpu = <&i2s0 &i2s2>; -+ rockchip,cpu = <&i2s0 &spdif>; - }; - }; - -@@ -437,10 +437,6 @@ ap_i2c_audio: &i2c8 { - status = "okay"; - }; - --&i2s2 { -- status = "okay"; --}; -- - &io_domains { - status = "okay"; - -@@ -537,6 +533,17 @@ ap_i2c_audio: &i2c8 { - vqmmc-supply = <&ppvar_sd_card_io>; - }; - -+&spdif { -+ status = "okay"; -+ -+ /* -+ * SPDIF is routed internally to DP; we either don't use these pins, or -+ * mux them to something else. 
-+ */ -+ /delete-property/ pinctrl-0; -+ /delete-property/ pinctrl-names; -+}; -+ - &spi1 { - status = "okay"; - -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi -index d5c7648c841dc..f1fcc6b5b402c 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi -@@ -705,7 +705,6 @@ - &sdhci { - bus-width = <8>; - mmc-hs400-1_8v; -- mmc-hs400-enhanced-strobe; - non-removable; - status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts -index 738cfd21df3ef..354f54767bad8 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts -@@ -269,6 +269,7 @@ - clock-output-names = "xin32k", "rk808-clkout2"; - pinctrl-names = "default"; - pinctrl-0 = <&pmic_int_l>; -+ rockchip,system-power-controller; - vcc1-supply = <&vcc5v0_sys>; - vcc2-supply = <&vcc5v0_sys>; - vcc3-supply = <&vcc5v0_sys>; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts -index 7c93f840bc64f..e890166e7fd43 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts -@@ -55,7 +55,7 @@ - regulator-boot-on; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; -- vim-supply = <&vcc3v3_sys>; -+ vin-supply = <&vcc3v3_sys>; - }; - - vcc3v3_sys: vcc3v3-sys { -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts -index 2b5f001ff4a61..9e5d07f5712e6 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts -@@ -385,10 +385,6 @@ - }; - }; - --&cdn_dp { -- status = "okay"; --}; -- - &cpu_b0 { - cpu-supply = <&vdd_cpu_b>; - }; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts -index 292bb7e80cf35..f07f4b8231f91 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts -@@ -207,7 +207,7 @@ - cap-sd-highspeed; - cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; - disable-wp; -- max-frequency = <150000000>; -+ max-frequency = <40000000>; - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>; - vmmc-supply = <&vcc3v3_baseboard>; -@@ -232,6 +232,7 @@ - - &usbdrd_dwc3_0 { - dr_mode = "otg"; -+ extcon = <&extcon_usb3>; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -index fb67db4619ea0..7b27079fd6116 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -@@ -25,6 +25,13 @@ - }; - }; - -+ extcon_usb3: extcon-usb3 { -+ compatible = "linux,extcon-usb-gpio"; -+ id-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&usb3_id>; -+ }; -+ - clkin_gmac: external-gmac-clock { - compatible = "fixed-clock"; - clock-frequency = <125000000>; -@@ -55,7 +62,6 @@ - vcc5v0_host: vcc5v0-host-regulator { - compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; -- enable-active-low; - pinctrl-names = "default"; - pinctrl-0 = <&vcc5v0_host_en>; - regulator-name = "vcc5v0_host"; -@@ -422,9 +428,22 @@ - <4 RK_PA3 RK_FUNC_GPIO 
&pcfg_pull_none>; - }; - }; -+ -+ usb3 { -+ usb3_id: usb3-id { -+ rockchip,pins = -+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>; -+ }; -+ }; - }; - - &sdhci { -+ /* -+ * Signal integrity isn't great at 200MHz but 100MHz has proven stable -+ * enough. -+ */ -+ max-frequency = <100000000>; -+ - bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi -index b28888ea9262e..8b70e831aff23 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi -@@ -446,7 +446,6 @@ - &i2s1 { - rockchip,playback-channels = <2>; - rockchip,capture-channels = <2>; -- status = "okay"; - }; - - &i2s2 { -@@ -457,7 +456,7 @@ - status = "okay"; - - bt656-supply = <&vcc_3v0>; -- audio-supply = <&vcc_3v0>; -+ audio-supply = <&vcc1v8_codec>; - sdmmc-supply = <&vcc_sdio>; - gpio1830-supply = <&vcc_3v0>; - }; -@@ -596,9 +595,9 @@ - }; - - &sdhci { -+ max-frequency = <150000000>; - bus-width = <8>; -- mmc-hs400-1_8v; -- mmc-hs400-enhanced-strobe; -+ mmc-hs200-1_8v; - non-removable; - status = "okay"; - }; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi -index 3871c7fd83b00..4255e2d7a72fc 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi -@@ -1477,6 +1477,7 @@ - reg = <0xf780 0x24>; - clocks = <&sdhci>; - clock-names = "emmcclk"; -+ drive-impedance-ohm = <50>; - #phy-cells = <0>; - status = "disabled"; - }; -@@ -1487,7 +1488,6 @@ - clock-names = "refclk"; - #phy-cells = <1>; - resets = <&cru SRST_PCIEPHY>; -- drive-impedance-ohm = <50>; - reset-names = "phy"; - status = "disabled"; - }; -@@ -1802,10 +1802,10 @@ - interrupts = ; - clocks = <&cru PCLK_HDMI_CTRL>, - <&cru SCLK_HDMI_SFR>, -- <&cru PLL_VPLL>, -+ <&cru SCLK_HDMI_CEC>, - <&cru PCLK_VIO_GRF>, -- <&cru SCLK_HDMI_CEC>; -- clock-names = "iahb", "isfr", "vpll", "grf", "cec"; -+ <&cru PLL_VPLL>; -+ clock-names = "iahb", "isfr", "cec", "grf", "vpll"; - power-domains = <&power RK3399_PD_HDCP>; - reg-io-width = <4>; - rockchip,grf = <&grf>; -diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi -index be97da1322580..ba75adedbf79b 100644 ---- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi -+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi -@@ -599,8 +599,8 @@ - compatible = "socionext,uniphier-dwc3", "snps,dwc3"; - status = "disabled"; - reg = <0x65a00000 0xcd00>; -- interrupt-names = "host", "peripheral"; -- interrupts = <0 134 4>, <0 135 4>; -+ interrupt-names = "dwc_usb3"; -+ interrupts = <0 134 4>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb0>, <&pinctrl_usb2>; - clock-names = "ref", "bus_early", "suspend"; -@@ -701,8 +701,8 @@ - compatible = "socionext,uniphier-dwc3", "snps,dwc3"; - status = "disabled"; - reg = <0x65c00000 0xcd00>; -- interrupt-names = "host", "peripheral"; -- interrupts = <0 137 4>, <0 138 4>; -+ interrupt-names = "dwc_usb3"; -+ interrupts = <0 137 4>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb1>, <&pinctrl_usb3>; - clock-names = "ref", "bus_early", "suspend"; -diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi -index 42d1d219a3fd2..d195b97ab2eef 100644 ---- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi -@@ -59,7 +59,10 @@ - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x00 
0x01800000 0x00 0x10000>, /* GICD */ -- <0x00 0x01840000 0x00 0xC0000>; /* GICR */ -+ <0x00 0x01840000 0x00 0xC0000>, /* GICR */ -+ <0x01 0x00000000 0x00 0x2000>, /* GICC */ -+ <0x01 0x00010000 0x00 0x1000>, /* GICH */ -+ <0x01 0x00020000 0x00 0x2000>; /* GICV */ - /* - * vcpumntirq: - * virtual CPU interface maintenance interrupt -@@ -453,13 +456,11 @@ - clock-names = "clk_ahb", "clk_xin"; - mmc-ddr-1_8v; - mmc-hs200-1_8v; -- mmc-hs400-1_8v; - ti,trm-icp = <0x2>; - ti,otap-del-sel-legacy = <0x0>; - ti,otap-del-sel-mmc-hs = <0x0>; - ti,otap-del-sel-ddr52 = <0x6>; - ti,otap-del-sel-hs200 = <0x7>; -- ti,otap-del-sel-hs400 = <0x4>; - }; - - sdhci1: mmc@fa00000 { -diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi -index 59cc58f7d0c87..93e684bbd66cd 100644 ---- a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi -@@ -10,7 +10,6 @@ - compatible = "ti,am64-uart", "ti,am654-uart"; - reg = <0x00 0x04a00000 0x00 0x100>; - interrupts = ; -- clock-frequency = <48000000>; - current-speed = <115200>; - power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>; - clocks = <&k3_clks 149 0>; -@@ -21,7 +20,6 @@ - compatible = "ti,am64-uart", "ti,am654-uart"; - reg = <0x00 0x04a10000 0x00 0x100>; - interrupts = ; -- clock-frequency = <48000000>; - current-speed = <115200>; - power-domains = <&k3_pds 160 TI_SCI_PD_EXCLUSIVE>; - clocks = <&k3_clks 160 0>; -diff --git a/arch/arm64/boot/dts/ti/k3-am64.dtsi b/arch/arm64/boot/dts/ti/k3-am64.dtsi -index de6805b0c72c1..e589c58f60885 100644 ---- a/arch/arm64/boot/dts/ti/k3-am64.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am64.dtsi -@@ -85,6 +85,7 @@ - <0x00 0x68000000 0x00 0x68000000 0x00 0x08000000>, /* PCIe DAT0 */ - <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>, /* OC SRAM */ - <0x00 0x78000000 0x00 0x78000000 0x00 0x00800000>, /* Main R5FSS */ -+ <0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */ - <0x06 0x00000000 0x06 0x00000000 0x01 0x00000000>, /* PCIe DAT1 */ - <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS0 DAT3 */ - -diff --git a/arch/arm64/boot/dts/ti/k3-am642.dtsi b/arch/arm64/boot/dts/ti/k3-am642.dtsi -index e2b397c884018..8a76f4821b11b 100644 ---- a/arch/arm64/boot/dts/ti/k3-am642.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am642.dtsi -@@ -60,6 +60,6 @@ - cache-level = <2>; - cache-size = <0x40000>; - cache-line-size = <64>; -- cache-sets = <512>; -+ cache-sets = <256>; - }; - }; -diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi -index ba4e5d3e1ed7a..4f232f575ab2a 100644 ---- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi -@@ -35,7 +35,10 @@ - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ -- <0x00 0x01880000 0x00 0x90000>; /* GICR */ -+ <0x00 0x01880000 0x00 0x90000>, /* GICR */ -+ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ -+ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ -+ <0x00 0x6f020000 0x00 0x2000>; /* GICV */ - /* - * vcpumntirq: - * virtual CPU interface maintenance interrupt -@@ -117,7 +120,6 @@ - dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, - <&main_udmap 0x4001>; - dma-names = "tx", "rx1", "rx2"; -- dma-coherent; - - rng: rng@4e10000 { - compatible = "inside-secure,safexcel-eip76"; -diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi -index a9fc1af03f27f..1607db9b32dd2 100644 ---- a/arch/arm64/boot/dts/ti/k3-am65.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi -@@ 
-84,6 +84,7 @@ - <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, - <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, - <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, -+ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */ - <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>, - <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, - <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; -diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts -index d14f3c18b65fc..ee244df75eaea 100644 ---- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts -+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts -@@ -77,28 +77,28 @@ - }; - }; - --&wkup_pmx0 { -+&wkup_pmx2 { - mcu_cpsw_pins_default: mcu-cpsw-pins-default { - pinctrl-single,pins = < -- J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */ -- J721E_WKUP_IOPAD(0x006c, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */ -- J721E_WKUP_IOPAD(0x0070, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */ -- J721E_WKUP_IOPAD(0x0074, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */ -- J721E_WKUP_IOPAD(0x0078, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */ -- J721E_WKUP_IOPAD(0x007c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */ -- J721E_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */ -- J721E_WKUP_IOPAD(0x008c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */ -- J721E_WKUP_IOPAD(0x0090, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */ -- J721E_WKUP_IOPAD(0x0094, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */ -- J721E_WKUP_IOPAD(0x0080, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */ -- J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RXC */ -+ J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */ -+ J721E_WKUP_IOPAD(0x0004, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */ -+ J721E_WKUP_IOPAD(0x0008, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */ -+ J721E_WKUP_IOPAD(0x000c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */ -+ J721E_WKUP_IOPAD(0x0010, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */ -+ J721E_WKUP_IOPAD(0x0014, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */ -+ J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */ -+ J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */ -+ J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */ -+ J721E_WKUP_IOPAD(0x002c, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */ -+ J721E_WKUP_IOPAD(0x0018, PIN_OUTPUT, 0) /* MCU_RGMII1_TXC */ -+ J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0) /* MCU_RGMII1_RXC */ - >; - }; - - mcu_mdio_pins_default: mcu-mdio1-pins-default { - pinctrl-single,pins = < -- J721E_WKUP_IOPAD(0x009c, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */ -- J721E_WKUP_IOPAD(0x0098, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */ -+ J721E_WKUP_IOPAD(0x0034, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */ -+ J721E_WKUP_IOPAD(0x0030, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */ - >; - }; - }; -@@ -131,15 +131,17 @@ - >; - }; - -- main_usbss0_pins_default: main-usbss0-pins-default { -+ vdd_sd_dv_pins_default: vdd-sd-dv-pins-default { - pinctrl-single,pins = < -- J721E_IOPAD(0x120, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */ -+ J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */ - >; - }; -+}; - -- vdd_sd_dv_pins_default: vdd-sd-dv-pins-default { -+&main_pmx1 { -+ main_usbss0_pins_default: main-usbss0-pins-default { - pinctrl-single,pins = < -- J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */ -+ J721E_IOPAD(0x04, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */ - >; - }; - }; -diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi -index e8a41d09b45f2..b1df17525dea5 100644 ---- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi -+++ 
b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi -@@ -32,7 +32,7 @@ - #size-cells = <1>; - ranges = <0x00 0x00 0x00100000 0x1c000>; - -- serdes_ln_ctrl: serdes-ln-ctrl@4080 { -+ serdes_ln_ctrl: mux-controller@4080 { - compatible = "mmio-mux"; - #mux-control-cells = <1>; - mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */ -@@ -54,7 +54,10 @@ - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ -- <0x00 0x01900000 0x00 0x100000>; /* GICR */ -+ <0x00 0x01900000 0x00 0x100000>, /* GICR */ -+ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ -+ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ -+ <0x00 0x6f020000 0x00 0x2000>; /* GICV */ - - /* vcpumntirq: virtual CPU interface maintenance interrupt */ - interrupts = ; -@@ -292,7 +295,16 @@ - main_pmx0: pinctrl@11c000 { - compatible = "pinctrl-single"; - /* Proxy 0 addressing */ -- reg = <0x00 0x11c000 0x00 0x2b4>; -+ reg = <0x00 0x11c000 0x00 0x10c>; -+ #pinctrl-cells = <1>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0xffffffff>; -+ }; -+ -+ main_pmx1: pinctrl@11c11c { -+ compatible = "pinctrl-single"; -+ /* Proxy 0 addressing */ -+ reg = <0x00 0x11c11c 0x00 0xc>; - #pinctrl-cells = <1>; - pinctrl-single,register-width = <32>; - pinctrl-single,function-mask = <0xffffffff>; -@@ -606,10 +618,10 @@ - clock-names = "fck"; - #address-cells = <3>; - #size-cells = <2>; -- bus-range = <0x0 0xf>; -+ bus-range = <0x0 0xff>; - cdns,no-bar-match-nbits = <64>; -- vendor-id = /bits/ 16 <0x104c>; -- device-id = /bits/ 16 <0xb00f>; -+ vendor-id = <0x104c>; -+ device-id = <0xb00f>; - msi-map = <0x0 &gic_its 0x0 0x10000>; - dma-coherent; - ranges = <0x01000000 0x0 0x18001000 0x00 0x18001000 0x0 0x0010000>, -diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi -index 1044ec6c4b0d4..8185c1627c6f1 100644 ---- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi -@@ -56,7 +56,34 @@ - wkup_pmx0: pinctrl@4301c000 { - compatible = "pinctrl-single"; - /* Proxy 0 addressing */ -- reg = <0x00 0x4301c000 0x00 0x178>; -+ reg = <0x00 0x4301c000 0x00 0x34>; -+ #pinctrl-cells = <1>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0xffffffff>; -+ }; -+ -+ wkup_pmx1: pinctrl@0x4301c038 { -+ compatible = "pinctrl-single"; -+ /* Proxy 0 addressing */ -+ reg = <0x00 0x4301c038 0x00 0x8>; -+ #pinctrl-cells = <1>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0xffffffff>; -+ }; -+ -+ wkup_pmx2: pinctrl@0x4301c068 { -+ compatible = "pinctrl-single"; -+ /* Proxy 0 addressing */ -+ reg = <0x00 0x4301c068 0x00 0xec>; -+ #pinctrl-cells = <1>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0xffffffff>; -+ }; -+ -+ wkup_pmx3: pinctrl@0x4301c174 { -+ compatible = "pinctrl-single"; -+ /* Proxy 0 addressing */ -+ reg = <0x00 0x4301c174 0x00 0x20>; - #pinctrl-cells = <1>; - pinctrl-single,register-width = <32>; - pinctrl-single,function-mask = <0xffffffff>; -diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi -index b7005b8031495..afe99f3920ccd 100644 ---- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi -@@ -60,7 +60,7 @@ - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; -- d-cache-sets = <128>; -+ d-cache-sets = <256>; - next-level-cache = <&L2_0>; - }; - -@@ -74,7 +74,7 @@ - i-cache-sets = <256>; - d-cache-size = 
<0x8000>; - d-cache-line-size = <64>; -- d-cache-sets = <128>; -+ d-cache-sets = <256>; - next-level-cache = <&L2_0>; - }; - }; -@@ -84,7 +84,7 @@ - cache-level = <2>; - cache-size = <0x100000>; - cache-line-size = <64>; -- cache-sets = <2048>; -+ cache-sets = <1024>; - next-level-cache = <&msmc_l3>; - }; - -@@ -127,6 +127,7 @@ - <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* timesync router */ - <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */ - <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */ -+ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ - <0x00 0x70000000 0x00 0x70000000 0x00 0x00800000>, /* MSMC RAM */ - <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */ - <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */ -diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi -index cf3482376c1e6..d662eeb7d80a7 100644 ---- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi -@@ -42,7 +42,7 @@ - #size-cells = <1>; - ranges = <0x0 0x0 0x00100000 0x1c000>; - -- serdes_ln_ctrl: mux@4080 { -+ serdes_ln_ctrl: mux-controller@4080 { - compatible = "mmio-mux"; - reg = <0x00004080 0x50>; - #mux-control-cells = <1>; -@@ -76,7 +76,10 @@ - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ -- <0x00 0x01900000 0x00 0x100000>; /* GICR */ -+ <0x00 0x01900000 0x00 0x100000>, /* GICR */ -+ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ -+ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ -+ <0x00 0x6f020000 0x00 0x2000>; /* GICV */ - - /* vcpumntirq: virtual CPU interface maintenance interrupt */ - interrupts = ; -@@ -333,7 +336,6 @@ - dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, - <&main_udmap 0x4001>; - dma-names = "tx", "rx1", "rx2"; -- dma-coherent; - - rng: rng@4e10000 { - compatible = "inside-secure,safexcel-eip76"; -@@ -610,7 +612,7 @@ - clock-names = "fck"; - #address-cells = <3>; - #size-cells = <2>; -- bus-range = <0x0 0xf>; -+ bus-range = <0x0 0xff>; - vendor-id = <0x104c>; - device-id = <0xb00d>; - msi-map = <0x0 &gic_its 0x0 0x10000>; -@@ -636,7 +638,7 @@ - clocks = <&k3_clks 239 1>; - clock-names = "fck"; - max-functions = /bits/ 8 <6>; -- max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>; -+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>; - dma-coherent; - }; - -@@ -658,7 +660,7 @@ - clock-names = "fck"; - #address-cells = <3>; - #size-cells = <2>; -- bus-range = <0x0 0xf>; -+ bus-range = <0x0 0xff>; - vendor-id = <0x104c>; - device-id = <0xb00d>; - msi-map = <0x0 &gic_its 0x10000 0x10000>; -@@ -684,7 +686,7 @@ - clocks = <&k3_clks 240 1>; - clock-names = "fck"; - max-functions = /bits/ 8 <6>; -- max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>; -+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>; - dma-coherent; - }; - -@@ -706,7 +708,7 @@ - clock-names = "fck"; - #address-cells = <3>; - #size-cells = <2>; -- bus-range = <0x0 0xf>; -+ bus-range = <0x0 0xff>; - vendor-id = <0x104c>; - device-id = <0xb00d>; - msi-map = <0x0 &gic_its 0x20000 0x10000>; -@@ -732,7 +734,7 @@ - clocks = <&k3_clks 241 1>; - clock-names = "fck"; - max-functions = /bits/ 8 <6>; -- max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>; -+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>; - dma-coherent; - }; - -@@ -754,7 +756,7 @@ - clock-names = "fck"; - #address-cells = <3>; - #size-cells = <2>; -- bus-range = <0x0 0xf>; -+ bus-range = <0x0 0xff>; - vendor-id = <0x104c>; - device-id = <0xb00d>; - msi-map 
= <0x0 &gic_its 0x30000 0x10000>; -@@ -780,7 +782,7 @@ - clocks = <&k3_clks 242 1>; - clock-names = "fck"; - max-functions = /bits/ 8 <6>; -- max-virtual-functions = /bits/ 16 <4 4 4 4 0 0>; -+ max-virtual-functions = /bits/ 8 <4 4 4 4 0 0>; - dma-coherent; - #address-cells = <2>; - #size-cells = <2>; -@@ -1049,7 +1051,6 @@ - ti,itap-del-sel-mmc-hs = <0xa>; - ti,itap-del-sel-ddr52 = <0x3>; - ti,trm-icp = <0x8>; -- ti,strobe-sel = <0x77>; - dma-coherent; - }; - -diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi -index f0587fde147e6..2cd8883de5b53 100644 ---- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi -+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi -@@ -61,7 +61,7 @@ - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; -- d-cache-sets = <128>; -+ d-cache-sets = <256>; - next-level-cache = <&L2_0>; - }; - -@@ -75,7 +75,7 @@ - i-cache-sets = <256>; - d-cache-size = <0x8000>; - d-cache-line-size = <64>; -- d-cache-sets = <128>; -+ d-cache-sets = <256>; - next-level-cache = <&L2_0>; - }; - }; -@@ -85,7 +85,7 @@ - cache-level = <2>; - cache-size = <0x100000>; - cache-line-size = <64>; -- cache-sets = <2048>; -+ cache-sets = <1024>; - next-level-cache = <&msmc_l3>; - }; - -@@ -136,6 +136,7 @@ - <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01800000>, /* PCIe Core*/ - <0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */ - <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */ -+ <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ - <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT */ - <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT */ - <0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */ -diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts -index 4a86efa32d687..f7124e15f0ff6 100644 ---- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts -+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts -@@ -131,7 +131,7 @@ - reg = <0>; - - partition@0 { -- label = "data"; -+ label = "spi0-data"; - reg = <0x0 0x100000>; - }; - }; -@@ -149,7 +149,7 @@ - reg = <0>; - - partition@0 { -- label = "data"; -+ label = "spi1-data"; - reg = <0x0 0x84000>; - }; - }; -diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi -index 28dccb891a535..8278876ad33fa 100644 ---- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi -+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi -@@ -792,7 +792,7 @@ - }; - - uart0: serial@ff000000 { -- compatible = "cdns,uart-r1p12", "xlnx,xuartps"; -+ compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12"; - status = "disabled"; - interrupt-parent = <&gic>; - interrupts = <0 21 4>; -@@ -802,7 +802,7 @@ - }; - - uart1: serial@ff010000 { -- compatible = "cdns,uart-r1p12", "xlnx,xuartps"; -+ compatible = "xlnx,zynqmp-uart", "cdns,uart-r1p12"; - status = "disabled"; - interrupt-parent = <&gic>; - interrupts = <0 22 4>; -diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig -index 545197bc05013..4972a81d40d60 100644 ---- a/arch/arm64/configs/defconfig -+++ b/arch/arm64/configs/defconfig -@@ -921,7 +921,7 @@ CONFIG_DMADEVICES=y - CONFIG_DMA_BCM2835=y - CONFIG_DMA_SUN6I=m - CONFIG_FSL_EDMA=y --CONFIG_IMX_SDMA=y -+CONFIG_IMX_SDMA=m - CONFIG_K3_DMA=y - CONFIG_MV_XOR=y - CONFIG_MV_XOR_V2=y -diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig -index 55f19450091b2..1a5406e599bab 100644 ---- a/arch/arm64/crypto/Kconfig -+++ 
b/arch/arm64/crypto/Kconfig -@@ -59,6 +59,7 @@ config CRYPTO_GHASH_ARM64_CE - select CRYPTO_HASH - select CRYPTO_GF128MUL - select CRYPTO_LIB_AES -+ select CRYPTO_AEAD - - config CRYPTO_CRCT10DIF_ARM64_CE - tristate "CRCT10DIF digest algorithm using PMULL instructions" -diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c -index 9c3d86e397bf3..1fae18ba11ed1 100644 ---- a/arch/arm64/crypto/poly1305-glue.c -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -52,7 +52,7 @@ static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - { - if (unlikely(!dctx->sset)) { - if (!dctx->rset) { -- poly1305_init_arch(dctx, src); -+ poly1305_init_arm64(&dctx->h, src); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; -diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h -index 4ad22c3135dbb..5a0f792492af0 100644 ---- a/arch/arm64/include/asm/arch_gicv3.h -+++ b/arch/arm64/include/asm/arch_gicv3.h -@@ -26,12 +26,6 @@ - * sets the GP register's most significant bits to 0 with an explicit cast. - */ - --static inline void gic_write_eoir(u32 irq) --{ -- write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); -- isb(); --} -- - static __always_inline void gic_write_dir(u32 irq) - { - write_sysreg_s(irq, SYS_ICC_DIR_EL1); -diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h -index bfa58409a4d4d..448a575db8e8e 100644 ---- a/arch/arm64/include/asm/assembler.h -+++ b/arch/arm64/include/asm/assembler.h -@@ -107,6 +107,13 @@ - hint #20 - .endm - -+/* -+ * Clear Branch History instruction -+ */ -+ .macro clearbhb -+ hint #22 -+ .endm -+ - /* - * Speculation barrier - */ -@@ -830,4 +837,50 @@ alternative_endif - - #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */ - -+ .macro __mitigate_spectre_bhb_loop tmp -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+alternative_cb spectre_bhb_patch_loop_iter -+ mov \tmp, #32 // Patched to correct the immediate -+alternative_cb_end -+.Lspectre_bhb_loop\@: -+ b . + 4 -+ subs \tmp, \tmp, #1 -+ b.ne .Lspectre_bhb_loop\@ -+ sb -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ .endm -+ -+ .macro mitigate_spectre_bhb_loop tmp -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+alternative_cb spectre_bhb_patch_loop_mitigation_enable -+ b .L_spectre_bhb_loop_done\@ // Patched to NOP -+alternative_cb_end -+ __mitigate_spectre_bhb_loop \tmp -+.L_spectre_bhb_loop_done\@: -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ .endm -+ -+ /* Save/restores x0-x3 to the stack */ -+ .macro __mitigate_spectre_bhb_fw -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+ stp x0, x1, [sp, #-16]! -+ stp x2, x3, [sp, #-16]! 
-+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 -+alternative_cb smccc_patch_fw_mitigation_conduit -+ nop // Patched to SMC/HVC #0 -+alternative_cb_end -+ ldp x2, x3, [sp], #16 -+ ldp x0, x1, [sp], #16 -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ .endm -+ -+ .macro mitigate_spectre_bhb_clear_insn -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+alternative_cb spectre_bhb_patch_clearbhb -+ /* Patched to NOP when not supported */ -+ clearbhb -+ isb -+alternative_cb_end -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ .endm - #endif /* __ASM_ASSEMBLER_H */ -diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h -index 13869b76b58cd..abd302e521c06 100644 ---- a/arch/arm64/include/asm/atomic_ll_sc.h -+++ b/arch/arm64/include/asm/atomic_ll_sc.h -@@ -12,19 +12,6 @@ - - #include - --#ifdef CONFIG_ARM64_LSE_ATOMICS --#define __LL_SC_FALLBACK(asm_ops) \ --" b 3f\n" \ --" .subsection 1\n" \ --"3:\n" \ --asm_ops "\n" \ --" b 4f\n" \ --" .previous\n" \ --"4:\n" --#else --#define __LL_SC_FALLBACK(asm_ops) asm_ops --#endif -- - #ifndef CONFIG_CC_HAS_K_CONSTRAINT - #define K - #endif -@@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ - int result; \ - \ - asm volatile("// atomic_" #op "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %2\n" \ --"1: ldxr %w0, %2\n" \ --" " #asm_op " %w0, %w0, %w3\n" \ --" stxr %w1, %w0, %2\n" \ --" cbnz %w1, 1b\n") \ -+ " prfm pstl1strm, %2\n" \ -+ "1: ldxr %w0, %2\n" \ -+ " " #asm_op " %w0, %w0, %w3\n" \ -+ " stxr %w1, %w0, %2\n" \ -+ " cbnz %w1, 1b\n" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i)); \ - } -@@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ - int result; \ - \ - asm volatile("// atomic_" #op "_return" #name "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %2\n" \ --"1: ld" #acq "xr %w0, %2\n" \ --" " #asm_op " %w0, %w0, %w3\n" \ --" st" #rel "xr %w1, %w0, %2\n" \ --" cbnz %w1, 1b\n" \ --" " #mb ) \ -+ " prfm pstl1strm, %2\n" \ -+ "1: ld" #acq "xr %w0, %2\n" \ -+ " " #asm_op " %w0, %w0, %w3\n" \ -+ " st" #rel "xr %w1, %w0, %2\n" \ -+ " cbnz %w1, 1b\n" \ -+ " " #mb \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i) \ - : cl); \ -@@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ - int val, result; \ - \ - asm volatile("// atomic_fetch_" #op #name "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %3\n" \ --"1: ld" #acq "xr %w0, %3\n" \ --" " #asm_op " %w1, %w0, %w4\n" \ --" st" #rel "xr %w2, %w1, %3\n" \ --" cbnz %w2, 1b\n" \ --" " #mb ) \ -+ " prfm pstl1strm, %3\n" \ -+ "1: ld" #acq "xr %w0, %3\n" \ -+ " " #asm_op " %w1, %w0, %w4\n" \ -+ " st" #rel "xr %w2, %w1, %3\n" \ -+ " cbnz %w2, 1b\n" \ -+ " " #mb \ - : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i) \ - : cl); \ -@@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ - unsigned long tmp; \ - \ - asm volatile("// atomic64_" #op "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %2\n" \ --"1: ldxr %0, %2\n" \ --" " #asm_op " %0, %0, %3\n" \ --" stxr %w1, %0, %2\n" \ --" cbnz %w1, 1b") \ -+ " prfm pstl1strm, %2\n" \ -+ "1: ldxr %0, %2\n" \ -+ " " #asm_op " %0, %0, %3\n" \ -+ " stxr %w1, %0, %2\n" \ -+ " cbnz %w1, 1b" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i)); \ - } -@@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ - unsigned long tmp; \ - \ - asm volatile("// atomic64_" 
#op "_return" #name "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %2\n" \ --"1: ld" #acq "xr %0, %2\n" \ --" " #asm_op " %0, %0, %3\n" \ --" st" #rel "xr %w1, %0, %2\n" \ --" cbnz %w1, 1b\n" \ --" " #mb ) \ -+ " prfm pstl1strm, %2\n" \ -+ "1: ld" #acq "xr %0, %2\n" \ -+ " " #asm_op " %0, %0, %3\n" \ -+ " st" #rel "xr %w1, %0, %2\n" \ -+ " cbnz %w1, 1b\n" \ -+ " " #mb \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i) \ - : cl); \ -@@ -176,19 +158,18 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ - - #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ - static inline long \ --__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ -+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ - { \ - s64 result, val; \ - unsigned long tmp; \ - \ - asm volatile("// atomic64_fetch_" #op #name "\n" \ -- __LL_SC_FALLBACK( \ --" prfm pstl1strm, %3\n" \ --"1: ld" #acq "xr %0, %3\n" \ --" " #asm_op " %1, %0, %4\n" \ --" st" #rel "xr %w2, %1, %3\n" \ --" cbnz %w2, 1b\n" \ --" " #mb ) \ -+ " prfm pstl1strm, %3\n" \ -+ "1: ld" #acq "xr %0, %3\n" \ -+ " " #asm_op " %1, %0, %4\n" \ -+ " st" #rel "xr %w2, %1, %3\n" \ -+ " cbnz %w2, 1b\n" \ -+ " " #mb \ - : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : __stringify(constraint) "r" (i) \ - : cl); \ -@@ -240,15 +221,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v) - unsigned long tmp; - - asm volatile("// atomic64_dec_if_positive\n" -- __LL_SC_FALLBACK( --" prfm pstl1strm, %2\n" --"1: ldxr %0, %2\n" --" subs %0, %0, #1\n" --" b.lt 2f\n" --" stlxr %w1, %0, %2\n" --" cbnz %w1, 1b\n" --" dmb ish\n" --"2:") -+ " prfm pstl1strm, %2\n" -+ "1: ldxr %0, %2\n" -+ " subs %0, %0, #1\n" -+ " b.lt 2f\n" -+ " stlxr %w1, %0, %2\n" -+ " cbnz %w1, 1b\n" -+ " dmb ish\n" -+ "2:" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : - : "cc", "memory"); -@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ - old = (u##sz)old; \ - \ - asm volatile( \ -- __LL_SC_FALLBACK( \ - " prfm pstl1strm, %[v]\n" \ - "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ - " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ -@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ - " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ - " cbnz %w[tmp], 1b\n" \ - " " #mb "\n" \ -- "2:") \ -+ "2:" \ - : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ - [v] "+Q" (*(u##sz *)ptr) \ - : [old] __stringify(constraint) "r" (old), [new] "r" (new) \ -@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ - unsigned long tmp, ret; \ - \ - asm volatile("// __cmpxchg_double" #name "\n" \ -- __LL_SC_FALLBACK( \ - " prfm pstl1strm, %2\n" \ - "1: ldxp %0, %1, %2\n" \ - " eor %0, %0, %3\n" \ -@@ -336,8 +314,8 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \ - " st" #rel "xp %w0, %5, %6, %2\n" \ - " cbnz %w0, 1b\n" \ - " " #mb "\n" \ -- "2:") \ -- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ -+ "2:" \ -+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \ - : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ - : cl); \ - \ -diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h -index da3280f639cd7..28e96118c1e5a 100644 ---- a/arch/arm64/include/asm/atomic_lse.h -+++ b/arch/arm64/include/asm/atomic_lse.h -@@ -11,11 +11,11 @@ - #define __ASM_ATOMIC_LSE_H - - #define ATOMIC_OP(op, asm_op) \ --static inline void __lse_atomic_##op(int i, atomic_t *v) \ -+static inline void 
__lse_atomic_##op(int i, atomic_t *v) \ - { \ - asm volatile( \ - __LSE_PREAMBLE \ --" " #asm_op " %w[i], %[v]\n" \ -+ " " #asm_op " %w[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ - } -@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ - { \ - asm volatile( \ - __LSE_PREAMBLE \ --" " #asm_op #mb " %w[i], %w[i], %[v]" \ -+ " " #asm_op #mb " %w[i], %w[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ -@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ - " add %w[i], %w[i], %w[tmp]" \ - : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ - : "r" (v) \ -- : cl); \ -+ : cl); \ - \ - return i; \ - } -@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ - { \ - asm volatile( \ - __LSE_PREAMBLE \ --" " #asm_op " %[i], %[v]\n" \ -+ " " #asm_op " %[i], %[v]\n" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v)); \ - } -@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ - { \ - asm volatile( \ - __LSE_PREAMBLE \ --" " #asm_op #mb " %[i], %[i], %[v]" \ -+ " " #asm_op #mb " %[i], %[i], %[v]" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ - : "r" (v) \ - : cl); \ -@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) - } - - #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ --static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ -+static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\ - { \ - unsigned long tmp; \ - \ -@@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \ - " eor %[old2], %[old2], %[oldval2]\n" \ - " orr %[old1], %[old1], %[old2]" \ - : [old1] "+&r" (x0), [old2] "+&r" (x1), \ -- [v] "+Q" (*(unsigned long *)ptr) \ -+ [v] "+Q" (*(__uint128_t *)ptr) \ - : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ - [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ - : cl); \ -diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h -index 451e11e5fd23b..1c5a005984582 100644 ---- a/arch/arm64/include/asm/barrier.h -+++ b/arch/arm64/include/asm/barrier.h -@@ -23,7 +23,7 @@ - #define dsb(opt) asm volatile("dsb " #opt : : : "memory") - - #define psb_csync() asm volatile("hint #17" : : : "memory") --#define tsb_csync() asm volatile("hint #18" : : : "memory") -+#define __tsb_csync() asm volatile("hint #18" : : : "memory") - #define csdb() asm volatile("hint #20" : : : "memory") - - #ifdef CONFIG_ARM64_PSEUDO_NMI -@@ -46,6 +46,20 @@ - #define dma_rmb() dmb(oshld) - #define dma_wmb() dmb(oshst) - -+ -+#define tsb_csync() \ -+ do { \ -+ /* \ -+ * CPUs affected by Arm Erratum 2054223 or 2067961 needs \ -+ * another TSB to ensure the trace is flushed. The barriers \ -+ * don't have to be strictly back to back, as long as the \ -+ * CPU is in trace prohibited state. \ -+ */ \ -+ if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \ -+ __tsb_csync(); \ -+ __tsb_csync(); \ -+ } while (0) -+ - /* - * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz - * and 0 otherwise. 
-diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h -index 0f6d16faa5402..a58e366f0b074 100644 ---- a/arch/arm64/include/asm/cpu.h -+++ b/arch/arm64/include/asm/cpu.h -@@ -51,6 +51,7 @@ struct cpuinfo_arm64 { - u64 reg_id_aa64dfr1; - u64 reg_id_aa64isar0; - u64 reg_id_aa64isar1; -+ u64 reg_id_aa64isar2; - u64 reg_id_aa64mmfr0; - u64 reg_id_aa64mmfr1; - u64 reg_id_aa64mmfr2; -diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h -index ef6be92b1921a..a77b5f49b3a6c 100644 ---- a/arch/arm64/include/asm/cpufeature.h -+++ b/arch/arm64/include/asm/cpufeature.h -@@ -637,6 +637,35 @@ static inline bool cpu_supports_mixed_endian_el0(void) - return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); - } - -+ -+static inline bool supports_csv2p3(int scope) -+{ -+ u64 pfr0; -+ u8 csv2_val; -+ -+ if (scope == SCOPE_LOCAL_CPU) -+ pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); -+ else -+ pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); -+ -+ csv2_val = cpuid_feature_extract_unsigned_field(pfr0, -+ ID_AA64PFR0_CSV2_SHIFT); -+ return csv2_val == 3; -+} -+ -+static inline bool supports_clearbhb(int scope) -+{ -+ u64 isar2; -+ -+ if (scope == SCOPE_LOCAL_CPU) -+ isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); -+ else -+ isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); -+ -+ return cpuid_feature_extract_unsigned_field(isar2, -+ ID_AA64ISAR2_CLEARBHB_SHIFT); -+} -+ - const struct cpumask *system_32bit_el0_cpumask(void); - DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); - -diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h -index 6231e1f0abe7e..9cf5d9551e991 100644 ---- a/arch/arm64/include/asm/cputype.h -+++ b/arch/arm64/include/asm/cputype.h -@@ -41,7 +41,7 @@ - (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) - - #define MIDR_CPU_MODEL(imp, partnum) \ -- (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ -+ ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \ - (0xf << MIDR_ARCHITECTURE_SHIFT) | \ - ((partnum) << MIDR_PARTNUM_SHIFT)) - -@@ -60,6 +60,7 @@ - #define ARM_CPU_IMP_FUJITSU 0x46 - #define ARM_CPU_IMP_HISI 0x48 - #define ARM_CPU_IMP_APPLE 0x61 -+#define ARM_CPU_IMP_AMPERE 0xC0 - - #define ARM_CPU_PART_AEM_V8 0xD0F - #define ARM_CPU_PART_FOUNDATION 0xD00 -@@ -73,6 +74,15 @@ - #define ARM_CPU_PART_CORTEX_A76 0xD0B - #define ARM_CPU_PART_NEOVERSE_N1 0xD0C - #define ARM_CPU_PART_CORTEX_A77 0xD0D -+#define ARM_CPU_PART_NEOVERSE_V1 0xD40 -+#define ARM_CPU_PART_CORTEX_A78 0xD41 -+#define ARM_CPU_PART_CORTEX_A78AE 0xD42 -+#define ARM_CPU_PART_CORTEX_X1 0xD44 -+#define ARM_CPU_PART_CORTEX_A510 0xD46 -+#define ARM_CPU_PART_CORTEX_A710 0xD47 -+#define ARM_CPU_PART_CORTEX_X2 0xD48 -+#define ARM_CPU_PART_NEOVERSE_N2 0xD49 -+#define ARM_CPU_PART_CORTEX_A78C 0xD4B - - #define APM_CPU_PART_POTENZA 0x000 - -@@ -103,6 +113,8 @@ - #define APPLE_CPU_PART_M1_ICESTORM 0x022 - #define APPLE_CPU_PART_M1_FIRESTORM 0x023 - -+#define AMPERE_CPU_PART_AMPERE1 0xAC3 -+ - #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) - #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) - #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) -@@ -113,6 +125,15 @@ - #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) - #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) - #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) -+#define MIDR_NEOVERSE_V1 
MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) -+#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) -+#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) -+#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) -+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) -+#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) -+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) -+#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) -+#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) - #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) - #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) - #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) -@@ -133,6 +154,7 @@ - #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) - #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) - #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) -+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) - - /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ - #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX -diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h -index 657c921fd784a..8de1a840ad974 100644 ---- a/arch/arm64/include/asm/debug-monitors.h -+++ b/arch/arm64/include/asm/debug-monitors.h -@@ -76,7 +76,7 @@ struct task_struct; - - struct step_hook { - struct list_head node; -- int (*fn)(struct pt_regs *regs, unsigned int esr); -+ int (*fn)(struct pt_regs *regs, unsigned long esr); - }; - - void register_user_step_hook(struct step_hook *hook); -@@ -87,7 +87,7 @@ void unregister_kernel_step_hook(struct step_hook *hook); - - struct break_hook { - struct list_head node; -- int (*fn)(struct pt_regs *regs, unsigned int esr); -+ int (*fn)(struct pt_regs *regs, unsigned long esr); - u16 imm; - u16 mask; /* These bits are ignored when comparing with imm */ - }; -@@ -116,6 +116,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs, - void kernel_enable_single_step(struct pt_regs *regs); - void kernel_disable_single_step(void); - int kernel_active_single_step(void); -+void kernel_rewind_single_step(struct pt_regs *regs); - - #ifdef CONFIG_HAVE_HW_BREAKPOINT - int reinstall_suspended_bps(struct pt_regs *regs); -diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h -index d3e1825337be3..53cbbb96f7ebf 100644 ---- a/arch/arm64/include/asm/efi.h -+++ b/arch/arm64/include/asm/efi.h -@@ -14,7 +14,6 @@ - - #ifdef CONFIG_EFI - extern void efi_init(void); --extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); - #else - #define efi_init() - #endif -@@ -26,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); - ({ \ - efi_virtmap_load(); \ - __efi_fpsimd_begin(); \ -+ raw_spin_lock(&efi_rt_lock); \ - }) - - #define arch_efi_call_virt(p, f, args...) 
\ -@@ -37,10 +37,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); - - #define arch_efi_call_virt_teardown() \ - ({ \ -+ raw_spin_unlock(&efi_rt_lock); \ - __efi_fpsimd_end(); \ - efi_virtmap_unload(); \ - }) - -+extern raw_spinlock_t efi_rt_lock; - efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); - - #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) -diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h -index 3198acb2aad8c..7f3c87f7a0cec 100644 ---- a/arch/arm64/include/asm/el2_setup.h -+++ b/arch/arm64/include/asm/el2_setup.h -@@ -106,7 +106,7 @@ - msr_s SYS_ICC_SRE_EL2, x0 - isb // Make sure SRE is now set - mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back, -- tbz x0, #0, 1f // and check that it sticks -+ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks - msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults - .Lskip_gicv3_\@: - .endm -diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h -index 29f97eb3dad41..9f91c8906edd9 100644 ---- a/arch/arm64/include/asm/esr.h -+++ b/arch/arm64/include/asm/esr.h -@@ -68,6 +68,7 @@ - #define ESR_ELx_EC_MAX (0x3F) - - #define ESR_ELx_EC_SHIFT (26) -+#define ESR_ELx_EC_WIDTH (6) - #define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT) - #define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT) - -@@ -323,14 +324,14 @@ - #ifndef __ASSEMBLY__ - #include - --static inline bool esr_is_data_abort(u32 esr) -+static inline bool esr_is_data_abort(unsigned long esr) - { -- const u32 ec = ESR_ELx_EC(esr); -+ const unsigned long ec = ESR_ELx_EC(esr); - - return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR; - } - --const char *esr_get_class_string(u32 esr); -+const char *esr_get_class_string(unsigned long esr); - #endif /* __ASSEMBLY */ - - #endif /* __ASM_ESR_H */ -diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h -index 339477dca5513..0e6535aa78c2f 100644 ---- a/arch/arm64/include/asm/exception.h -+++ b/arch/arm64/include/asm/exception.h -@@ -19,9 +19,9 @@ - #define __exception_irq_entry __kprobes - #endif - --static inline u32 disr_to_esr(u64 disr) -+static inline unsigned long disr_to_esr(u64 disr) - { -- unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; -+ unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; - - if ((disr & DISR_EL1_IDS) == 0) - esr |= (disr & DISR_EL1_ESR_MASK); -@@ -57,23 +57,23 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, - void (*func)(struct pt_regs *)); - asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs); - --void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); -+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs); - void do_undefinstr(struct pt_regs *regs); - void do_bti(struct pt_regs *regs); --void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, -+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, - struct pt_regs *regs); --void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); --void do_sve_acc(unsigned int esr, struct pt_regs *regs); --void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs); --void do_sysinstr(unsigned int esr, struct pt_regs *regs); --void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); --void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr); --void do_cp15instr(unsigned int esr, struct pt_regs *regs); -+void do_fpsimd_acc(unsigned 
long esr, struct pt_regs *regs); -+void do_sve_acc(unsigned long esr, struct pt_regs *regs); -+void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs); -+void do_sysinstr(unsigned long esr, struct pt_regs *regs); -+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs); -+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr); -+void do_cp15instr(unsigned long esr, struct pt_regs *regs); - void do_el0_svc(struct pt_regs *regs); - void do_el0_svc_compat(struct pt_regs *regs); --void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); --void do_serror(struct pt_regs *regs, unsigned int esr); -+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr); -+void do_serror(struct pt_regs *regs, unsigned long esr); - void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); - --void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); -+void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); - #endif /* __ASM_EXCEPTION_H */ -diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h -index b15eb4a3e6b20..840a35ed92ec8 100644 ---- a/arch/arm64/include/asm/extable.h -+++ b/arch/arm64/include/asm/extable.h -@@ -22,15 +22,6 @@ struct exception_table_entry - - #define ARCH_HAS_RELATIVE_EXTABLE - --static inline bool in_bpf_jit(struct pt_regs *regs) --{ -- if (!IS_ENABLED(CONFIG_BPF_JIT)) -- return false; -- -- return regs->pc >= BPF_JIT_REGION_START && -- regs->pc < BPF_JIT_REGION_END; --} -- - #ifdef CONFIG_BPF_JIT - int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, - struct pt_regs *regs); -diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h -index 4335800201c97..daff882883f92 100644 ---- a/arch/arm64/include/asm/fixmap.h -+++ b/arch/arm64/include/asm/fixmap.h -@@ -62,9 +62,11 @@ enum fixed_addresses { - #endif /* CONFIG_ACPI_APEI_GHES */ - - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -+ FIX_ENTRY_TRAMP_TEXT3, -+ FIX_ENTRY_TRAMP_TEXT2, -+ FIX_ENTRY_TRAMP_TEXT1, - FIX_ENTRY_TRAMP_DATA, -- FIX_ENTRY_TRAMP_TEXT, --#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) -+#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) - #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ - __end_of_permanent_fixed_addresses, - -diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h -index 8c129db8232a6..f68fbb2074730 100644 ---- a/arch/arm64/include/asm/hwcap.h -+++ b/arch/arm64/include/asm/hwcap.h -@@ -105,6 +105,9 @@ - #define KERNEL_HWCAP_RNG __khwcap2_feature(RNG) - #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) - #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) -+#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV) -+#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) -+#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) - - /* - * This yields a mask that user programs can use to figure out what -diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h -index 6b776c8667b20..b02f0c328c8e4 100644 ---- a/arch/arm64/include/asm/insn.h -+++ b/arch/arm64/include/asm/insn.h -@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op { - AARCH64_INSN_HINT_PSB = 0x11 << 5, - AARCH64_INSN_HINT_TSB = 0x12 << 5, - AARCH64_INSN_HINT_CSDB = 0x14 << 5, -+ AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5, - - AARCH64_INSN_HINT_BTI = 0x20 << 5, - AARCH64_INSN_HINT_BTIC = 0x22 << 5, -diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h -index 7fd836bea7eb4..3995652daf81a 100644 ---- a/arch/arm64/include/asm/io.h -+++ 
b/arch/arm64/include/asm/io.h -@@ -192,4 +192,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); - extern int valid_phys_addr_range(phys_addr_t addr, size_t size); - extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); - -+extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, -+ unsigned long flags); -+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap -+ - #endif /* __ASM_IO_H */ -diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h -index 96dc0f7da258d..a971d462f531c 100644 ---- a/arch/arm64/include/asm/kernel-pgtable.h -+++ b/arch/arm64/include/asm/kernel-pgtable.h -@@ -103,8 +103,8 @@ - /* - * Initial memory map attributes. - */ --#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) --#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -+#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_UXN) -+#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S | PMD_SECT_UXN) - - #if ARM64_KERNEL_USES_PMD_MAPS - #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) -diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h -index 327120c0089fe..f67a561e0935e 100644 ---- a/arch/arm64/include/asm/kvm_arm.h -+++ b/arch/arm64/include/asm/kvm_arm.h -@@ -91,7 +91,7 @@ - #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) - - /* TCR_EL2 Registers bits */ --#define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) -+#define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) - #define TCR_EL2_TBI (1 << 20) - #define TCR_EL2_PS_SHIFT 16 - #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) -@@ -276,7 +276,7 @@ - #define CPTR_EL2_TFP_SHIFT 10 - - /* Hyp Coprocessor Trap Register */ --#define CPTR_EL2_TCPAC (1 << 31) -+#define CPTR_EL2_TCPAC (1U << 31) - #define CPTR_EL2_TAM (1 << 30) - #define CPTR_EL2_TTA (1 << 20) - #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) -diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h -index fd418955e31e6..64f8a90d33277 100644 ---- a/arch/arm64/include/asm/kvm_emulate.h -+++ b/arch/arm64/include/asm/kvm_emulate.h -@@ -366,8 +366,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) - - static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) - { -- if (kvm_vcpu_abt_iss1tw(vcpu)) -- return true; -+ if (kvm_vcpu_abt_iss1tw(vcpu)) { -+ /* -+ * Only a permission fault on a S1PTW should be -+ * considered as a write. Otherwise, page tables baked -+ * in a read-only memslot will result in an exception -+ * being delivered in the guest. -+ * -+ * The drawback is that we end-up faulting twice if the -+ * guest is using any of HW AF/DB: a translation fault -+ * to map the page containing the PT (read only at -+ * first), then a permission fault to allow the flags -+ * to be set. 
-+ */ -+ switch (kvm_vcpu_trap_get_fault_type(vcpu)) { -+ case ESR_ELx_FSC_PERM: -+ return true; -+ default: -+ return false; -+ } -+ } - - if (kvm_vcpu_trap_is_iabt(vcpu)) - return false; -diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h -index f8be56d5342ba..1713630bf8f5a 100644 ---- a/arch/arm64/include/asm/kvm_host.h -+++ b/arch/arm64/include/asm/kvm_host.h -@@ -711,6 +711,11 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) - ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr(); - } - -+static inline bool kvm_system_needs_idmapped_vectors(void) -+{ -+ return cpus_have_const_cap(ARM64_SPECTRE_V3A); -+} -+ - void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); - - static inline void kvm_arch_hardware_unsetup(void) {} -@@ -790,6 +795,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); - #define kvm_vcpu_has_pmu(vcpu) \ - (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) - -+#define kvm_supports_32bit_el0() \ -+ (system_supports_32bit_el0() && \ -+ !static_branch_unlikely(&arm64_mismatched_32bit_el0)) -+ - int kvm_trng_call(struct kvm_vcpu *vcpu); - #ifdef CONFIG_KVM - extern phys_addr_t hyp_mem_base; -diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h -index f1745a8434144..05886322c300c 100644 ---- a/arch/arm64/include/asm/memory.h -+++ b/arch/arm64/include/asm/memory.h -@@ -44,11 +44,8 @@ - #define _PAGE_OFFSET(va) (-(UL(1) << (va))) - #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) - #define KIMAGE_VADDR (MODULES_END) --#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN)) --#define BPF_JIT_REGION_SIZE (SZ_128M) --#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) - #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) --#define MODULES_VADDR (BPF_JIT_REGION_END) -+#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN)) - #define MODULES_VSIZE (SZ_128M) - #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) - #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) -diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h -index a11ccadd47d29..094701ec5500b 100644 ---- a/arch/arm64/include/asm/module.lds.h -+++ b/arch/arm64/include/asm/module.lds.h -@@ -1,8 +1,8 @@ - SECTIONS { - #ifdef CONFIG_ARM64_MODULE_PLTS -- .plt 0 (NOLOAD) : { BYTE(0) } -- .init.plt 0 (NOLOAD) : { BYTE(0) } -- .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } -+ .plt 0 : { BYTE(0) } -+ .init.plt 0 : { BYTE(0) } -+ .text.ftrace_trampoline 0 : { BYTE(0) } - #endif - - #ifdef CONFIG_KASAN_SW_TAGS -diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h -index 22420e1f8c037..592aabb25b0e7 100644 ---- a/arch/arm64/include/asm/mte-kasan.h -+++ b/arch/arm64/include/asm/mte-kasan.h -@@ -5,6 +5,7 @@ - #ifndef __ASM_MTE_KASAN_H - #define __ASM_MTE_KASAN_H - -+#include - #include - - #ifndef __ASSEMBLY__ -@@ -84,10 +85,12 @@ static inline void __dc_gzva(u64 p) - static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, - bool init) - { -- u64 curr, mask, dczid_bs, end1, end2, end3; -+ u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3; - - /* Read DC G(Z)VA block size from the system register. 
*/ -- dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf); -+ dczid = read_cpuid(DCZID_EL0); -+ dczid_bs = 4ul << (dczid & 0xf); -+ dczid_dzp = (dczid >> 4) & 1; - - curr = (u64)__tag_set(addr, tag); - mask = dczid_bs - 1; -@@ -106,7 +109,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, - */ - #define SET_MEMTAG_RANGE(stg_post, dc_gva) \ - do { \ -- if (size >= 2 * dczid_bs) { \ -+ if (!dczid_dzp && size >= 2 * dczid_bs) {\ - do { \ - curr = stg_post(curr); \ - } while (curr < end1); \ -diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h -index 02511650cffe5..3e368ca66623b 100644 ---- a/arch/arm64/include/asm/mte.h -+++ b/arch/arm64/include/asm/mte.h -@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte); - void mte_copy_page_tags(void *kto, const void *kfrom); - void mte_thread_init_user(void); - void mte_thread_switch(struct task_struct *next); -+void mte_cpu_setup(void); - void mte_suspend_enter(void); -+void mte_suspend_exit(void); - long set_mte_ctrl(struct task_struct *task, unsigned long arg); - long get_mte_ctrl(struct task_struct *task); - int mte_ptrace_copy_tags(struct task_struct *child, long request, -@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next) - static inline void mte_suspend_enter(void) - { - } -+static inline void mte_suspend_exit(void) -+{ -+} - static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg) - { - return 0; -diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h -index f98c91bbd7c17..993a27ea6f543 100644 ---- a/arch/arm64/include/asm/page.h -+++ b/arch/arm64/include/asm/page.h -@@ -41,7 +41,6 @@ void tag_clear_highpage(struct page *to); - - typedef struct page *pgtable_t; - --int pfn_valid(unsigned long pfn); - int pfn_is_map_memory(unsigned long pfn); - - #include -diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h -index 8433a2058eb15..237224484d0f6 100644 ---- a/arch/arm64/include/asm/pgalloc.h -+++ b/arch/arm64/include/asm/pgalloc.h -@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep, - static inline void - pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) - { -- VM_BUG_ON(mm != &init_mm); -+ VM_BUG_ON(mm && mm != &init_mm); - __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN); - } - -diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h -index 40085e53f573d..66671ff051835 100644 ---- a/arch/arm64/include/asm/pgtable-hwdef.h -+++ b/arch/arm64/include/asm/pgtable-hwdef.h -@@ -273,6 +273,8 @@ - #define TCR_NFD1 (UL(1) << 54) - #define TCR_E0PD0 (UL(1) << 55) - #define TCR_E0PD1 (UL(1) << 56) -+#define TCR_TCMA0 (UL(1) << 57) -+#define TCR_TCMA1 (UL(1) << 58) - - /* - * TTBR. 
-diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h -index 7032f04c8ac6e..b1e1b74d993c3 100644 ---- a/arch/arm64/include/asm/pgtable-prot.h -+++ b/arch/arm64/include/asm/pgtable-prot.h -@@ -92,7 +92,7 @@ extern bool arm64_use_ng_mappings; - #define __P001 PAGE_READONLY - #define __P010 PAGE_READONLY - #define __P011 PAGE_READONLY --#define __P100 PAGE_EXECONLY -+#define __P100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ - #define __P101 PAGE_READONLY_EXEC - #define __P110 PAGE_READONLY_EXEC - #define __P111 PAGE_READONLY_EXEC -@@ -101,7 +101,7 @@ extern bool arm64_use_ng_mappings; - #define __S001 PAGE_READONLY - #define __S010 PAGE_SHARED - #define __S011 PAGE_SHARED --#define __S100 PAGE_EXECONLY -+#define __S100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ - #define __S101 PAGE_READONLY_EXEC - #define __S110 PAGE_SHARED_EXEC - #define __S111 PAGE_SHARED_EXEC -diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h -index dfa76afa0ccff..ed57717cd0040 100644 ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -67,9 +67,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; - * page table entry, taking care of 52-bit addresses. - */ - #ifdef CONFIG_ARM64_PA_BITS_52 --#define __pte_to_phys(pte) \ -- ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36)) --#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK) -+static inline phys_addr_t __pte_to_phys(pte_t pte) -+{ -+ return (pte_val(pte) & PTE_ADDR_LOW) | -+ ((pte_val(pte) & PTE_ADDR_HIGH) << 36); -+} -+static inline pteval_t __phys_to_pte_val(phys_addr_t phys) -+{ -+ return (phys | (phys >> 36)) & PTE_ADDR_MASK; -+} - #else - #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK) - #define __phys_to_pte_val(phys) (phys) -@@ -529,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - PMD_TYPE_TABLE) - #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ - PMD_TYPE_SECT) --#define pmd_leaf(pmd) pmd_sect(pmd) -+#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd)) - #define pmd_bad(pmd) (!pmd_table(pmd)) - - #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? 
CONT_PMD_SIZE : PMD_SIZE) -@@ -619,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) - #define pud_none(pud) (!pud_val(pud)) - #define pud_bad(pud) (!pud_table(pud)) - #define pud_present(pud) pte_present(pud_pte(pud)) --#define pud_leaf(pud) pud_sect(pud) -+#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) - #define pud_valid(pud) pte_valid(pud_pte(pud)) - - static inline void set_pud(pud_t *pudp, pud_t pud) -@@ -1011,18 +1017,6 @@ static inline bool arch_wants_old_prefaulted_pte(void) - } - #define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte - --static inline pgprot_t arch_filter_pgprot(pgprot_t prot) --{ -- if (cpus_have_const_cap(ARM64_HAS_EPAN)) -- return prot; -- -- if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY)) -- return prot; -- -- return PAGE_READONLY_EXEC; --} -- -- - #endif /* !__ASSEMBLY__ */ - - #endif /* __ASM_PGTABLE_H */ -diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h -index ee2bdc1b9f5bb..7364530de0a77 100644 ---- a/arch/arm64/include/asm/processor.h -+++ b/arch/arm64/include/asm/processor.h -@@ -204,8 +204,9 @@ void tls_preserve_current_state(void); - - static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) - { -+ s32 previous_syscall = regs->syscallno; - memset(regs, 0, sizeof(*regs)); -- forget_syscall(regs); -+ regs->syscallno = previous_syscall; - regs->pc = pc; - - if (system_uses_irq_prio_masking()) -@@ -239,13 +240,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, - } - #endif - --static inline bool is_ttbr0_addr(unsigned long addr) -+static __always_inline bool is_ttbr0_addr(unsigned long addr) - { - /* entry assembly clears tags for TTBR0 addrs */ - return addr < TASK_SIZE; - } - --static inline bool is_ttbr1_addr(unsigned long addr) -+static __always_inline bool is_ttbr1_addr(unsigned long addr) - { - /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ - return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; -@@ -335,12 +336,10 @@ long get_tagged_addr_ctrl(struct task_struct *task); - * of header definitions for the use of task_stack_page. 
- */ - --#define current_top_of_stack() \ --({ \ -- struct stack_info _info; \ -- BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \ -- _info.high; \ --}) -+/* -+ * The top of the current task's task stack -+ */ -+#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE) - #define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) - - #endif /* __ASSEMBLY__ */ -diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h -index 1bce62fa908a3..56f7b1d4d54b9 100644 ---- a/arch/arm64/include/asm/rwonce.h -+++ b/arch/arm64/include/asm/rwonce.h -@@ -5,7 +5,7 @@ - #ifndef __ASM_RWONCE_H - #define __ASM_RWONCE_H - --#ifdef CONFIG_LTO -+#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__) - - #include - #include -@@ -66,7 +66,7 @@ - }) - - #endif /* !BUILD_VDSO */ --#endif /* CONFIG_LTO */ -+#endif /* CONFIG_LTO && !__ASSEMBLY__ */ - - #include - -diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h -index 8297bccf07845..5cd4d09bc69d7 100644 ---- a/arch/arm64/include/asm/scs.h -+++ b/arch/arm64/include/asm/scs.h -@@ -9,15 +9,16 @@ - #ifdef CONFIG_SHADOW_CALL_STACK - scs_sp .req x18 - -- .macro scs_load tsk -- ldr scs_sp, [\tsk, #TSK_TI_SCS_SP] -+ .macro scs_load_current -+ get_current_task scs_sp -+ ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP] - .endm - - .macro scs_save tsk - str scs_sp, [\tsk, #TSK_TI_SCS_SP] - .endm - #else -- .macro scs_load tsk -+ .macro scs_load_current - .endm - - .macro scs_save tsk -diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h -index 7bea1d705dd64..6bb0258fb4aa1 100644 ---- a/arch/arm64/include/asm/sdei.h -+++ b/arch/arm64/include/asm/sdei.h -@@ -17,6 +17,9 @@ - - #include - -+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event); -+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event); -+ - extern unsigned long sdei_exit_mode; - - /* Software Delegated Exception entry point from firmware*/ -@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num, - unsigned long pc, - unsigned long pstate); - -+/* Abort a running handler. Context is discarded. */ -+void __sdei_handler_abort(void); -+ - /* - * The above entry point does the minimum to call C code. This function does - * anything else, before calling the driver. 
-diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h -index e4ad9db53af1d..552891e626e53 100644 ---- a/arch/arm64/include/asm/sections.h -+++ b/arch/arm64/include/asm/sections.h -@@ -22,4 +22,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[]; - extern char __mmuoff_data_start[], __mmuoff_data_end[]; - extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; - -+static inline size_t entry_tramp_text_size(void) -+{ -+ return __entry_tramp_text_end - __entry_tramp_text_start; -+} -+ - #endif /* __ASM_SECTIONS_H */ -diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h -index f62ca39da6c5a..aa3d3607d5c8d 100644 ---- a/arch/arm64/include/asm/spectre.h -+++ b/arch/arm64/include/asm/spectre.h -@@ -67,7 +67,8 @@ struct bp_hardening_data { - - DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); - --static inline void arm64_apply_bp_hardening(void) -+/* Called during entry so must be __always_inline */ -+static __always_inline void arm64_apply_bp_hardening(void) - { - struct bp_hardening_data *d; - -@@ -93,5 +94,9 @@ void spectre_v4_enable_task_mitigation(struct task_struct *tsk); - - enum mitigation_state arm64_get_meltdown_state(void); - -+enum mitigation_state arm64_get_spectre_bhb_state(void); -+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); -+u8 spectre_bhb_loop_affected(int scope); -+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); - #endif /* __ASSEMBLY__ */ - #endif /* __ASM_SPECTRE_H */ -diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h -index b383b4802a7bd..d30217c21eff7 100644 ---- a/arch/arm64/include/asm/syscall_wrapper.h -+++ b/arch/arm64/include/asm/syscall_wrapper.h -@@ -8,7 +8,7 @@ - #ifndef __ASM_SYSCALL_WRAPPER_H - #define __ASM_SYSCALL_WRAPPER_H - --struct pt_regs; -+#include - - #define SC_ARM64_REGS_TO_ARGS(x, ...) \ - __MAP(x,__SC_ARGS \ -diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h -index b268082d67edd..543eb08fa8e5f 100644 ---- a/arch/arm64/include/asm/sysreg.h -+++ b/arch/arm64/include/asm/sysreg.h -@@ -109,8 +109,14 @@ - #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) - - #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) -+#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) -+#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) - #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) -+#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) -+#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) - #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) -+#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) -+#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) - - /* - * System registers, organised loosely by encoding but grouped together -@@ -180,6 +186,7 @@ - - #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) - #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) -+#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) - - #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) - #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) -@@ -764,6 +771,21 @@ - #define ID_AA64ISAR1_GPI_NI 0x0 - #define ID_AA64ISAR1_GPI_IMP_DEF 0x1 - -+/* id_aa64isar2 */ -+#define ID_AA64ISAR2_CLEARBHB_SHIFT 28 -+#define ID_AA64ISAR2_RPRES_SHIFT 4 -+#define ID_AA64ISAR2_WFXT_SHIFT 0 -+ -+#define ID_AA64ISAR2_RPRES_8BIT 0x0 -+#define ID_AA64ISAR2_RPRES_12BIT 0x1 -+/* -+ * Value 0x1 has been removed from the architecture, and is -+ * reserved, but has not yet been removed from the ARM ARM -+ * as of ARM DDI 0487G.b. 
-+ */ -+#define ID_AA64ISAR2_WFXT_NI 0x0 -+#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2 -+ - /* id_aa64pfr0 */ - #define ID_AA64PFR0_CSV3_SHIFT 60 - #define ID_AA64PFR0_CSV2_SHIFT 56 -@@ -881,6 +903,8 @@ - #endif - - /* id_aa64mmfr1 */ -+#define ID_AA64MMFR1_ECBHB_SHIFT 60 -+#define ID_AA64MMFR1_AFP_SHIFT 44 - #define ID_AA64MMFR1_ETS_SHIFT 36 - #define ID_AA64MMFR1_TWED_SHIFT 32 - #define ID_AA64MMFR1_XNX_SHIFT 28 -@@ -1076,10 +1100,6 @@ - #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ - #define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) - --/* TCR EL1 Bit Definitions */ --#define SYS_TCR_EL1_TCMA1 (BIT(58)) --#define SYS_TCR_EL1_TCMA0 (BIT(57)) -- - /* GCR_EL1 Definitions */ - #define SYS_GCR_EL1_RRND (BIT(16)) - #define SYS_GCR_EL1_EXCL_MASK 0xffffUL -diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h -index 305a7157c6a6a..0eb7709422e29 100644 ---- a/arch/arm64/include/asm/system_misc.h -+++ b/arch/arm64/include/asm/system_misc.h -@@ -23,9 +23,9 @@ void die(const char *msg, struct pt_regs *regs, int err); - struct siginfo; - void arm64_notify_die(const char *str, struct pt_regs *regs, - int signo, int sicode, unsigned long far, -- int err); -+ unsigned long err); - --void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, -+void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long, - struct pt_regs *), - int sig, int code, const char *name); - -diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h -index 54f32a0675dff..6e5826470bea6 100644 ---- a/arch/arm64/include/asm/traps.h -+++ b/arch/arm64/include/asm/traps.h -@@ -24,7 +24,7 @@ struct undef_hook { - - void register_undef_hook(struct undef_hook *hook); - void unregister_undef_hook(struct undef_hook *hook); --void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); -+void force_signal_inject(int signal, int code, unsigned long address, unsigned long err); - void arm64_notify_segfault(unsigned long addr); - void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); - void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); -@@ -57,7 +57,7 @@ static inline int in_entry_text(unsigned long ptr) - * errors share the same encoding as an all-zeros encoding from a CPU that - * doesn't support RAS. - */ --static inline bool arm64_is_ras_serror(u32 esr) -+static inline bool arm64_is_ras_serror(unsigned long esr) - { - WARN_ON(preemptible()); - -@@ -77,9 +77,9 @@ static inline bool arm64_is_ras_serror(u32 esr) - * We treat them as Uncontainable. - * Non-RAS SError's are reported as Uncontained/Uncategorized. - */ --static inline u32 arm64_ras_serror_get_severity(u32 esr) -+static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr) - { -- u32 aet = esr & ESR_ELx_AET; -+ unsigned long aet = esr & ESR_ELx_AET; - - if (!arm64_is_ras_serror(esr)) { - /* Not a RAS error, we can't interpret the ESR. 
*/ -@@ -98,6 +98,6 @@ static inline u32 arm64_ras_serror_get_severity(u32 esr) - return aet; - } - --bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr); --void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr); -+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr); -+void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr); - #endif -diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h -index 190b494e22ab9..0fd6056ba412b 100644 ---- a/arch/arm64/include/asm/uaccess.h -+++ b/arch/arm64/include/asm/uaccess.h -@@ -292,12 +292,22 @@ do { \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ - } while (0) - -+/* -+ * We must not call into the scheduler between uaccess_ttbr0_enable() and -+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, -+ * we must evaluate these outside of the critical section. -+ */ - #define __raw_get_user(x, ptr, err) \ - do { \ -+ __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \ -+ __typeof__(x) __rgu_val; \ - __chk_user_ptr(ptr); \ -+ \ - uaccess_ttbr0_enable(); \ -- __raw_get_mem("ldtr", x, ptr, err); \ -+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \ - uaccess_ttbr0_disable(); \ -+ \ -+ (x) = __rgu_val; \ - } while (0) - - #define __get_user_error(x, ptr, err) \ -@@ -321,14 +331,22 @@ do { \ - - #define get_user __get_user - -+/* -+ * We must not call into the scheduler between __uaccess_enable_tco_async() and -+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking -+ * functions, we must evaluate these outside of the critical section. -+ */ - #define __get_kernel_nofault(dst, src, type, err_label) \ - do { \ -+ __typeof__(dst) __gkn_dst = (dst); \ -+ __typeof__(src) __gkn_src = (src); \ - int __gkn_err = 0; \ - \ - __uaccess_enable_tco_async(); \ -- __raw_get_mem("ldr", *((type *)(dst)), \ -- (__force type *)(src), __gkn_err); \ -+ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \ -+ (__force type *)(__gkn_src), __gkn_err); \ - __uaccess_disable_tco_async(); \ -+ \ - if (unlikely(__gkn_err)) \ - goto err_label; \ - } while (0) -@@ -367,11 +385,19 @@ do { \ - } \ - } while (0) - -+/* -+ * We must not call into the scheduler between uaccess_ttbr0_enable() and -+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, -+ * we must evaluate these outside of the critical section. -+ */ - #define __raw_put_user(x, ptr, err) \ - do { \ -- __chk_user_ptr(ptr); \ -+ __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \ -+ __typeof__(*(ptr)) __rpu_val = (x); \ -+ __chk_user_ptr(__rpu_ptr); \ -+ \ - uaccess_ttbr0_enable(); \ -- __raw_put_mem("sttr", x, ptr, err); \ -+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \ - uaccess_ttbr0_disable(); \ - } while (0) - -@@ -396,14 +422,22 @@ do { \ - - #define put_user __put_user - -+/* -+ * We must not call into the scheduler between __uaccess_enable_tco_async() and -+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking -+ * functions, we must evaluate these outside of the critical section. 
-+ */ - #define __put_kernel_nofault(dst, src, type, err_label) \ - do { \ -+ __typeof__(dst) __pkn_dst = (dst); \ -+ __typeof__(src) __pkn_src = (src); \ - int __pkn_err = 0; \ - \ - __uaccess_enable_tco_async(); \ -- __raw_put_mem("str", *((type *)(src)), \ -- (__force type *)(dst), __pkn_err); \ -+ __raw_put_mem("str", *((type *)(__pkn_src)), \ -+ (__force type *)(__pkn_dst), __pkn_err); \ - __uaccess_disable_tco_async(); \ -+ \ - if (unlikely(__pkn_err)) \ - goto err_label; \ - } while(0) -diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h -new file mode 100644 -index 0000000000000..bc9a2145f4194 ---- /dev/null -+++ b/arch/arm64/include/asm/vectors.h -@@ -0,0 +1,73 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2022 ARM Ltd. -+ */ -+#ifndef __ASM_VECTORS_H -+#define __ASM_VECTORS_H -+ -+#include -+#include -+ -+#include -+ -+extern char vectors[]; -+extern char tramp_vectors[]; -+extern char __bp_harden_el1_vectors[]; -+ -+/* -+ * Note: the order of this enum corresponds to two arrays in entry.S: -+ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical -+ * 'full fat' vectors are used directly. -+ */ -+enum arm64_bp_harden_el1_vectors { -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+ /* -+ * Perform the BHB loop mitigation, before branching to the canonical -+ * vectors. -+ */ -+ EL1_VECTOR_BHB_LOOP, -+ -+ /* -+ * Make the SMC call for firmware mitigation, before branching to the -+ * canonical vectors. -+ */ -+ EL1_VECTOR_BHB_FW, -+ -+ /* -+ * Use the ClearBHB instruction, before branching to the canonical -+ * vectors. -+ */ -+ EL1_VECTOR_BHB_CLEAR_INSN, -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ -+ /* -+ * Remap the kernel before branching to the canonical vectors. -+ */ -+ EL1_VECTOR_KPTI, -+}; -+ -+#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+#define EL1_VECTOR_BHB_LOOP -1 -+#define EL1_VECTOR_BHB_FW -1 -+#define EL1_VECTOR_BHB_CLEAR_INSN -1 -+#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ -+/* The vectors to use on return from EL0. e.g. 
to remap the kernel */ -+DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); -+ -+#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 -+#define TRAMP_VALIAS 0ul -+#endif -+ -+static inline const char * -+arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) -+{ -+ if (arm64_kernel_unmapped_at_el0()) -+ return (char *)(TRAMP_VALIAS + SZ_2K * slot); -+ -+ WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); -+ -+ return __bp_harden_el1_vectors + SZ_2K * slot; -+} -+ -+#endif /* __ASM_VECTORS_H */ -diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h -index b8f41aa234ee1..f03731847d9df 100644 ---- a/arch/arm64/include/uapi/asm/hwcap.h -+++ b/arch/arm64/include/uapi/asm/hwcap.h -@@ -75,5 +75,8 @@ - #define HWCAP2_RNG (1 << 16) - #define HWCAP2_BTI (1 << 17) - #define HWCAP2_MTE (1 << 18) -+#define HWCAP2_ECV (1 << 19) -+#define HWCAP2_AFP (1 << 20) -+#define HWCAP2_RPRES (1 << 21) - - #endif /* _UAPI__ASM_HWCAP_H */ -diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h -index b3edde68bc3e0..323e251ed37bc 100644 ---- a/arch/arm64/include/uapi/asm/kvm.h -+++ b/arch/arm64/include/uapi/asm/kvm.h -@@ -281,6 +281,11 @@ struct kvm_arm_copy_mte_tags { - #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 - #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) - -+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3) -+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0 -+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1 -+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2 -+ - /* SVE registers */ - #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) - -diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile -index 3f1490bfb938a..749e31475e413 100644 ---- a/arch/arm64/kernel/Makefile -+++ b/arch/arm64/kernel/Makefile -@@ -74,6 +74,10 @@ obj-$(CONFIG_ARM64_MTE) += mte.o - obj-y += vdso-wrap.o - obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o - -+# Force dependency (vdso*-wrap.S includes vdso.so through incbin) -+$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so -+$(obj)/vdso32-wrap.o: $(obj)/vdso32/vdso.so -+ - obj-y += probes/ - head-y := head.o - extra-y += $(head-y) vmlinux.lds -diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c -index 3fb79b76e9d96..7bbf5104b7b7b 100644 ---- a/arch/arm64/kernel/alternative.c -+++ b/arch/arm64/kernel/alternative.c -@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature) - /* - * Check if the target PC is within an alternative block. 
- */ --static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) -+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) - { - unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); - return !(pc >= replptr && pc <= (replptr + alt->alt_len)); -@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) - - #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) - --static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) -+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) - { - u32 insn; - -@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp - return insn; - } - --static void patch_alternative(struct alt_instr *alt, -+static noinstr void patch_alternative(struct alt_instr *alt, - __le32 *origptr, __le32 *updptr, int nr_inst) - { - __le32 *replptr; -diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c -index 0e86e8b9ceddf..c5da9d1e954a0 100644 ---- a/arch/arm64/kernel/armv8_deprecated.c -+++ b/arch/arm64/kernel/armv8_deprecated.c -@@ -59,6 +59,7 @@ struct insn_emulation { - static LIST_HEAD(insn_emulation); - static int nr_insn_emulated __initdata; - static DEFINE_RAW_SPINLOCK(insn_emulation_lock); -+static DEFINE_MUTEX(insn_emulation_mutex); - - static void register_emulation_hooks(struct insn_emulation_ops *ops) - { -@@ -207,10 +208,10 @@ static int emulation_proc_handler(struct ctl_table *table, int write, - loff_t *ppos) - { - int ret = 0; -- struct insn_emulation *insn = (struct insn_emulation *) table->data; -+ struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode); - enum insn_emulation_mode prev_mode = insn->current_mode; - -- table->data = &insn->current_mode; -+ mutex_lock(&insn_emulation_mutex); - ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - - if (ret || !write || prev_mode == insn->current_mode) -@@ -223,7 +224,7 @@ static int emulation_proc_handler(struct ctl_table *table, int write, - update_insn_emulation_mode(insn, INSN_UNDEF); - } - ret: -- table->data = insn; -+ mutex_unlock(&insn_emulation_mutex); - return ret; - } - -@@ -247,7 +248,7 @@ static void __init register_insn_emulation_sysctl(void) - sysctl->maxlen = sizeof(int); - - sysctl->procname = insn->ops->name; -- sysctl->data = insn; -+ sysctl->data = &insn->current_mode; - sysctl->extra1 = &insn->min; - sysctl->extra2 = &insn->max; - sysctl->proc_handler = emulation_proc_handler; -diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c -index 587543c6c51cb..97c42be71338a 100644 ---- a/arch/arm64/kernel/cacheinfo.c -+++ b/arch/arm64/kernel/cacheinfo.c -@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, - - int init_cache_level(unsigned int cpu) - { -- unsigned int ctype, level, leaves, fw_level; -+ unsigned int ctype, level, leaves; -+ int fw_level; - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - - for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { -@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu) - else - fw_level = acpi_find_last_cache_level(cpu); - -+ if (fw_level < 0) -+ return fw_level; -+ - if (level < fw_level) { - /* - * some external caches not specified in CLIDR_EL1 -diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c -index e2c20c036442f..bf69a20bc27f9 100644 ---- 
a/arch/arm64/kernel/cpu_errata.c -+++ b/arch/arm64/kernel/cpu_errata.c -@@ -209,6 +209,21 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = { - { - ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), - }, -+ { -+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */ -+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe), -+ }, -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_2441007 -+ { -+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), -+ }, -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_2441009 -+ { -+ /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */ -+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1), -+ }, - #endif - {}, - }; -@@ -340,6 +355,50 @@ static const struct midr_range erratum_1463225[] = { - }; - #endif - -+#ifdef CONFIG_ARM64_ERRATUM_1742098 -+static struct midr_range broken_aarch32_aes[] = { -+ MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), -+ {}, -+}; -+#endif -+ -+#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { -+#ifdef CONFIG_ARM64_ERRATUM_2139208 -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_2119858 -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), -+#endif -+ {}, -+}; -+#endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */ -+ -+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE -+static const struct midr_range tsb_flush_fail_cpus[] = { -+#ifdef CONFIG_ARM64_ERRATUM_2067961 -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_2054223 -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), -+#endif -+ {}, -+}; -+#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ -+ -+#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -+static struct midr_range trbe_write_out_of_range_cpus[] = { -+#ifdef CONFIG_ARM64_ERRATUM_2253138 -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_2224489 -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), -+#endif -+ {}, -+}; -+#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ -+ - const struct arm64_cpu_capabilities arm64_errata[] = { - #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE - { -@@ -425,7 +484,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { - #endif - #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI - { -- .desc = "Qualcomm erratum 1009, or ARM erratum 1286807", -+ .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009", - .capability = ARM64_WORKAROUND_REPEAT_TLBI, - .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, - .matches = cpucap_multi_entry_cap_matches, -@@ -464,6 +523,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { - .matches = has_spectre_v4, - .cpu_enable = spectre_v4_enable_mitigation, - }, -+ { -+ .desc = "Spectre-BHB", -+ .capability = ARM64_SPECTRE_BHB, -+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, -+ .matches = is_spectre_bhb_affected, -+ .cpu_enable = spectre_bhb_enable_mitigation, -+ }, - #ifdef CONFIG_ARM64_ERRATUM_1418040 - { - .desc = "ARM erratum 1418040", -@@ -534,6 +600,52 @@ const struct arm64_cpu_capabilities arm64_errata[] = { - ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), - }, - #endif -+ -+#ifdef CONFIG_ARM64_ERRATUM_2457168 -+ { -+ .desc = "ARM erratum 2457168", -+ .capability = ARM64_WORKAROUND_2457168, -+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, -+ /* Cortex-A510 r0p0-r1p1 */ -+ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) -+ }, -+#endif -+#ifdef CONFIG_ARM64_ERRATUM_1742098 -+ { -+ .desc = "ARM erratum 1742098", -+ .capability = ARM64_WORKAROUND_1742098, -+ CAP_MIDR_RANGE_LIST(broken_aarch32_aes), -+ .type = 
ARM64_CPUCAP_LOCAL_CPU_ERRATUM, -+ }, -+#endif -+#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+ { -+ /* -+ * The erratum work around is handled within the TRBE -+ * driver and can be applied per-cpu. So, we can allow -+ * a late CPU to come online with this erratum. -+ */ -+ .desc = "ARM erratum 2119858 or 2139208", -+ .capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE, -+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, -+ CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus), -+ }, -+#endif -+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE -+ { -+ .desc = "ARM erratum 2067961 or 2054223", -+ .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, -+ ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), -+ }, -+#endif -+#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -+ { -+ .desc = "ARM erratum 2253138 or 2224489", -+ .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE, -+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, -+ CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), -+ }, -+#endif - { - } - }; -diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c -index 6ec7036ef7e18..d4ee345ff4296 100644 ---- a/arch/arm64/kernel/cpufeature.c -+++ b/arch/arm64/kernel/cpufeature.c -@@ -73,10 +73,13 @@ - #include - #include - #include -+#include -+ - #include - #include - #include - #include -+#include - #include - #include - #include -@@ -85,6 +88,7 @@ - #include - #include - #include -+#include - #include - - /* Kernel representation of AT_HWCAP and AT_HWCAP2 */ -@@ -110,6 +114,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); - bool arm64_use_ng_mappings = false; - EXPORT_SYMBOL(arm64_use_ng_mappings); - -+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; -+ - /* - * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs - * support it? 
-@@ -225,6 +231,12 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { - ARM64_FTR_END, - }; - -+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { -+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), -+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), -+ ARM64_FTR_END, -+}; -+ - static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), -@@ -279,7 +291,7 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = { - }; - - static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { -- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0), -+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0), - /* -@@ -325,6 +337,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { - }; - - static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { -+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), -@@ -524,7 +537,7 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = { - - static const struct arm64_ftr_bits ftr_id_dfr0[] = { - /* [31:28] TraceFilt */ -- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf), -+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0), -@@ -573,15 +586,19 @@ static const struct arm64_ftr_bits ftr_raz[] = { - ARM64_FTR_END, - }; - --#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \ -+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \ - .sys_id = id, \ - .reg = &(struct arm64_ftr_reg){ \ -- .name = #id, \ -+ .name = id_str, \ - .override = (ovr), \ - .ftr_bits = &((table)[0]), \ - }} - --#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override) -+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \ -+ __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr) -+ -+#define ARM64_FTR_REG(id, table) \ -+ __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override) - - struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override; - struct arm64_ftr_override __ro_after_init id_aa64pfr1_override; -@@ -633,6 +650,7 @@ static const struct __ftr_reg_entry { - ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), - ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1, - &id_aa64isar1_override), -+ ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2), - - /* Op1 = 0, CRn = 0, CRm = 7 */ - ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), -@@ -929,6 +947,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) - init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); - init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, 
info->reg_id_aa64isar0); - init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); -+ init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); - init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); - init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); - init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); -@@ -1147,6 +1166,8 @@ void update_cpu_features(int cpu, - info->reg_id_aa64isar0, boot->reg_id_aa64isar0); - taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, - info->reg_id_aa64isar1, boot->reg_id_aa64isar1); -+ taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, -+ info->reg_id_aa64isar2, boot->reg_id_aa64isar2); - - /* - * Differing PARange support is fine as long as all peripherals and -@@ -1268,6 +1289,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id) - read_sysreg_case(SYS_ID_AA64MMFR2_EL1); - read_sysreg_case(SYS_ID_AA64ISAR0_EL1); - read_sysreg_case(SYS_ID_AA64ISAR1_EL1); -+ read_sysreg_case(SYS_ID_AA64ISAR2_EL1); - - read_sysreg_case(SYS_CNTFRQ_EL0); - read_sysreg_case(SYS_CTR_EL0); -@@ -1575,6 +1597,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) - - int cpu = smp_processor_id(); - -+ if (__this_cpu_read(this_cpu_vector) == vectors) { -+ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); -+ -+ __this_cpu_write(this_cpu_vector, v); -+ } -+ - /* - * We don't need to rewrite the page-tables if either we've done - * it already or we have KASLR enabled and therefore have not -@@ -1709,7 +1737,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) - pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", - smp_processor_id()); - cpumask_set_cpu(smp_processor_id(), &amu_cpus); -- update_freq_counters_refs(); -+ -+ /* 0 reference values signal broken/disabled counters */ -+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) -+ update_freq_counters_refs(); - } - } - -@@ -1872,7 +1903,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused) - static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) - { - sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0); -- isb(); -+ -+ mte_cpu_setup(); - - /* - * Clear the tags in the zero page. 
This needs to be done via the -@@ -1885,6 +1917,14 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) - } - #endif /* CONFIG_ARM64_MTE */ - -+static void elf_hwcap_fixup(void) -+{ -+#ifdef CONFIG_ARM64_ERRATUM_1742098 -+ if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) -+ compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; -+#endif /* ARM64_ERRATUM_1742098 */ -+} -+ - #ifdef CONFIG_KVM - static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) - { -@@ -2451,6 +2491,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { - #ifdef CONFIG_ARM64_MTE - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), - #endif /* CONFIG_ARM64_MTE */ -+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), -+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), -+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), - {}, - }; - -@@ -2909,8 +2952,10 @@ void __init setup_cpu_features(void) - setup_system_capabilities(); - setup_elf_hwcaps(arm64_elf_hwcaps); - -- if (system_supports_32bit_el0()) -+ if (system_supports_32bit_el0()) { - setup_elf_hwcaps(compat_elf_hwcaps); -+ elf_hwcap_fixup(); -+ } - - if (system_uses_ttbr0_pan()) - pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); -@@ -2962,6 +3007,7 @@ static int enable_mismatched_32bit_el0(unsigned int cpu) - cpu_active_mask); - get_cpu_device(lucky_winner)->offline_disabled = true; - setup_elf_hwcaps(compat_elf_hwcaps); -+ elf_hwcap_fixup(); - pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n", - cpu, lucky_winner); - return 0; -diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c -index 03991eeff6430..3006f43248084 100644 ---- a/arch/arm64/kernel/cpuidle.c -+++ b/arch/arm64/kernel/cpuidle.c -@@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) - struct acpi_lpi_state *lpi; - struct acpi_processor *pr = per_cpu(processors, cpu); - -+ if (unlikely(!pr || !pr->flags.has_lpi)) -+ return -EINVAL; -+ - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out -@@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - -- if (unlikely(!pr || !pr->flags.has_lpi)) -- return -EINVAL; -- - count = pr->power.count - 1; - if (count <= 0) - return -ENODEV; -diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c -index 87731fea5e418..591c18a889a56 100644 ---- a/arch/arm64/kernel/cpuinfo.c -+++ b/arch/arm64/kernel/cpuinfo.c -@@ -94,6 +94,9 @@ static const char *const hwcap_str[] = { - [KERNEL_HWCAP_RNG] = "rng", - [KERNEL_HWCAP_BTI] = "bti", - [KERNEL_HWCAP_MTE] = "mte", -+ [KERNEL_HWCAP_ECV] = "ecv", -+ [KERNEL_HWCAP_AFP] = "afp", -+ [KERNEL_HWCAP_RPRES] = "rpres", - }; - - #ifdef CONFIG_COMPAT -@@ -390,6 +393,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) - info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); - info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); - info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); -+ info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); - info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); - info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); - info->reg_id_aa64mmfr2 = 
read_cpuid(ID_AA64MMFR2_EL1); -diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c -index 4f3661eeb7ec6..732f0890416de 100644 ---- a/arch/arm64/kernel/debug-monitors.c -+++ b/arch/arm64/kernel/debug-monitors.c -@@ -202,7 +202,7 @@ void unregister_kernel_step_hook(struct step_hook *hook) - * So we call all the registered handlers, until the right handler is - * found which returns zero. - */ --static int call_step_hook(struct pt_regs *regs, unsigned int esr) -+static int call_step_hook(struct pt_regs *regs, unsigned long esr) - { - struct step_hook *hook; - struct list_head *list; -@@ -238,7 +238,7 @@ static void send_user_sigtrap(int si_code) - "User debug trap"); - } - --static int single_step_handler(unsigned long unused, unsigned int esr, -+static int single_step_handler(unsigned long unused, unsigned long esr, - struct pt_regs *regs) - { - bool handler_found = false; -@@ -299,11 +299,11 @@ void unregister_kernel_break_hook(struct break_hook *hook) - unregister_debug_hook(&hook->node); - } - --static int call_break_hook(struct pt_regs *regs, unsigned int esr) -+static int call_break_hook(struct pt_regs *regs, unsigned long esr) - { - struct break_hook *hook; - struct list_head *list; -- int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; -+ int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL; - - list = user_mode(regs) ? &user_break_hook : &kernel_break_hook; - -@@ -312,7 +312,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) - * entirely not preemptible, and we can use rcu list safely here. - */ - list_for_each_entry_rcu(hook, list, node) { -- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; -+ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; - - if ((comment & ~hook->mask) == hook->imm) - fn = hook->fn; -@@ -322,7 +322,7 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) - } - NOKPROBE_SYMBOL(call_break_hook); - --static int brk_handler(unsigned long unused, unsigned int esr, -+static int brk_handler(unsigned long unused, unsigned long esr, - struct pt_regs *regs) - { - if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) -@@ -438,6 +438,11 @@ int kernel_active_single_step(void) - } - NOKPROBE_SYMBOL(kernel_active_single_step); - -+void kernel_rewind_single_step(struct pt_regs *regs) -+{ -+ set_regs_spsr_ss(regs); -+} -+ - /* ptrace API */ - void user_enable_single_step(struct task_struct *task) - { -diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S -index 75691a2641c1c..2d3c4b02393e4 100644 ---- a/arch/arm64/kernel/efi-rt-wrapper.S -+++ b/arch/arm64/kernel/efi-rt-wrapper.S -@@ -4,6 +4,7 @@ - */ - - #include -+#include - - SYM_FUNC_START(__efi_rt_asm_wrapper) - stp x29, x30, [sp, #-32]! -@@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) - */ - stp x1, x18, [sp, #16] - -+ ldr_l x16, efi_rt_stack_top -+ mov sp, x16 -+#ifdef CONFIG_SHADOW_CALL_STACK -+ str x18, [sp, #-16]! -+#endif -+ - /* - * We are lucky enough that no EFI runtime services take more than - * 5 arguments, so all are passed in registers rather than via the -@@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) - mov x4, x6 - blr x8 - -+ mov sp, x29 - ldp x1, x2, [sp, #16] - cmp x2, x18 - ldp x29, x30, [sp], #32 -@@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) - * called with preemption disabled and a separate shadow stack is used - * for interrupts. 
- */ -- mov x18, x2 -+#ifdef CONFIG_SHADOW_CALL_STACK -+ ldr_l x18, efi_rt_stack_top -+ ldr x18, [x18, #-16] -+#endif -+ - b efi_handle_corrupted_x18 // tail call - SYM_FUNC_END(__efi_rt_asm_wrapper) -diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c -index e1be6c429810d..9669f3fa2aefe 100644 ---- a/arch/arm64/kernel/efi.c -+++ b/arch/arm64/kernel/efi.c -@@ -12,6 +12,14 @@ - - #include - -+static bool region_is_misaligned(const efi_memory_desc_t *md) -+{ -+ if (PAGE_SIZE == EFI_PAGE_SIZE) -+ return false; -+ return !PAGE_ALIGNED(md->phys_addr) || -+ !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT); -+} -+ - /* - * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be - * executable, everything else can be mapped with the XN bits -@@ -25,14 +33,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md) - if (type == EFI_MEMORY_MAPPED_IO) - return PROT_DEVICE_nGnRE; - -- if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr), -- "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?")) -+ if (region_is_misaligned(md)) { -+ static bool __initdata code_is_misaligned; -+ - /* -- * If the region is not aligned to the page size of the OS, we -- * can not use strict permissions, since that would also affect -- * the mapping attributes of the adjacent regions. -+ * Regions that are not aligned to the OS page size cannot be -+ * mapped with strict permissions, as those might interfere -+ * with the permissions that are needed by the adjacent -+ * region's mapping. However, if we haven't encountered any -+ * misaligned runtime code regions so far, we can safely use -+ * non-executable permissions for non-code regions. - */ -- return pgprot_val(PAGE_KERNEL_EXEC); -+ code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE); -+ -+ return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC) -+ : pgprot_val(PAGE_KERNEL); -+ } - - /* R-- */ - if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == -@@ -63,19 +79,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) - bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE || - md->type == EFI_RUNTIME_SERVICES_DATA); - -- if (!PAGE_ALIGNED(md->phys_addr) || -- !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) { -- /* -- * If the end address of this region is not aligned to page -- * size, the mapping is rounded up, and may end up sharing a -- * page frame with the next UEFI memory region. If we create -- * a block entry now, we may need to split it again when mapping -- * the next region, and support for that is going to be removed -- * from the MMU routines. So avoid block mappings altogether in -- * that case. -- */ -+ /* -+ * If this region is not aligned to the page size used by the OS, the -+ * mapping will be rounded outwards, and may end up sharing a page -+ * frame with an adjacent runtime memory region. Given that the page -+ * table descriptor covering the shared page will be rewritten when the -+ * adjacent region gets mapped, we must avoid block mappings here so we -+ * don't have to worry about splitting them when that happens. 
-+ */ -+ if (region_is_misaligned(md)) - page_mappings_only = true; -- } - - create_pgd_mapping(mm, md->phys_addr, md->virt_addr, - md->num_pages << EFI_PAGE_SHIFT, -@@ -102,6 +115,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm, - BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && - md->type != EFI_RUNTIME_SERVICES_DATA); - -+ if (region_is_misaligned(md)) -+ return 0; -+ - /* - * Calling apply_to_page_range() is only safe on regions that are - * guaranteed to be mapped down to pages. Since we are only called -@@ -128,3 +144,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) - pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); - return s; - } -+ -+DEFINE_RAW_SPINLOCK(efi_rt_lock); -+ -+asmlinkage u64 *efi_rt_stack_top __ro_after_init; -+ -+/* EFI requires 8 KiB of stack space for runtime services */ -+static_assert(THREAD_SIZE >= SZ_8K); -+ -+static int __init arm64_efi_rt_init(void) -+{ -+ void *p; -+ -+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) -+ return 0; -+ -+ p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, -+ NUMA_NO_NODE, &&l); -+l: if (!p) { -+ pr_warn("Failed to allocate EFI runtime stack\n"); -+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); -+ return -ENOMEM; -+ } -+ -+ efi_rt_stack_top = p + THREAD_SIZE; -+ return 0; -+} -+core_initcall(arm64_efi_rt_init); -diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c -index 32f9796c4ffe7..fc91dad1579ab 100644 ---- a/arch/arm64/kernel/entry-common.c -+++ b/arch/arm64/kernel/entry-common.c -@@ -72,7 +72,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs) - if (interrupts_enabled(regs)) { - if (regs->exit_rcu) { - trace_hardirqs_on_prepare(); -- lockdep_hardirqs_on_prepare(CALLER_ADDR0); -+ lockdep_hardirqs_on_prepare(); - rcu_irq_exit(); - lockdep_hardirqs_on(CALLER_ADDR0); - return; -@@ -117,7 +117,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs) - static __always_inline void __exit_to_user_mode(void) - { - trace_hardirqs_on_prepare(); -- lockdep_hardirqs_on_prepare(CALLER_ADDR0); -+ lockdep_hardirqs_on_prepare(); - user_enter_irqoff(); - lockdep_hardirqs_on(CALLER_ADDR0); - } -@@ -175,7 +175,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs) - ftrace_nmi_exit(); - if (restore) { - trace_hardirqs_on_prepare(); -- lockdep_hardirqs_on_prepare(CALLER_ADDR0); -+ lockdep_hardirqs_on_prepare(); - } - - rcu_nmi_exit(); -@@ -211,7 +211,7 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs) - - if (restore) { - trace_hardirqs_on_prepare(); -- lockdep_hardirqs_on_prepare(CALLER_ADDR0); -+ lockdep_hardirqs_on_prepare(); - } - - rcu_nmi_exit(); -@@ -273,13 +273,13 @@ extern void (*handle_arch_irq)(struct pt_regs *); - extern void (*handle_arch_fiq)(struct pt_regs *); - - static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, -- unsigned int esr) -+ unsigned long esr) - { - arm64_enter_nmi(regs); - - console_verbose(); - -- pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", -+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n", - vector, smp_processor_id(), esr, - esr_get_class_string(esr)); - -@@ -320,7 +320,8 @@ static void cortex_a76_erratum_1463225_svc_handler(void) - __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); - } - --static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) -+static __always_inline bool -+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) - { - if 
(!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) - return false; -@@ -795,7 +796,7 @@ UNHANDLED(el0t, 32, error) - #ifdef CONFIG_VMAP_STACK - asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) - { -- unsigned int esr = read_sysreg(esr_el1); -+ unsigned long esr = read_sysreg(esr_el1); - unsigned long far = read_sysreg(far_el1); - - arm64_enter_nmi(regs); -diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S -index b3e4f9a088b1a..8cf970d219f5d 100644 ---- a/arch/arm64/kernel/entry-ftrace.S -+++ b/arch/arm64/kernel/entry-ftrace.S -@@ -77,11 +77,17 @@ - .endm - - SYM_CODE_START(ftrace_regs_caller) -+#ifdef BTI_C -+ BTI_C -+#endif - ftrace_regs_entry 1 - b ftrace_common - SYM_CODE_END(ftrace_regs_caller) - - SYM_CODE_START(ftrace_caller) -+#ifdef BTI_C -+ BTI_C -+#endif - ftrace_regs_entry 0 - b ftrace_common - SYM_CODE_END(ftrace_caller) -diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S -index bc6d5a970a131..bdc5f744249bb 100644 ---- a/arch/arm64/kernel/entry.S -+++ b/arch/arm64/kernel/entry.S -@@ -37,18 +37,21 @@ - - .macro kernel_ventry, el:req, ht:req, regsize:req, label:req - .align 7 --#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -+.Lventry_start\@: - .if \el == 0 --alternative_if ARM64_UNMAP_KERNEL_AT_EL0 -+ /* -+ * This must be the first instruction of the EL0 vector entries. It is -+ * skipped by the trampoline vectors, to trigger the cleanup. -+ */ -+ b .Lskip_tramp_vectors_cleanup\@ - .if \regsize == 64 - mrs x30, tpidrro_el0 - msr tpidrro_el0, xzr - .else - mov x30, xzr - .endif --alternative_else_nop_endif -+.Lskip_tramp_vectors_cleanup\@: - .endif --#endif - - sub sp, sp, #PT_REGS_SIZE - #ifdef CONFIG_VMAP_STACK -@@ -95,11 +98,15 @@ alternative_else_nop_endif - mrs x0, tpidrro_el0 - #endif - b el\el\ht\()_\regsize\()_\label -+.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? 
- .endm - -- .macro tramp_alias, dst, sym -+ .macro tramp_alias, dst, sym, tmp - mov_q \dst, TRAMP_VALIAS -- add \dst, \dst, #(\sym - .entry.tramp.text) -+ adr_l \tmp, \sym -+ add \dst, \dst, \tmp -+ adr_l \tmp, .entry.tramp.text -+ sub \dst, \dst, \tmp - .endm - - /* -@@ -116,7 +123,7 @@ alternative_cb_end - tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 - mov w1, #\state --alternative_cb spectre_v4_patch_fw_mitigation_conduit -+alternative_cb smccc_patch_fw_mitigation_conduit - nop // Patched to SMC/HVC #0 - alternative_cb_end - .L__asm_ssbd_skip\@: -@@ -265,7 +272,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH - alternative_else_nop_endif - 1: - -- scs_load tsk -+ scs_load_current - .else - add x21, sp, #PT_REGS_SIZE - get_current_task tsk -@@ -413,21 +420,26 @@ alternative_else_nop_endif - ldp x24, x25, [sp, #16 * 12] - ldp x26, x27, [sp, #16 * 13] - ldp x28, x29, [sp, #16 * 14] -- ldr lr, [sp, #S_LR] -- add sp, sp, #PT_REGS_SIZE // restore sp - - .if \el == 0 --alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 -+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 -+ ldr lr, [sp, #S_LR] -+ add sp, sp, #PT_REGS_SIZE // restore sp -+ eret -+alternative_else_nop_endif - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - bne 4f -- msr far_el1, x30 -- tramp_alias x30, tramp_exit_native -+ msr far_el1, x29 -+ tramp_alias x30, tramp_exit_native, x29 - br x30 - 4: -- tramp_alias x30, tramp_exit_compat -+ tramp_alias x30, tramp_exit_compat, x29 - br x30 - #endif - .else -+ ldr lr, [sp, #S_LR] -+ add sp, sp, #PT_REGS_SIZE // restore sp -+ - /* Ensure any device/NC reads complete */ - alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412 - -@@ -594,12 +606,6 @@ SYM_CODE_END(ret_to_user) - - .popsection // .entry.text - --#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 --/* -- * Exception vectors trampoline. -- */ -- .pushsection ".entry.tramp.text", "ax" -- - // Move from tramp_pg_dir to swapper_pg_dir - .macro tramp_map_kernel, tmp - mrs \tmp, ttbr1_el1 -@@ -633,12 +639,47 @@ alternative_else_nop_endif - */ - .endm - -- .macro tramp_ventry, regsize = 64 -+ .macro tramp_data_page dst -+ adr_l \dst, .entry.tramp.text -+ sub \dst, \dst, PAGE_SIZE -+ .endm -+ -+ .macro tramp_data_read_var dst, var -+#ifdef CONFIG_RANDOMIZE_BASE -+ tramp_data_page \dst -+ add \dst, \dst, #:lo12:__entry_tramp_data_\var -+ ldr \dst, [\dst] -+#else -+ ldr \dst, =\var -+#endif -+ .endm -+ -+#define BHB_MITIGATION_NONE 0 -+#define BHB_MITIGATION_LOOP 1 -+#define BHB_MITIGATION_FW 2 -+#define BHB_MITIGATION_INSN 3 -+ -+ .macro tramp_ventry, vector_start, regsize, kpti, bhb - .align 7 - 1: - .if \regsize == 64 - msr tpidrro_el0, x30 // Restored in kernel_ventry - .endif -+ -+ .if \bhb == BHB_MITIGATION_LOOP -+ /* -+ * This sequence must appear before the first indirect branch. i.e. the -+ * ret out of tramp_ventry. It appears here because x30 is free. -+ */ -+ __mitigate_spectre_bhb_loop x30 -+ .endif // \bhb == BHB_MITIGATION_LOOP -+ -+ .if \bhb == BHB_MITIGATION_INSN -+ clearbhb -+ isb -+ .endif // \bhb == BHB_MITIGATION_INSN -+ -+ .if \kpti == 1 - /* - * Defend against branch aliasing attacks by pushing a dummy - * entry onto the return stack and using a RET instruction to -@@ -648,46 +689,75 @@ alternative_else_nop_endif - b . 
- 2: - tramp_map_kernel x30 --#ifdef CONFIG_RANDOMIZE_BASE -- adr x30, tramp_vectors + PAGE_SIZE - alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 -- ldr x30, [x30] --#else -- ldr x30, =vectors --#endif -+ tramp_data_read_var x30, vectors - alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM -- prfm plil1strm, [x30, #(1b - tramp_vectors)] -+ prfm plil1strm, [x30, #(1b - \vector_start)] - alternative_else_nop_endif -+ - msr vbar_el1, x30 -- add x30, x30, #(1b - tramp_vectors) - isb -+ .else -+ ldr x30, =vectors -+ .endif // \kpti == 1 -+ -+ .if \bhb == BHB_MITIGATION_FW -+ /* -+ * The firmware sequence must appear before the first indirect branch. -+ * i.e. the ret out of tramp_ventry. But it also needs the stack to be -+ * mapped to save/restore the registers the SMC clobbers. -+ */ -+ __mitigate_spectre_bhb_fw -+ .endif // \bhb == BHB_MITIGATION_FW -+ -+ add x30, x30, #(1b - \vector_start + 4) - ret -+.org 1b + 128 // Did we overflow the ventry slot? - .endm - - .macro tramp_exit, regsize = 64 -- adr x30, tramp_vectors -+ tramp_data_read_var x30, this_cpu_vector -+ get_this_cpu_offset x29 -+ ldr x30, [x30, x29] -+ - msr vbar_el1, x30 -- tramp_unmap_kernel x30 -+ ldr lr, [sp, #S_LR] -+ tramp_unmap_kernel x29 - .if \regsize == 64 -- mrs x30, far_el1 -+ mrs x29, far_el1 - .endif -+ add sp, sp, #PT_REGS_SIZE // restore sp - eret - sb - .endm - -- .align 11 --SYM_CODE_START_NOALIGN(tramp_vectors) -+ .macro generate_tramp_vector, kpti, bhb -+.Lvector_start\@: - .space 0x400 - -- tramp_ventry -- tramp_ventry -- tramp_ventry -- tramp_ventry -+ .rept 4 -+ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb -+ .endr -+ .rept 4 -+ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb -+ .endr -+ .endm - -- tramp_ventry 32 -- tramp_ventry 32 -- tramp_ventry 32 -- tramp_ventry 32 -+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -+/* -+ * Exception vectors trampoline. -+ * The order must match __bp_harden_el1_vectors and the -+ * arm64_bp_harden_el1_vectors enum. -+ */ -+ .pushsection ".entry.tramp.text", "ax" -+ .align 11 -+SYM_CODE_START_NOALIGN(tramp_vectors) -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP -+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW -+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE - SYM_CODE_END(tramp_vectors) - - SYM_CODE_START(tramp_exit_native) -@@ -704,12 +774,56 @@ SYM_CODE_END(tramp_exit_compat) - .pushsection ".rodata", "a" - .align PAGE_SHIFT - SYM_DATA_START(__entry_tramp_data_start) -+__entry_tramp_data_vectors: - .quad vectors -+#ifdef CONFIG_ARM_SDE_INTERFACE -+__entry_tramp_data___sdei_asm_handler: -+ .quad __sdei_asm_handler -+#endif /* CONFIG_ARM_SDE_INTERFACE */ -+__entry_tramp_data_this_cpu_vector: -+ .quad this_cpu_vector - SYM_DATA_END(__entry_tramp_data_start) - .popsection // .rodata - #endif /* CONFIG_RANDOMIZE_BASE */ - #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ - -+/* -+ * Exception vectors for spectre mitigations on entry from EL1 when -+ * kpti is not in use. 
-+ */ -+ .macro generate_el1_vector, bhb -+.Lvector_start\@: -+ kernel_ventry 1, t, 64, sync // Synchronous EL1t -+ kernel_ventry 1, t, 64, irq // IRQ EL1t -+ kernel_ventry 1, t, 64, fiq // FIQ EL1h -+ kernel_ventry 1, t, 64, error // Error EL1t -+ -+ kernel_ventry 1, h, 64, sync // Synchronous EL1h -+ kernel_ventry 1, h, 64, irq // IRQ EL1h -+ kernel_ventry 1, h, 64, fiq // FIQ EL1h -+ kernel_ventry 1, h, 64, error // Error EL1h -+ -+ .rept 4 -+ tramp_ventry .Lvector_start\@, 64, 0, \bhb -+ .endr -+ .rept 4 -+ tramp_ventry .Lvector_start\@, 32, 0, \bhb -+ .endr -+ .endm -+ -+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ -+ .pushsection ".entry.text", "ax" -+ .align 11 -+SYM_CODE_START(__bp_harden_el1_vectors) -+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY -+ generate_el1_vector bhb=BHB_MITIGATION_LOOP -+ generate_el1_vector bhb=BHB_MITIGATION_FW -+ generate_el1_vector bhb=BHB_MITIGATION_INSN -+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ -+SYM_CODE_END(__bp_harden_el1_vectors) -+ .popsection -+ -+ - /* - * Register switch for AArch64. The callee-saved registers need to be saved - * and restored. On entry: -@@ -741,7 +855,7 @@ SYM_FUNC_START(cpu_switch_to) - msr sp_el0, x1 - ptrauth_keys_install_kernel x1, x8, x9, x10 - scs_save x0 -- scs_load x1 -+ scs_load_current - ret - SYM_FUNC_END(cpu_switch_to) - NOKPROBE(cpu_switch_to) -@@ -769,19 +883,19 @@ NOKPROBE(ret_from_fork) - */ - SYM_FUNC_START(call_on_irq_stack) - #ifdef CONFIG_SHADOW_CALL_STACK -- stp scs_sp, xzr, [sp, #-16]! -+ get_current_task x16 -+ scs_save x16 - ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 - #endif -+ - /* Create a frame record to save our LR and SP (implicit in FP) */ - stp x29, x30, [sp, #-16]! - mov x29, sp - - ldr_this_cpu x16, irq_stack_ptr, x17 -- mov x15, #IRQ_STACK_SIZE -- add x16, x16, x15 - - /* Move to the new stack and call the function there */ -- mov sp, x16 -+ add sp, x16, #IRQ_STACK_SIZE - blr x1 - - /* -@@ -790,9 +904,7 @@ SYM_FUNC_START(call_on_irq_stack) - */ - mov sp, x29 - ldp x29, x30, [sp], #16 --#ifdef CONFIG_SHADOW_CALL_STACK -- ldp scs_sp, xzr, [sp], #16 --#endif -+ scs_load_current - ret - SYM_FUNC_END(call_on_irq_stack) - NOKPROBE(call_on_irq_stack) -@@ -835,14 +947,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline) - * Remember whether to unmap the kernel on exit. 
- */ - 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] -- --#ifdef CONFIG_RANDOMIZE_BASE -- adr x4, tramp_vectors + PAGE_SIZE -- add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler -- ldr x4, [x4] --#else -- ldr x4, =__sdei_asm_handler --#endif -+ tramp_data_read_var x4, __sdei_asm_handler - br x4 - SYM_CODE_END(__sdei_asm_entry_trampoline) - NOKPROBE(__sdei_asm_entry_trampoline) -@@ -865,13 +970,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline) - NOKPROBE(__sdei_asm_exit_trampoline) - .ltorg - .popsection // .entry.tramp.text --#ifdef CONFIG_RANDOMIZE_BASE --.pushsection ".rodata", "a" --SYM_DATA_START(__sdei_asm_trampoline_next_handler) -- .quad __sdei_asm_handler --SYM_DATA_END(__sdei_asm_trampoline_next_handler) --.popsection // .rodata --#endif /* CONFIG_RANDOMIZE_BASE */ - #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ - - /* -@@ -907,9 +1005,13 @@ SYM_CODE_START(__sdei_asm_handler) - - mov x19, x1 - --#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK) -+ /* Store the registered-event for crash_smp_send_stop() */ - ldrb w4, [x19, #SDEI_EVENT_PRIORITY] --#endif -+ cbnz w4, 1f -+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 -+ b 2f -+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 -+2: str x19, [x5] - - #ifdef CONFIG_VMAP_STACK - /* -@@ -974,14 +1076,33 @@ SYM_CODE_START(__sdei_asm_handler) - - ldr_l x2, sdei_exit_mode - -+ /* Clear the registered-event seen by crash_smp_send_stop() */ -+ ldrb w3, [x4, #SDEI_EVENT_PRIORITY] -+ cbnz w3, 1f -+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 -+ b 2f -+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 -+2: str xzr, [x5] -+ - alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 - sdei_handler_exit exit_mode=x2 - alternative_else_nop_endif - - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -- tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline -+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 - br x5 - #endif - SYM_CODE_END(__sdei_asm_handler) - NOKPROBE(__sdei_asm_handler) -+ -+SYM_CODE_START(__sdei_handler_abort) -+ mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME -+ adr x1, 1f -+ ldr_l x2, sdei_exit_mode -+ sdei_handler_exit exit_mode=x2 -+ // exit the handler and jump to the next instruction. -+ // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx. -+1: ret -+SYM_CODE_END(__sdei_handler_abort) -+NOKPROBE(__sdei_handler_abort) - #endif /* CONFIG_ARM_SDE_INTERFACE */ -diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index ff4962750b3d0..7a3fcf21b18a7 100644 ---- a/arch/arm64/kernel/fpsimd.c -+++ b/arch/arm64/kernel/fpsimd.c -@@ -930,7 +930,7 @@ void fpsimd_release_task(struct task_struct *dead_task) - * would have disabled the SVE access trap for userspace during - * ret_to_user, making an SVE access trap impossible in that case. - */ --void do_sve_acc(unsigned int esr, struct pt_regs *regs) -+void do_sve_acc(unsigned long esr, struct pt_regs *regs) - { - /* Even if we chose not to use SVE, the hardware could still trap: */ - if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { -@@ -972,7 +972,7 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs) - /* - * Trapped FP/ASIMD access. - */ --void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) -+void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs) - { - /* TODO: implement lazy context saving/restoring */ - WARN_ON(1); -@@ -981,7 +981,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) - /* - * Raise a SIGFPE for the current process. 
- */ --void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) -+void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs) - { - unsigned int si_code = FPE_FLTUNK; - -diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c -index 7f467bd9db7a3..dba774f3b8d7c 100644 ---- a/arch/arm64/kernel/ftrace.c -+++ b/arch/arm64/kernel/ftrace.c -@@ -78,47 +78,76 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) - } - - /* -- * Turn on the call to ftrace_caller() in instrumented function -+ * Find the address the callsite must branch to in order to reach '*addr'. -+ * -+ * Due to the limited range of 'BL' instructions, modules may be placed too far -+ * away to branch directly and must use a PLT. -+ * -+ * Returns true when '*addr' contains a reachable target address, or has been -+ * modified to contain a PLT address. Returns false otherwise. - */ --int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) -+static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, -+ struct module *mod, -+ unsigned long *addr) - { - unsigned long pc = rec->ip; -- u32 old, new; -- long offset = (long)pc - (long)addr; -+ long offset = (long)*addr - (long)pc; -+ struct plt_entry *plt; - -- if (offset < -SZ_128M || offset >= SZ_128M) { -- struct module *mod; -- struct plt_entry *plt; -+ /* -+ * When the target is within range of the 'BL' instruction, use 'addr' -+ * as-is and branch to that directly. -+ */ -+ if (offset >= -SZ_128M && offset < SZ_128M) -+ return true; - -- if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) -- return -EINVAL; -+ /* -+ * When the target is outside of the range of a 'BL' instruction, we -+ * must use a PLT to reach it. We can only place PLTs for modules, and -+ * only when module PLT support is built-in. -+ */ -+ if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) -+ return false; - -- /* -- * On kernels that support module PLTs, the offset between the -- * branch instruction and its target may legally exceed the -- * range of an ordinary relative 'bl' opcode. In this case, we -- * need to branch via a trampoline in the module. -- * -- * NOTE: __module_text_address() must be called with preemption -- * disabled, but we can rely on ftrace_lock to ensure that 'mod' -- * retains its validity throughout the remainder of this code. -- */ -+ /* -+ * 'mod' is only set at module load time, but if we end up -+ * dealing with an out-of-range condition, we can assume it -+ * is due to a module being loaded far away from the kernel. -+ * -+ * NOTE: __module_text_address() must be called with preemption -+ * disabled, but we can rely on ftrace_lock to ensure that 'mod' -+ * retains its validity throughout the remainder of this code. 
-+ */ -+ if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); -+ } - -- if (WARN_ON(!mod)) -- return -EINVAL; -- -- plt = get_ftrace_plt(mod, addr); -- if (!plt) { -- pr_err("ftrace: no module PLT for %ps\n", (void *)addr); -- return -EINVAL; -- } -+ if (WARN_ON(!mod)) -+ return false; - -- addr = (unsigned long)plt; -+ plt = get_ftrace_plt(mod, *addr); -+ if (!plt) { -+ pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); -+ return false; - } - -+ *addr = (unsigned long)plt; -+ return true; -+} -+ -+/* -+ * Turn on the call to ftrace_caller() in instrumented function -+ */ -+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) -+{ -+ unsigned long pc = rec->ip; -+ u32 old, new; -+ -+ if (!ftrace_find_callable_addr(rec, NULL, &addr)) -+ return -EINVAL; -+ - old = aarch64_insn_gen_nop(); - new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); - -@@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - unsigned long pc = rec->ip; - u32 old, new; - -+ if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) -+ return -EINVAL; -+ if (!ftrace_find_callable_addr(rec, NULL, &addr)) -+ return -EINVAL; -+ - old = aarch64_insn_gen_branch_imm(pc, old_addr, - AARCH64_INSN_BRANCH_LINK); - new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); -@@ -181,54 +215,30 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, - unsigned long addr) - { - unsigned long pc = rec->ip; -- bool validate = true; - u32 old = 0, new; -- long offset = (long)pc - (long)addr; -- -- if (offset < -SZ_128M || offset >= SZ_128M) { -- u32 replaced; -- -- if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) -- return -EINVAL; - -- /* -- * 'mod' is only set at module load time, but if we end up -- * dealing with an out-of-range condition, we can assume it -- * is due to a module being loaded far away from the kernel. -- */ -- if (!mod) { -- preempt_disable(); -- mod = __module_text_address(pc); -- preempt_enable(); -- -- if (WARN_ON(!mod)) -- return -EINVAL; -- } -- -- /* -- * The instruction we are about to patch may be a branch and -- * link instruction that was redirected via a PLT entry. In -- * this case, the normal validation will fail, but we can at -- * least check that we are dealing with a branch and link -- * instruction that points into the right module. -- */ -- if (aarch64_insn_read((void *)pc, &replaced)) -- return -EFAULT; -- -- if (!aarch64_insn_is_bl(replaced) || -- !within_module(pc + aarch64_get_branch_offset(replaced), -- mod)) -- return -EINVAL; -+ new = aarch64_insn_gen_nop(); - -- validate = false; -- } else { -- old = aarch64_insn_gen_branch_imm(pc, addr, -- AARCH64_INSN_BRANCH_LINK); -+ /* -+ * When using mcount, callsites in modules may have been initalized to -+ * call an arbitrary module PLT (which redirects to the _mcount stub) -+ * rather than the ftrace PLT we'll use at runtime (which redirects to -+ * the ftrace trampoline). We can ignore the old PLT when initializing -+ * the callsite. -+ * -+ * Note: 'mod' is only set at module load time. 
-+ */ -+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && -+ IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) { -+ return aarch64_insn_patch_text_nosync((void *)pc, new); - } - -- new = aarch64_insn_gen_nop(); -+ if (!ftrace_find_callable_addr(rec, mod, &addr)) -+ return -EINVAL; - -- return ftrace_modify_code(pc, old, new, validate); -+ old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); -+ -+ return ftrace_modify_code(pc, old, new, true); - } - - void arch_ftrace_update_code(int command) -diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S -index 17962452e31de..512a921edad59 100644 ---- a/arch/arm64/kernel/head.S -+++ b/arch/arm64/kernel/head.S -@@ -285,7 +285,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables) - subs x1, x1, #64 - b.ne 1b - -- mov x7, SWAPPER_MM_MMUFLAGS -+ mov_q x7, SWAPPER_MM_MMUFLAGS - - /* - * Create the identity mapping. -@@ -409,7 +409,7 @@ SYM_FUNC_END(__create_page_tables) - stp xzr, xzr, [sp, #S_STACKFRAME] - add x29, sp, #S_STACKFRAME - -- scs_load \tsk -+ scs_load_current - - adr_l \tmp1, __per_cpu_offset - ldr w\tmp2, [\tsk, #TSK_CPU] -diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c -index 712e97c03e54c..2a7f21314cde6 100644 ---- a/arch/arm64/kernel/hw_breakpoint.c -+++ b/arch/arm64/kernel/hw_breakpoint.c -@@ -617,7 +617,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers); - /* - * Debug exception handlers. - */ --static int breakpoint_handler(unsigned long unused, unsigned int esr, -+static int breakpoint_handler(unsigned long unused, unsigned long esr, - struct pt_regs *regs) - { - int i, step = 0, *kernel_step; -@@ -751,7 +751,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr, - return step; - } - --static int watchpoint_handler(unsigned long addr, unsigned int esr, -+static int watchpoint_handler(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { - int i, step = 0, *kernel_step, access, closest_match = 0; -diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h -index c96a9a0043bf4..e03e60f9482b4 100644 ---- a/arch/arm64/kernel/image-vars.h -+++ b/arch/arm64/kernel/image-vars.h -@@ -66,6 +66,10 @@ KVM_NVHE_ALIAS(kvm_patch_vector_branch); - KVM_NVHE_ALIAS(kvm_update_va_mask); - KVM_NVHE_ALIAS(kvm_get_kimage_voffset); - KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0); -+KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter); -+KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); -+KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); -+KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb); - - /* Global kernel state accessed by nVHE hyp code. 
*/ - KVM_NVHE_ALIAS(kvm_vgic_global_state); -diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c -index 2aede780fb80c..4e1f983df3d1c 100644 ---- a/arch/arm64/kernel/kgdb.c -+++ b/arch/arm64/kernel/kgdb.c -@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, - */ - if (!kernel_active_single_step()) - kernel_enable_single_step(linux_regs); -+ else -+ kernel_rewind_single_step(linux_regs); - err = 0; - break; - default: -@@ -232,14 +234,14 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, - return err; - } - --static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) -+static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr) - { - kgdb_handle_exception(1, SIGTRAP, 0, regs); - return DBG_HOOK_HANDLED; - } - NOKPROBE_SYMBOL(kgdb_brk_fn) - --static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) -+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr) - { - compiled_break = 1; - kgdb_handle_exception(1, SIGTRAP, 0, regs); -@@ -248,7 +250,7 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) - } - NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); - --static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) -+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr) - { - if (!kgdb_single_step) - return DBG_HOOK_ERROR; -diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c -index 63634b4d72c15..59c648d518488 100644 ---- a/arch/arm64/kernel/machine_kexec_file.c -+++ b/arch/arm64/kernel/machine_kexec_file.c -@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image, - initrd_len, cmdline, 0); - if (!dtb) { - pr_err("Preparing for new dtb failed\n"); -+ ret = -EINVAL; - goto out_err; - } - -diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c -index e53493d8b208b..08fcbcb40d882 100644 ---- a/arch/arm64/kernel/module-plts.c -+++ b/arch/arm64/kernel/module-plts.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - #include - - static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc, -@@ -342,7 +343,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - if (nents) - sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL); - -- if (!str_has_prefix(secstrings + dstsec->sh_name, ".init")) -+ if (!module_init_layout_section(secstrings + dstsec->sh_name)) - core_plts += count_plts(syms, rels, numrels, - sechdrs[i].sh_info, dstsec); - else -diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c -index b5ec010c481f3..309a27553c875 100644 ---- a/arch/arm64/kernel/module.c -+++ b/arch/arm64/kernel/module.c -@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size) - module_alloc_end = MODULES_END; - - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, -- module_alloc_end, gfp_mask, PAGE_KERNEL, 0, -+ module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK, - NUMA_NO_NODE, __builtin_return_address(0)); - - if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && -@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size) - PAGE_KERNEL, 0, NUMA_NO_NODE, - __builtin_return_address(0)); - -- if (p && (kasan_module_alloc(p, size) < 0)) { -+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { - vfree(p); - return NULL; - } -diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c -index e5e801bc53122..a3898bac5ae6f 100644 ---- a/arch/arm64/kernel/mte.c -+++ b/arch/arm64/kernel/mte.c -@@ -53,7 +53,12 @@ static void mte_sync_page_tags(struct page 
*page, pte_t old_pte, - * the new page->flags are visible before the tags were updated. - */ - smp_wmb(); -- mte_clear_page_tags(page_address(page)); -+ /* -+ * Test PG_mte_tagged again in case it was racing with another -+ * set_pte_at(). -+ */ -+ if (!test_and_set_bit(PG_mte_tagged, &page->flags)) -+ mte_clear_page_tags(page_address(page)); - } - - void mte_sync_tags(pte_t old_pte, pte_t pte) -@@ -69,10 +74,13 @@ void mte_sync_tags(pte_t old_pte, pte_t pte) - - /* if PG_mte_tagged is set, tags have already been initialised */ - for (i = 0; i < nr_pages; i++, page++) { -- if (!test_and_set_bit(PG_mte_tagged, &page->flags)) -+ if (!test_bit(PG_mte_tagged, &page->flags)) - mte_sync_page_tags(page, old_pte, check_swap, - pte_is_tagged); - } -+ -+ /* ensure the tags are visible before the PTE is set */ -+ smp_wmb(); - } - - int memcmp_pages(struct page *page1, struct page *page2) -@@ -210,6 +218,49 @@ void mte_thread_switch(struct task_struct *next) - mte_check_tfsr_el1(); - } - -+void mte_cpu_setup(void) -+{ -+ u64 rgsr; -+ -+ /* -+ * CnP must be enabled only after the MAIR_EL1 register has been set -+ * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may -+ * lead to the wrong memory type being used for a brief window during -+ * CPU power-up. -+ * -+ * CnP is not a boot feature so MTE gets enabled before CnP, but let's -+ * make sure that is the case. -+ */ -+ BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT); -+ BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT); -+ -+ /* Normal Tagged memory type at the corresponding MAIR index */ -+ sysreg_clear_set(mair_el1, -+ MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED), -+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED, -+ MT_NORMAL_TAGGED)); -+ -+ write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1); -+ -+ /* -+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then -+ * RGSR_EL1.SEED must be non-zero for IRG to produce -+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we -+ * must initialize it. 
-+ */ -+ rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) << -+ SYS_RGSR_EL1_SEED_SHIFT; -+ if (rgsr == 0) -+ rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT; -+ write_sysreg_s(rgsr, SYS_RGSR_EL1); -+ -+ /* clear any pending tag check faults in TFSR*_EL1 */ -+ write_sysreg_s(0, SYS_TFSR_EL1); -+ write_sysreg_s(0, SYS_TFSRE0_EL1); -+ -+ local_flush_tlb_all(); -+} -+ - void mte_suspend_enter(void) - { - if (!system_supports_mte()) -@@ -226,6 +277,14 @@ void mte_suspend_enter(void) - mte_check_tfsr_el1(); - } - -+void mte_suspend_exit(void) -+{ -+ if (!system_supports_mte()) -+ return; -+ -+ mte_cpu_setup(); -+} -+ - long set_mte_ctrl(struct task_struct *task, unsigned long arg) - { - u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) & -diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c -index 75fed4460407d..57c7c211f8c71 100644 ---- a/arch/arm64/kernel/paravirt.c -+++ b/arch/arm64/kernel/paravirt.c -@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu) - DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); - - struct pv_time_stolen_time_region { -- struct pvclock_vcpu_stolen_time *kaddr; -+ struct pvclock_vcpu_stolen_time __rcu *kaddr; - }; - - static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region); -@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc); - /* return stolen time in ns by asking the hypervisor */ - static u64 para_steal_clock(int cpu) - { -+ struct pvclock_vcpu_stolen_time *kaddr = NULL; - struct pv_time_stolen_time_region *reg; -+ u64 ret = 0; - - reg = per_cpu_ptr(&stolen_time_region, cpu); - -@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu) - * online notification callback runs. Until the callback - * has run we just return zero. - */ -- if (!reg->kaddr) -+ rcu_read_lock(); -+ kaddr = rcu_dereference(reg->kaddr); -+ if (!kaddr) { -+ rcu_read_unlock(); - return 0; -+ } - -- return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time)); -+ ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time)); -+ rcu_read_unlock(); -+ return ret; - } - - static int stolen_time_cpu_down_prepare(unsigned int cpu) - { -+ struct pvclock_vcpu_stolen_time *kaddr = NULL; - struct pv_time_stolen_time_region *reg; - - reg = this_cpu_ptr(&stolen_time_region); - if (!reg->kaddr) - return 0; - -- memunmap(reg->kaddr); -- memset(reg, 0, sizeof(*reg)); -+ kaddr = rcu_replace_pointer(reg->kaddr, NULL, true); -+ synchronize_rcu(); -+ memunmap(kaddr); - - return 0; - } - - static int stolen_time_cpu_online(unsigned int cpu) - { -+ struct pvclock_vcpu_stolen_time *kaddr = NULL; - struct pv_time_stolen_time_region *reg; - struct arm_smccc_res res; - -@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu) - if (res.a0 == SMCCC_RET_NOT_SUPPORTED) - return -EINVAL; - -- reg->kaddr = memremap(res.a0, -+ kaddr = memremap(res.a0, - sizeof(struct pvclock_vcpu_stolen_time), - MEMREMAP_WB); - -+ rcu_assign_pointer(reg->kaddr, kaddr); -+ - if (!reg->kaddr) { - pr_warn("Failed to map stolen time data structure\n"); - return -ENOMEM; - } - -- if (le32_to_cpu(reg->kaddr->revision) != 0 || -- le32_to_cpu(reg->kaddr->attributes) != 0) { -+ if (le32_to_cpu(kaddr->revision) != 0 || -+ le32_to_cpu(kaddr->attributes) != 0) { - pr_warn_once("Unexpected revision or attributes in stolen time data\n"); - return -ENXIO; - } -diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c -index 771f543464e06..33e0fabc0b79b 100644 ---- a/arch/arm64/kernel/patching.c -+++ b/arch/arm64/kernel/patching.c -@@ -117,8 +117,8 @@ static int __kprobes 
aarch64_insn_patch_text_cb(void *arg) - int i, ret = 0; - struct aarch64_insn_patch *pp = arg; - -- /* The first CPU becomes master */ -- if (atomic_inc_return(&pp->cpu_count) == 1) { -+ /* The last CPU becomes master */ -+ if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) { - for (i = 0; ret == 0 && i < pp->insn_cnt; i++) - ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], - pp->new_insns[i]); -diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c -index 4a72c27273097..86d9f20131723 100644 ---- a/arch/arm64/kernel/perf_callchain.c -+++ b/arch/arm64/kernel/perf_callchain.c -@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail, - void perf_callchain_user(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc) - void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct stackframe frame; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - - unsigned long perf_instruction_pointer(struct pt_regs *regs) - { -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) -- return perf_guest_cbs->get_guest_ip(); -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ -+ if (guest_cbs && guest_cbs->is_in_guest()) -+ return guest_cbs->get_guest_ip(); - - return instruction_pointer(regs); - } - - unsigned long perf_misc_flags(struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - int misc = 0; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -- if (perf_guest_cbs->is_user_mode()) -+ if (guest_cbs && guest_cbs->is_in_guest()) { -+ if (guest_cbs->is_user_mode()) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; -diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c -index 6dbcc89f66627..2162b6fd7251d 100644 ---- a/arch/arm64/kernel/probes/kprobes.c -+++ b/arch/arm64/kernel/probes/kprobes.c -@@ -7,6 +7,9 @@ - * Copyright (C) 2013 Linaro Limited. 
- * Author: Sandeepa Prabhu - */ -+ -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -218,7 +221,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, - break; - case KPROBE_HIT_SS: - case KPROBE_REENTER: -- pr_warn("Unrecoverable kprobe detected.\n"); -+ pr_warn("Failed to recover from reentered kprobes.\n"); - dump_kprobe(p); - BUG(); - break; -@@ -332,7 +335,7 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) - } - - static int __kprobes --kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) -+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr) - { - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long addr = instruction_pointer(regs); -@@ -356,7 +359,7 @@ static struct break_hook kprobes_break_ss_hook = { - }; - - static int __kprobes --kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) -+kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr) - { - kprobe_handler(regs); - return DBG_HOOK_HANDLED; -diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c -index 9be668f3f0341..d49aef2657cdf 100644 ---- a/arch/arm64/kernel/probes/uprobes.c -+++ b/arch/arm64/kernel/probes/uprobes.c -@@ -166,7 +166,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, - } - - static int uprobe_breakpoint_handler(struct pt_regs *regs, -- unsigned int esr) -+ unsigned long esr) - { - if (uprobe_pre_sstep_notifier(regs)) - return DBG_HOOK_HANDLED; -@@ -175,7 +175,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs, - } - - static int uprobe_single_step_handler(struct pt_regs *regs, -- unsigned int esr) -+ unsigned long esr) - { - struct uprobe_task *utask = current->utask; - -diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c -index 40adb8cdbf5af..23efabcb00b85 100644 ---- a/arch/arm64/kernel/process.c -+++ b/arch/arm64/kernel/process.c -@@ -439,34 +439,26 @@ static void entry_task_switch(struct task_struct *next) - - /* - * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. -- * Assuming the virtual counter is enabled at the beginning of times: -- * -- * - disable access when switching from a 64bit task to a 32bit task -- * - enable access when switching from a 32bit task to a 64bit task -+ * Ensure access is disabled when switching to a 32bit task, ensure -+ * access is enabled when switching to a 64bit task. 
- */ --static void erratum_1418040_thread_switch(struct task_struct *prev, -- struct task_struct *next) -+static void erratum_1418040_thread_switch(struct task_struct *next) - { -- bool prev32, next32; -- u64 val; -- -- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040)) -- return; -- -- prev32 = is_compat_thread(task_thread_info(prev)); -- next32 = is_compat_thread(task_thread_info(next)); -- -- if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) -+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || -+ !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) - return; - -- val = read_sysreg(cntkctl_el1); -- -- if (!next32) -- val |= ARCH_TIMER_USR_VCT_ACCESS_EN; -+ if (is_compat_thread(task_thread_info(next))) -+ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); - else -- val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; -+ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); -+} - -- write_sysreg(val, cntkctl_el1); -+static void erratum_1418040_new_exec(void) -+{ -+ preempt_disable(); -+ erratum_1418040_thread_switch(current); -+ preempt_enable(); - } - - /* -@@ -501,7 +493,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, - contextidr_thread_switch(next); - entry_task_switch(next); - ssbs_thread_switch(next); -- erratum_1418040_thread_switch(prev, next); -+ erratum_1418040_thread_switch(next); - ptrauth_thread_switch_user(next); - - /* -@@ -613,6 +605,7 @@ void arch_setup_new_exec(void) - current->mm->context.flags = mmflags; - ptrauth_thread_init_user(); - mte_thread_init_user(); -+ erratum_1418040_new_exec(); - - if (task_spec_ssb_noexec(current)) { - arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, -diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c -index 902e4084c4775..428cfabd11c49 100644 ---- a/arch/arm64/kernel/proton-pack.c -+++ b/arch/arm64/kernel/proton-pack.c -@@ -18,15 +18,18 @@ - */ - - #include -+#include - #include - #include - #include - #include - #include - -+#include - #include - #include - #include -+#include - #include - - /* -@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void) - return ret; - } - -+static const char *get_bhb_affected_string(enum mitigation_state bhb_state) -+{ -+ switch (bhb_state) { -+ case SPECTRE_UNAFFECTED: -+ return ""; -+ default: -+ case SPECTRE_VULNERABLE: -+ return ", but not BHB"; -+ case SPECTRE_MITIGATED: -+ return ", BHB"; -+ } -+} -+ -+static bool _unprivileged_ebpf_enabled(void) -+{ -+#ifdef CONFIG_BPF_SYSCALL -+ return !sysctl_unprivileged_bpf_disabled; -+#else -+ return false; -+#endif -+} -+ - ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, - char *buf) - { -+ enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); -+ const char *bhb_str = get_bhb_affected_string(bhb_state); -+ const char *v2_str = "Branch predictor hardening"; -+ - switch (spectre_v2_state) { - case SPECTRE_UNAFFECTED: -- return sprintf(buf, "Not affected\n"); -+ if (bhb_state == SPECTRE_UNAFFECTED) -+ return sprintf(buf, "Not affected\n"); -+ -+ /* -+ * Platforms affected by Spectre-BHB can't report -+ * "Not affected" for Spectre-v2. 
-+ */ -+ v2_str = "CSV2"; -+ fallthrough; - case SPECTRE_MITIGATED: -- return sprintf(buf, "Mitigation: Branch predictor hardening\n"); -+ if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled()) -+ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); -+ -+ return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); - case SPECTRE_VULNERABLE: - fallthrough; - default: -@@ -193,17 +233,20 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn) - __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT); - } - --static void call_smc_arch_workaround_1(void) -+/* Called during entry so must be noinstr */ -+static noinstr void call_smc_arch_workaround_1(void) - { - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); - } - --static void call_hvc_arch_workaround_1(void) -+/* Called during entry so must be noinstr */ -+static noinstr void call_hvc_arch_workaround_1(void) - { - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); - } - --static void qcom_link_stack_sanitisation(void) -+/* Called during entry so must be noinstr */ -+static noinstr void qcom_link_stack_sanitisation(void) - { - u64 tmp; - -@@ -554,9 +597,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, - * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction - * to call into firmware to adjust the mitigation state. - */ --void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt, -- __le32 *origptr, -- __le32 *updptr, int nr_inst) -+void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, -+ __le32 *origptr, -+ __le32 *updptr, int nr_inst) - { - u32 insn; - -@@ -770,3 +813,351 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) - return -ENODEV; - } - } -+ -+/* -+ * Spectre BHB. -+ * -+ * A CPU is either: -+ * - Mitigated by a branchy loop a CPU specific number of times, and listed -+ * in our "loop mitigated list". -+ * - Mitigated in software by the firmware Spectre v2 call. -+ * - Has the ClearBHB instruction to perform the mitigation. -+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no -+ * software mitigation in the vectors is needed. -+ * - Has CSV2.3, so is unaffected. -+ */ -+static enum mitigation_state spectre_bhb_state; -+ -+enum mitigation_state arm64_get_spectre_bhb_state(void) -+{ -+ return spectre_bhb_state; -+} -+ -+enum bhb_mitigation_bits { -+ BHB_LOOP, -+ BHB_FW, -+ BHB_HW, -+ BHB_INSN, -+}; -+static unsigned long system_bhb_mitigations; -+ -+/* -+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any -+ * SCOPE_SYSTEM call will give the right answer. 
-+ */ -+u8 spectre_bhb_loop_affected(int scope) -+{ -+ u8 k = 0; -+ static u8 max_bhb_k; -+ -+ if (scope == SCOPE_LOCAL_CPU) { -+ static const struct midr_range spectre_bhb_k32_list[] = { -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), -+ {}, -+ }; -+ static const struct midr_range spectre_bhb_k24_list[] = { -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), -+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), -+ {}, -+ }; -+ static const struct midr_range spectre_bhb_k11_list[] = { -+ MIDR_ALL_VERSIONS(MIDR_AMPERE1), -+ {}, -+ }; -+ static const struct midr_range spectre_bhb_k8_list[] = { -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), -+ {}, -+ }; -+ -+ if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) -+ k = 32; -+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) -+ k = 24; -+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) -+ k = 11; -+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) -+ k = 8; -+ -+ max_bhb_k = max(max_bhb_k, k); -+ } else { -+ k = max_bhb_k; -+ } -+ -+ return k; -+} -+ -+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) -+{ -+ int ret; -+ struct arm_smccc_res res; -+ -+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, -+ ARM_SMCCC_ARCH_WORKAROUND_3, &res); -+ -+ ret = res.a0; -+ switch (ret) { -+ case SMCCC_RET_SUCCESS: -+ return SPECTRE_MITIGATED; -+ case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: -+ return SPECTRE_UNAFFECTED; -+ default: -+ fallthrough; -+ case SMCCC_RET_NOT_SUPPORTED: -+ return SPECTRE_VULNERABLE; -+ } -+} -+ -+static bool is_spectre_bhb_fw_affected(int scope) -+{ -+ static bool system_affected; -+ enum mitigation_state fw_state; -+ bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; -+ static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), -+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), -+ {}, -+ }; -+ bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), -+ spectre_bhb_firmware_mitigated_list); -+ -+ if (scope != SCOPE_LOCAL_CPU) -+ return system_affected; -+ -+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); -+ if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { -+ system_affected = true; -+ return true; -+ } -+ -+ return false; -+} -+ -+static bool supports_ecbhb(int scope) -+{ -+ u64 mmfr1; -+ -+ if (scope == SCOPE_LOCAL_CPU) -+ mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); -+ else -+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); -+ -+ return cpuid_feature_extract_unsigned_field(mmfr1, -+ ID_AA64MMFR1_ECBHB_SHIFT); -+} -+ -+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, -+ int scope) -+{ -+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); -+ -+ if (supports_csv2p3(scope)) -+ return false; -+ -+ if (supports_clearbhb(scope)) -+ return true; -+ -+ if (spectre_bhb_loop_affected(scope)) -+ return true; -+ -+ if (is_spectre_bhb_fw_affected(scope)) -+ return true; -+ -+ return false; -+} -+ -+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) -+{ -+ const char *v = arm64_get_bp_hardening_vector(slot); -+ -+ if (slot < 0) -+ return; -+ -+ __this_cpu_write(this_cpu_vector, v); -+ -+ /* -+ * When 
KPTI is in use, the vectors are switched when exiting to -+ * user-space. -+ */ -+ if (arm64_kernel_unmapped_at_el0()) -+ return; -+ -+ write_sysreg(v, vbar_el1); -+ isb(); -+} -+ -+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) -+{ -+ bp_hardening_cb_t cpu_cb; -+ enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; -+ struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); -+ -+ if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) -+ return; -+ -+ if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) { -+ /* No point mitigating Spectre-BHB alone. */ -+ } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { -+ pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); -+ } else if (cpu_mitigations_off()) { -+ pr_info_once("spectre-bhb mitigation disabled by command line option\n"); -+ } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { -+ state = SPECTRE_MITIGATED; -+ set_bit(BHB_HW, &system_bhb_mitigations); -+ } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { -+ /* -+ * Ensure KVM uses the indirect vector which will have ClearBHB -+ * added. -+ */ -+ if (!data->slot) -+ data->slot = HYP_VECTOR_INDIRECT; -+ -+ this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); -+ state = SPECTRE_MITIGATED; -+ set_bit(BHB_INSN, &system_bhb_mitigations); -+ } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { -+ /* -+ * Ensure KVM uses the indirect vector which will have the -+ * branchy-loop added. A57/A72-r0 will already have selected -+ * the spectre-indirect vector, which is sufficient for BHB -+ * too. -+ */ -+ if (!data->slot) -+ data->slot = HYP_VECTOR_INDIRECT; -+ -+ this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); -+ state = SPECTRE_MITIGATED; -+ set_bit(BHB_LOOP, &system_bhb_mitigations); -+ } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { -+ fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); -+ if (fw_state == SPECTRE_MITIGATED) { -+ /* -+ * Ensure KVM uses one of the spectre bp_hardening -+ * vectors. The indirect vector doesn't include the EL3 -+ * call, so needs upgrading to -+ * HYP_VECTOR_SPECTRE_INDIRECT. -+ */ -+ if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) -+ data->slot += 1; -+ -+ this_cpu_set_vectors(EL1_VECTOR_BHB_FW); -+ -+ /* -+ * The WA3 call in the vectors supersedes the WA1 call -+ * made during context-switch. Uninstall any firmware -+ * bp_hardening callback. 
-+ */ -+ cpu_cb = spectre_v2_get_sw_mitigation_cb(); -+ if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) -+ __this_cpu_write(bp_hardening_data.fn, NULL); -+ -+ state = SPECTRE_MITIGATED; -+ set_bit(BHB_FW, &system_bhb_mitigations); -+ } -+ } -+ -+ update_mitigation_state(&spectre_bhb_state, state); -+} -+ -+/* Patched to NOP when enabled */ -+void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, -+ __le32 *origptr, -+ __le32 *updptr, int nr_inst) -+{ -+ BUG_ON(nr_inst != 1); -+ -+ if (test_bit(BHB_LOOP, &system_bhb_mitigations)) -+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); -+} -+ -+/* Patched to NOP when enabled */ -+void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, -+ __le32 *origptr, -+ __le32 *updptr, int nr_inst) -+{ -+ BUG_ON(nr_inst != 1); -+ -+ if (test_bit(BHB_FW, &system_bhb_mitigations)) -+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); -+} -+ -+/* Patched to correct the immediate */ -+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, -+ __le32 *origptr, __le32 *updptr, int nr_inst) -+{ -+ u8 rd; -+ u32 insn; -+ u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); -+ -+ BUG_ON(nr_inst != 1); /* MOV -> MOV */ -+ -+ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) -+ return; -+ -+ insn = le32_to_cpu(*origptr); -+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); -+ insn = aarch64_insn_gen_movewide(rd, loop_count, 0, -+ AARCH64_INSN_VARIANT_64BIT, -+ AARCH64_INSN_MOVEWIDE_ZERO); -+ *updptr++ = cpu_to_le32(insn); -+} -+ -+/* Patched to mov WA3 when supported */ -+void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt, -+ __le32 *origptr, __le32 *updptr, int nr_inst) -+{ -+ u8 rd; -+ u32 insn; -+ -+ BUG_ON(nr_inst != 1); /* MOV -> MOV */ -+ -+ if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || -+ !test_bit(BHB_FW, &system_bhb_mitigations)) -+ return; -+ -+ insn = le32_to_cpu(*origptr); -+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); -+ -+ insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR, -+ AARCH64_INSN_VARIANT_32BIT, -+ AARCH64_INSN_REG_ZR, rd, -+ ARM_SMCCC_ARCH_WORKAROUND_3); -+ if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT)) -+ return; -+ -+ *updptr++ = cpu_to_le32(insn); -+} -+ -+/* Patched to NOP when not supported */ -+void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt, -+ __le32 *origptr, __le32 *updptr, int nr_inst) -+{ -+ BUG_ON(nr_inst != 2); -+ -+ if (test_bit(BHB_INSN, &system_bhb_mitigations)) -+ return; -+ -+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); -+ *updptr++ = cpu_to_le32(aarch64_insn_gen_nop()); -+} -+ -+#ifdef CONFIG_BPF_SYSCALL -+#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n" -+void unpriv_ebpf_notify(int new_state) -+{ -+ if (spectre_v2_state == SPECTRE_VULNERABLE || -+ spectre_bhb_state != SPECTRE_MITIGATED) -+ return; -+ -+ if (!new_state) -+ pr_err("WARNING: %s", EBPF_WARN); -+} -+#endif -diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c -index 47f77d1234cb6..532611d07bdcb 100644 ---- a/arch/arm64/kernel/sdei.c -+++ b/arch/arm64/kernel/sdei.c -@@ -47,6 +47,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr); - DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr); - #endif - -+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event); -+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event); -+ - static void _free_sdei_stack(unsigned long * __percpu *ptr, int 
cpu) - { - unsigned long *p; -diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c -index c287b9407f287..b3e1beccf4588 100644 ---- a/arch/arm64/kernel/signal.c -+++ b/arch/arm64/kernel/signal.c -@@ -577,10 +577,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user, - { - int err; - -- err = sigframe_alloc(user, &user->fpsimd_offset, -- sizeof(struct fpsimd_context)); -- if (err) -- return err; -+ if (system_supports_fpsimd()) { -+ err = sigframe_alloc(user, &user->fpsimd_offset, -+ sizeof(struct fpsimd_context)); -+ if (err) -+ return err; -+ } - - /* fault information, if valid */ - if (add_all || current->thread.fault_code) { -@@ -1010,6 +1012,7 @@ static_assert(offsetof(siginfo_t, si_upper) == 0x28); - static_assert(offsetof(siginfo_t, si_pkey) == 0x20); - static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); - static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); -+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); - static_assert(offsetof(siginfo_t, si_band) == 0x10); - static_assert(offsetof(siginfo_t, si_fd) == 0x18); - static_assert(offsetof(siginfo_t, si_call_addr) == 0x10); -diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c -index d984282b979f8..4700f8522d27b 100644 ---- a/arch/arm64/kernel/signal32.c -+++ b/arch/arm64/kernel/signal32.c -@@ -487,6 +487,7 @@ static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18); - static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14); - static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10); - static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14); -+static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18); - static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c); - static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10); - static_assert(offsetof(compat_siginfo_t, si_call_addr) == 0x0c); -diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c -index 6f6ff072acbde..bc29cc044a4d7 100644 ---- a/arch/arm64/kernel/smp.c -+++ b/arch/arm64/kernel/smp.c -@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void) - * Log the CPU info before it is marked online and might get read. - */ - cpuinfo_store_cpu(); -+ store_cpu_topology(cpu); - - /* - * Enable GIC and timers. -@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void) - - ipi_setup(cpu); - -- store_cpu_topology(cpu); - numa_add_cpu(cpu); - - /* -@@ -1073,10 +1073,8 @@ void crash_smp_send_stop(void) - * If this cpu is the only one alive at this point in time, online or - * not, there are no stop messages to be sent around, so just back out. 
- */ -- if (num_other_online_cpus() == 0) { -- sdei_mask_local_cpu(); -- return; -- } -+ if (num_other_online_cpus() == 0) -+ goto skip_ipi; - - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); -@@ -1095,7 +1093,9 @@ void crash_smp_send_stop(void) - pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", - cpumask_pr_args(&mask)); - -+skip_ipi: - sdei_mask_local_cpu(); -+ sdei_handler_abort(); - } - - bool smp_crash_stop_failed(void) -diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c -index 8982a2b78acfc..3b8dc538a4c42 100644 ---- a/arch/arm64/kernel/stacktrace.c -+++ b/arch/arm64/kernel/stacktrace.c -@@ -33,7 +33,7 @@ - */ - - --void start_backtrace(struct stackframe *frame, unsigned long fp, -+notrace void start_backtrace(struct stackframe *frame, unsigned long fp, - unsigned long pc) - { - frame->fp = fp; -@@ -55,6 +55,7 @@ void start_backtrace(struct stackframe *frame, unsigned long fp, - frame->prev_fp = 0; - frame->prev_type = STACK_TYPE_UNKNOWN; - } -+NOKPROBE_SYMBOL(start_backtrace); - - /* - * Unwind from one frame record (A) to the next frame record (B). -diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c -index 19ee7c33769d3..d473ec204fef7 100644 ---- a/arch/arm64/kernel/suspend.c -+++ b/arch/arm64/kernel/suspend.c -@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void) - { - unsigned int cpu = smp_processor_id(); - -+ mte_suspend_exit(); -+ - /* - * We are resuming from reset with the idmap active in TTBR0_EL1. - * We must uninstall the idmap and restore the expected MMU -diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c -index db5159a3055fc..b88a52f7188fc 100644 ---- a/arch/arm64/kernel/sys_compat.c -+++ b/arch/arm64/kernel/sys_compat.c -@@ -114,6 +114,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno) - addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4); - - arm64_notify_die("Oops - bad compat syscall(2)", regs, -- SIGILL, ILL_ILLTRP, addr, scno); -+ SIGILL, ILL_ILLTRP, addr, 0); - return 0; - } -diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c -index 4dd14a6620c17..877c68f472822 100644 ---- a/arch/arm64/kernel/topology.c -+++ b/arch/arm64/kernel/topology.c -@@ -22,46 +22,6 @@ - #include - #include - --void store_cpu_topology(unsigned int cpuid) --{ -- struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; -- u64 mpidr; -- -- if (cpuid_topo->package_id != -1) -- goto topology_populated; -- -- mpidr = read_cpuid_mpidr(); -- -- /* Uniprocessor systems can rely on default topology values */ -- if (mpidr & MPIDR_UP_BITMASK) -- return; -- -- /* -- * This would be the place to create cpu topology based on MPIDR. -- * -- * However, it cannot be trusted to depict the actual topology; some -- * pieces of the architecture enforce an artificial cap on Aff0 values -- * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an -- * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up -- * having absolutely no relationship to the actual underlying system -- * topology, and cannot be reasonably used as core / package ID. -- * -- * If the MT bit is set, Aff0 *could* be used to define a thread ID, but -- * we still wouldn't be able to obtain a sane core ID. This means we -- * need to entirely ignore MPIDR for any topology deduction. 
-- */ -- cpuid_topo->thread_id = -1; -- cpuid_topo->core_id = cpuid; -- cpuid_topo->package_id = cpu_to_node(cpuid); -- -- pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", -- cpuid, cpuid_topo->package_id, cpuid_topo->core_id, -- cpuid_topo->thread_id, mpidr); -- --topology_populated: -- update_siblings_masks(cpuid); --} -- - #ifdef CONFIG_ACPI - static bool __init acpi_cpu_is_threaded(int cpu) - { -@@ -249,7 +209,7 @@ static void amu_fie_setup(const struct cpumask *cpus) - for_each_cpu(cpu, cpus) { - if (!freq_counters_valid(cpu) || - freq_inv_set_max_ratio(cpu, -- cpufreq_get_hw_max_freq(cpu) * 1000, -+ cpufreq_get_hw_max_freq(cpu) * 1000ULL, - arch_timer_get_rate())) - return; - } -@@ -308,12 +268,25 @@ core_initcall(init_amu_fie); - - static void cpu_read_corecnt(void *val) - { -+ /* -+ * A value of 0 can be returned if the current CPU does not support AMUs -+ * or if the counter is disabled for this CPU. A return value of 0 at -+ * counter read is properly handled as an error case by the users of the -+ * counter. -+ */ - *(u64 *)val = read_corecnt(); - } - - static void cpu_read_constcnt(void *val) - { -- *(u64 *)val = read_constcnt(); -+ /* -+ * Return 0 if the current CPU is affected by erratum 2457168. A value -+ * of 0 is also returned if the current CPU does not support AMUs or if -+ * the counter is disabled. A return value of 0 at counter read is -+ * properly handled as an error case by the users of the counter. -+ */ -+ *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ? -+ 0UL : read_constcnt(); - } - - static inline -@@ -340,7 +313,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) - */ - bool cpc_ffh_supported(void) - { -- return freq_counters_valid(get_cpu_with_amu_feat()); -+ int cpu = get_cpu_with_amu_feat(); -+ -+ /* -+ * FFH is considered supported if there is at least one present CPU that -+ * supports AMUs. Using FFH to read core and reference counters for CPUs -+ * that do not support AMUs, have counters disabled or that are affected -+ * by errata, will result in a return value of 0. -+ * -+ * This is done to allow any enabled and valid counters to be read -+ * through FFH, knowing that potentially returning 0 as counter value is -+ * properly handled by the users of these counters. 
-+ */ -+ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) -+ return false; -+ -+ return true; - } - - int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) -diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c -index b03e383d944ab..21e69a991bc83 100644 ---- a/arch/arm64/kernel/traps.c -+++ b/arch/arm64/kernel/traps.c -@@ -235,7 +235,7 @@ void die(const char *str, struct pt_regs *regs, int err) - raw_spin_unlock_irqrestore(&die_lock, flags); - - if (ret != NOTIFY_STOP) -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - static void arm64_show_signal(int signo, const char *str) -@@ -243,7 +243,7 @@ static void arm64_show_signal(int signo, const char *str) - static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, - DEFAULT_RATELIMIT_BURST); - struct task_struct *tsk = current; -- unsigned int esr = tsk->thread.fault_code; -+ unsigned long esr = tsk->thread.fault_code; - struct pt_regs *regs = task_pt_regs(tsk); - - /* Leave if the signal won't be shown */ -@@ -254,7 +254,7 @@ static void arm64_show_signal(int signo, const char *str) - - pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); - if (esr) -- pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr); -+ pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr); - - pr_cont("%s", str); - print_vma_addr(KERN_CONT " in ", regs->pc); -@@ -288,7 +288,7 @@ void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, - - void arm64_notify_die(const char *str, struct pt_regs *regs, - int signo, int sicode, unsigned long far, -- int err) -+ unsigned long err) - { - if (user_mode(regs)) { - WARN_ON(regs != current_pt_regs()); -@@ -440,7 +440,7 @@ exit: - return fn ? fn(regs, instr) : 1; - } - --void force_signal_inject(int signal, int code, unsigned long address, unsigned int err) -+void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) - { - const char *desc; - struct pt_regs *regs = current_pt_regs(); -@@ -507,7 +507,7 @@ void do_bti(struct pt_regs *regs) - } - NOKPROBE_SYMBOL(do_bti); - --void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr) -+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr) - { - /* - * Unexpected FPAC exception or pointer authentication failure in -@@ -538,7 +538,7 @@ NOKPROBE_SYMBOL(do_ptrauth_fault); - uaccess_ttbr0_disable(); \ - } - --static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) -+static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) - { - unsigned long tagged_address, address; - int rt = ESR_ELx_SYS64_ISS_RT(esr); -@@ -578,7 +578,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); - } - --static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) -+static void ctr_read_handler(unsigned long esr, struct pt_regs *regs) - { - int rt = ESR_ELx_SYS64_ISS_RT(esr); - unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0); -@@ -597,7 +597,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); - } - --static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) -+static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) - { - int rt = ESR_ELx_SYS64_ISS_RT(esr); - -@@ -605,7 +605,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs) - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); - } - 
--static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) -+static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) - { - int rt = ESR_ELx_SYS64_ISS_RT(esr); - -@@ -613,7 +613,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); - } - --static void mrs_handler(unsigned int esr, struct pt_regs *regs) -+static void mrs_handler(unsigned long esr, struct pt_regs *regs) - { - u32 sysreg, rt; - -@@ -624,15 +624,15 @@ static void mrs_handler(unsigned int esr, struct pt_regs *regs) - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); - } - --static void wfi_handler(unsigned int esr, struct pt_regs *regs) -+static void wfi_handler(unsigned long esr, struct pt_regs *regs) - { - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); - } - - struct sys64_hook { -- unsigned int esr_mask; -- unsigned int esr_val; -- void (*handler)(unsigned int esr, struct pt_regs *regs); -+ unsigned long esr_mask; -+ unsigned long esr_val; -+ void (*handler)(unsigned long esr, struct pt_regs *regs); - }; - - static const struct sys64_hook sys64_hooks[] = { -@@ -675,7 +675,7 @@ static const struct sys64_hook sys64_hooks[] = { - }; - - #ifdef CONFIG_COMPAT --static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) -+static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs) - { - int cond; - -@@ -695,7 +695,7 @@ static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs) - return aarch32_opcode_cond_checks[cond](regs->pstate); - } - --static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) -+static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) - { - int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; - -@@ -712,7 +712,7 @@ static const struct sys64_hook cp15_32_hooks[] = { - {}, - }; - --static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) -+static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) - { - int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; - int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; -@@ -732,7 +732,7 @@ static const struct sys64_hook cp15_64_hooks[] = { - {}, - }; - --void do_cp15instr(unsigned int esr, struct pt_regs *regs) -+void do_cp15instr(unsigned long esr, struct pt_regs *regs) - { - const struct sys64_hook *hook, *hook_base; - -@@ -773,7 +773,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs) - NOKPROBE_SYMBOL(do_cp15instr); - #endif - --void do_sysinstr(unsigned int esr, struct pt_regs *regs) -+void do_sysinstr(unsigned long esr, struct pt_regs *regs) - { - const struct sys64_hook *hook; - -@@ -837,7 +837,7 @@ static const char *esr_class_str[] = { - [ESR_ELx_EC_BRK64] = "BRK (AArch64)", - }; - --const char *esr_get_class_string(u32 esr) -+const char *esr_get_class_string(unsigned long esr) - { - return esr_class_str[ESR_ELx_EC(esr)]; - } -@@ -846,7 +846,7 @@ const char *esr_get_class_string(u32 esr) - * bad_el0_sync handles unexpected, but potentially recoverable synchronous - * exceptions taken from EL0. 
- */ --void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) -+void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) - { - unsigned long pc = instruction_pointer(regs); - -@@ -862,7 +862,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) - DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) - __aligned(16); - --void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) -+void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) - { - unsigned long tsk_stk = (unsigned long)current->stack; - unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); -@@ -871,7 +871,7 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) - console_verbose(); - pr_emerg("Insufficient stack space to handle exception!"); - -- pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr)); -+ pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr)); - pr_emerg("FAR: 0x%016lx\n", far); - - pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", -@@ -892,11 +892,11 @@ void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) - } - #endif - --void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) -+void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) - { - console_verbose(); - -- pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n", -+ pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n", - smp_processor_id(), esr, esr_get_class_string(esr)); - if (regs) - __show_regs(regs); -@@ -907,9 +907,9 @@ void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr) - unreachable(); - } - --bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) -+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr) - { -- u32 aet = arm64_ras_serror_get_severity(esr); -+ unsigned long aet = arm64_ras_serror_get_severity(esr); - - switch (aet) { - case ESR_ELx_AET_CE: /* corrected error */ -@@ -939,7 +939,7 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) - } - } - --void do_serror(struct pt_regs *regs, unsigned int esr) -+void do_serror(struct pt_regs *regs, unsigned long esr) - { - /* non-RAS errors are not containable */ - if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) -@@ -960,7 +960,7 @@ int is_valid_bugaddr(unsigned long addr) - return 1; - } - --static int bug_handler(struct pt_regs *regs, unsigned int esr) -+static int bug_handler(struct pt_regs *regs, unsigned long esr) - { - switch (report_bug(regs->pc, regs)) { - case BUG_TRAP_TYPE_BUG: -@@ -985,10 +985,10 @@ static struct break_hook bug_break_hook = { - .imm = BUG_BRK_IMM, - }; - --static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr) -+static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) - { - pr_err("%s generated an invalid instruction at %pS!\n", -- in_bpf_jit(regs) ? 
"BPF JIT" : "Kernel text patching", -+ "Kernel text patching", - (void *)instruction_pointer(regs)); - - /* We cannot handle this */ -@@ -1007,7 +1007,7 @@ static struct break_hook fault_break_hook = { - #define KASAN_ESR_SIZE_MASK 0x0f - #define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK)) - --static int kasan_handler(struct pt_regs *regs, unsigned int esr) -+static int kasan_handler(struct pt_regs *regs, unsigned long esr) - { - bool recover = esr & KASAN_ESR_RECOVER; - bool write = esr & KASAN_ESR_WRITE; -@@ -1050,11 +1050,11 @@ static struct break_hook kasan_break_hook = { - * Initial handler for AArch64 BRK exceptions - * This handler only used until debug_traps_init(). - */ --int __init early_brk64(unsigned long addr, unsigned int esr, -+int __init early_brk64(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { - #ifdef CONFIG_KASAN_SW_TAGS -- unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; -+ unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK; - - if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) - return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; -diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c -index a61fc4f989b37..55dd15c9745da 100644 ---- a/arch/arm64/kernel/vdso.c -+++ b/arch/arm64/kernel/vdso.c -@@ -314,7 +314,7 @@ static int aarch32_alloc_kuser_vdso_page(void) - - memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, - kuser_sz); -- aarch32_vectors_page = virt_to_page(vdso_page); -+ aarch32_vectors_page = virt_to_page((void *)vdso_page); - return 0; - } - -diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile -index 945e6bb326e3e..b5d8f72e8b32e 100644 ---- a/arch/arm64/kernel/vdso/Makefile -+++ b/arch/arm64/kernel/vdso/Makefile -@@ -48,9 +48,6 @@ GCOV_PROFILE := n - targets += vdso.lds - CPPFLAGS_vdso.lds += -P -C -U$(ARCH) - --# Force dependency (incbin is bad) --$(obj)/vdso.o : $(obj)/vdso.so -- - # Link rule for the .so file, .lds has to be first - $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold_and_vdso_check) -diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile -index 3dba0c4f8f42b..83e9399e38368 100644 ---- a/arch/arm64/kernel/vdso32/Makefile -+++ b/arch/arm64/kernel/vdso32/Makefile -@@ -10,18 +10,15 @@ include $(srctree)/lib/vdso/Makefile - - # Same as cc-*option, but using CC_COMPAT instead of CC - ifeq ($(CONFIG_CC_IS_CLANG), y) --CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) -- - CC_COMPAT ?= $(CC) --CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) -- --ifneq ($(LLVM),) --LD_COMPAT ?= $(LD) -+CC_COMPAT += --target=arm-linux-gnueabi - else --LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld -+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc - endif -+ -+ifeq ($(CONFIG_LD_IS_LLD), y) -+LD_COMPAT ?= $(LD) - else --CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc - LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld - endif - -@@ -40,16 +37,13 @@ cc32-as-instr = $(call try-run,\ - # As a result we set our own flags here. 
- - # KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile --VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) -+VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -+VDSO_CPPFLAGS += -isystem $(shell $(CC_COMPAT) -print-file-name=include 2>/dev/null) - VDSO_CPPFLAGS += $(LINUXINCLUDE) - - # Common C and assembly flags - # From top-level Makefile - VDSO_CAFLAGS := $(VDSO_CPPFLAGS) --ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),) --VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) --endif -- - VDSO_CAFLAGS += $(call cc32-option,-fno-PIE) - ifdef CONFIG_DEBUG_INFO - VDSO_CAFLAGS += -g -@@ -150,9 +144,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso) - targets += vdso.lds - CPPFLAGS_vdso.lds += -P -C -U$(ARCH) - --# Force dependency (vdso.s includes vdso.so through incbin) --$(obj)/vdso.o: $(obj)/vdso.so -- - include/generated/vdso32-offsets.h: $(obj)/vdso.so.dbg FORCE - $(call if_changed,vdsosym) - -diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S -index f6b1a88245db2..184abd7c4206e 100644 ---- a/arch/arm64/kernel/vmlinux.lds.S -+++ b/arch/arm64/kernel/vmlinux.lds.S -@@ -330,7 +330,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) - <= SZ_4K, "Hibernate exit text too big or misaligned") - #endif - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 --ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, -+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, - "Entry trampoline text too big") - #endif - #ifdef CONFIG_KVM -diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c -index fe102cd2e5183..3fe816c244cec 100644 ---- a/arch/arm64/kvm/arm.c -+++ b/arch/arm64/kvm/arm.c -@@ -712,8 +712,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu) - if (likely(!vcpu_mode_is_32bit(vcpu))) - return false; - -- return !system_supports_32bit_el0() || -- static_branch_unlikely(&arm64_mismatched_32bit_el0); -+ return !kvm_supports_32bit_el0(); - } - - /** -@@ -755,6 +754,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) - xfer_to_guest_mode_work_pending(); - } - -+/* -+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while -+ * the vCPU is running. -+ * -+ * This must be noinstr as instrumentation may make use of RCU, and this is not -+ * safe during the EQS. -+ */ -+static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu) -+{ -+ int ret; -+ -+ guest_state_enter_irqoff(); -+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu); -+ guest_state_exit_irqoff(); -+ -+ return ret; -+} -+ - /** - * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code - * @vcpu: The VCPU pointer -@@ -845,9 +862,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) - * Enter the guest - */ - trace_kvm_entry(*vcpu_pc(vcpu)); -- guest_enter_irqoff(); -+ guest_timing_enter_irqoff(); - -- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu); -+ ret = kvm_arm_vcpu_enter_exit(vcpu); - - vcpu->mode = OUTSIDE_GUEST_MODE; - vcpu->stat.exits++; -@@ -882,26 +899,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) - kvm_arch_vcpu_ctxsync_fp(vcpu); - - /* -- * We may have taken a host interrupt in HYP mode (ie -- * while executing the guest). This interrupt is still -- * pending, as we haven't serviced it yet! -+ * We must ensure that any pending interrupts are taken before -+ * we exit guest timing so that timer ticks are accounted as -+ * guest time. 
Transiently unmask interrupts so that any -+ * pending interrupts are taken. - * -- * We're now back in SVC mode, with interrupts -- * disabled. Enabling the interrupts now will have -- * the effect of taking the interrupt again, in SVC -- * mode this time. -+ * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other -+ * context synchronization event) is necessary to ensure that -+ * pending interrupts are taken. - */ - local_irq_enable(); -+ isb(); -+ local_irq_disable(); -+ -+ guest_timing_exit_irqoff(); -+ -+ local_irq_enable(); - -- /* -- * We do local_irq_enable() before calling guest_exit() so -- * that if a timer interrupt hits while running the guest we -- * account that tick as being spent in the guest. We enable -- * preemption after calling guest_exit() so that if we get -- * preempted we make sure ticks after that is not counted as -- * guest time. -- */ -- guest_exit(); - trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); - - /* Exit types that need handling before we can be preempted */ -@@ -1443,10 +1457,8 @@ static int kvm_init_vector_slots(void) - base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); - kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); - -- if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) -- return 0; -- -- if (!has_vhe()) { -+ if (kvm_system_needs_idmapped_vectors() && -+ !is_protected_kvm_enabled()) { - err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), - __BP_HARDEN_HYP_VECS_SZ, &base); - if (err) -@@ -1971,31 +1983,50 @@ out_err: - return err; - } - --static void _kvm_host_prot_finalize(void *discard) -+static void _kvm_host_prot_finalize(void *arg) - { -- WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)); -+ int *err = arg; -+ -+ if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize))) -+ WRITE_ONCE(*err, -EINVAL); - } - --static int finalize_hyp_mode(void) -+static int pkvm_drop_host_privileges(void) - { -- if (!is_protected_kvm_enabled()) -- return 0; -- -- /* -- * Exclude HYP BSS from kmemleak so that it doesn't get peeked -- * at, which would end badly once the section is inaccessible. -- * None of other sections should ever be introspected. -- */ -- kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); -+ int ret = 0; - - /* - * Flip the static key upfront as that may no longer be possible - * once the host stage 2 is installed. - */ - static_branch_enable(&kvm_protected_mode_initialized); -- on_each_cpu(_kvm_host_prot_finalize, NULL, 1); - -- return 0; -+ /* -+ * Fixup the boot mode so that we don't take spurious round -+ * trips via EL2 on cpu_resume. Flush to the PoC for a good -+ * measure, so that it can be observed by a CPU coming out of -+ * suspend with the MMU off. -+ */ -+ __boot_cpu_mode[0] = __boot_cpu_mode[1] = BOOT_CPU_MODE_EL1; -+ dcache_clean_poc((unsigned long)__boot_cpu_mode, -+ (unsigned long)(__boot_cpu_mode + 2)); -+ -+ on_each_cpu(_kvm_host_prot_finalize, &ret, 1); -+ return ret; -+} -+ -+static int finalize_hyp_mode(void) -+{ -+ if (!is_protected_kvm_enabled()) -+ return 0; -+ -+ /* -+ * Exclude HYP sections from kmemleak so that they don't get peeked -+ * at, which would end badly once inaccessible. 
-+ */ -+ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); -+ kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size); -+ return pkvm_drop_host_privileges(); - } - - struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) -diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c -index 5ce26bedf23c0..94108e2e09179 100644 ---- a/arch/arm64/kvm/guest.c -+++ b/arch/arm64/kvm/guest.c -@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) - u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK; - switch (mode) { - case PSR_AA32_MODE_USR: -- if (!system_supports_32bit_el0()) -+ if (!kvm_supports_32bit_el0()) - return -EINVAL; - break; - case PSR_AA32_MODE_FIQ: -diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c -index 275a27368a04c..a5ab5215094ee 100644 ---- a/arch/arm64/kvm/handle_exit.c -+++ b/arch/arm64/kvm/handle_exit.c -@@ -226,6 +226,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index) - { - struct kvm_run *run = vcpu->run; - -+ if (ARM_SERROR_PENDING(exception_index)) { -+ /* -+ * The SError is handled by handle_exit_early(). If the guest -+ * survives it will re-execute the original instruction. -+ */ -+ return 1; -+ } -+ - exception_index = ARM_EXCEPTION_CODE(exception_index); - - switch (exception_index) { -diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c -index 0418399e0a201..aa06e28f2991f 100644 ---- a/arch/arm64/kvm/hyp/exception.c -+++ b/arch/arm64/kvm/hyp/exception.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__) - #error Hypervisor code only! -@@ -38,7 +39,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) - - static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val) - { -- write_sysreg_el1(val, SYS_SPSR); -+ if (has_vhe()) -+ write_sysreg_el1(val, SYS_SPSR); -+ else -+ __vcpu_sys_reg(vcpu, SPSR_EL1) = val; - } - - static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val) -@@ -112,7 +116,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode, - new |= (old & PSR_C_BIT); - new |= (old & PSR_V_BIT); - -- if (kvm_has_mte(vcpu->kvm)) -+ if (kvm_has_mte(kern_hyp_va(vcpu->kvm))) - new |= PSR_TCO_BIT; - - new |= (old & PSR_DIT_BIT); -diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S -index 9aa9b73475c95..7839d075729b1 100644 ---- a/arch/arm64/kvm/hyp/hyp-entry.S -+++ b/arch/arm64/kvm/hyp/hyp-entry.S -@@ -44,7 +44,7 @@ - el1_sync: // Guest trapped into EL2 - - mrs x0, esr_el2 -- lsr x0, x0, #ESR_ELx_EC_SHIFT -+ ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH - cmp x0, #ESR_ELx_EC_HVC64 - ccmp x0, #ESR_ELx_EC_HVC32, #4, ne - b.ne el1_trap -@@ -62,6 +62,10 @@ el1_sync: // Guest trapped into EL2 - /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ - eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ - ARM_SMCCC_ARCH_WORKAROUND_2) -+ cbz w1, wa_epilogue -+ -+ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ -+ ARM_SMCCC_ARCH_WORKAROUND_3) - cbnz w1, el1_trap - - wa_epilogue: -@@ -192,7 +196,10 @@ SYM_CODE_END(__kvm_hyp_vector) - sub sp, sp, #(8 * 4) - stp x2, x3, [sp, #(8 * 0)] - stp x0, x1, [sp, #(8 * 2)] -+ alternative_cb spectre_bhb_patch_wa3 -+ /* Patched to mov WA3 when supported */ - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 -+ alternative_cb_end - smc #0 - ldp x2, x3, [sp, #(8 * 0)] - add sp, sp, #(8 * 2) -@@ -205,6 +212,8 @@ SYM_CODE_END(__kvm_hyp_vector) - 
spectrev2_smccc_wa1_smc - .else - stp x0, x1, [sp, #-16]! -+ mitigate_spectre_bhb_loop x0 -+ mitigate_spectre_bhb_clear_insn - .endif - .if \indirect != 0 - alternative_cb kvm_patch_vector_branch -diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h -index a0e78a6027be0..ecd41844eda09 100644 ---- a/arch/arm64/kvm/hyp/include/hyp/switch.h -+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h -@@ -416,10 +416,17 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) - */ - static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) - { -+ /* -+ * Save PSTATE early so that we can evaluate the vcpu mode -+ * early on. -+ */ -+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR); -+ - if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) - vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); - -- if (ARM_SERROR_PENDING(*exit_code)) { -+ if (ARM_SERROR_PENDING(*exit_code) && -+ ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) { - u8 esr_ec = kvm_vcpu_trap_get_class(vcpu); - - /* -diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h -index de7e14c862e6c..7ecca8b078519 100644 ---- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h -+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h -@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) - static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) - { - ctxt->regs.pc = read_sysreg_el2(SYS_ELR); -- ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); -+ /* -+ * Guest PSTATE gets saved at guest fixup time in all -+ * cases. We still need to handle the nVHE host side here. -+ */ -+ if (!has_vhe() && ctxt->__hyp_running_vcpu) -+ ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); - - if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) - ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2); -diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile -index 8d741f71377f4..964c2134ea1e5 100644 ---- a/arch/arm64/kvm/hyp/nvhe/Makefile -+++ b/arch/arm64/kvm/hyp/nvhe/Makefile -@@ -83,6 +83,10 @@ quiet_cmd_hypcopy = HYPCOPY $@ - # Remove ftrace, Shadow Call Stack, and CFI CFLAGS. - # This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations. - KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) -+# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile' -+# when profile optimization is applied. gen-hyprel does not support SHT_REL and -+# causes a build failure. Remove profile optimization flags. -+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS)) - - # KVM nVHE code is run at a different exception code with a different map, so - # compiler instrumentation that inserts callbacks or checks into the code may -diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S -index 4b652ffb591d4..d310d2b2c8b40 100644 ---- a/arch/arm64/kvm/hyp/nvhe/host.S -+++ b/arch/arm64/kvm/hyp/nvhe/host.S -@@ -115,7 +115,7 @@ SYM_FUNC_END(__hyp_do_panic) - .L__vect_start\@: - stp x0, x1, [sp, #-16]! 
- mrs x0, esr_el2 -- lsr x0, x0, #ESR_ELx_EC_SHIFT -+ ubfx x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH - cmp x0, #ESR_ELx_EC_HVC64 - b.ne __host_exit - -diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c -index 2fabeceb889a9..5146fb1705054 100644 ---- a/arch/arm64/kvm/hyp/nvhe/mm.c -+++ b/arch/arm64/kvm/hyp/nvhe/mm.c -@@ -146,8 +146,10 @@ int hyp_map_vectors(void) - phys_addr_t phys; - void *bp_base; - -- if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) -+ if (!kvm_system_needs_idmapped_vectors()) { -+ __hyp_bp_vect_base = __bp_harden_hyp_vecs; - return 0; -+ } - - phys = __hyp_pa(__bp_harden_hyp_vecs); - bp_base = (void *)__pkvm_create_private_mapping(phys, -diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c -index 57c27846320f4..58ad9c5ba3112 100644 ---- a/arch/arm64/kvm/hyp/nvhe/setup.c -+++ b/arch/arm64/kvm/hyp/nvhe/setup.c -@@ -177,7 +177,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, - - phys = kvm_pte_to_phys(pte); - if (!addr_is_memory(phys)) -- return 0; -+ return -EINVAL; - - /* - * Adjust the host stage-2 mappings to match the ownership attributes -@@ -206,8 +206,18 @@ static int finalize_host_mappings(void) - .cb = finalize_host_mappings_walker, - .flags = KVM_PGTABLE_WALK_LEAF, - }; -+ int i, ret; -+ -+ for (i = 0; i < hyp_memblock_nr; i++) { -+ struct memblock_region *reg = &hyp_memory[i]; -+ u64 start = (u64)hyp_phys_to_virt(reg->base); -+ -+ ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker); -+ if (ret) -+ return ret; -+ } - -- return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker); -+ return 0; - } - - void __noreturn __pkvm_init_finalise(void) -diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c -index a34b01cc8ab9e..4db5409f40c4f 100644 ---- a/arch/arm64/kvm/hyp/nvhe/switch.c -+++ b/arch/arm64/kvm/hyp/nvhe/switch.c -@@ -279,5 +279,5 @@ void __noreturn hyp_panic(void) - - asmlinkage void kvm_unexpected_el2_exception(void) - { -- return __kvm_unexpected_el2_exception(); -+ __kvm_unexpected_el2_exception(); - } -diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c -index f8ceebe4982eb..4c77ff556f0ae 100644 ---- a/arch/arm64/kvm/hyp/pgtable.c -+++ b/arch/arm64/kvm/hyp/pgtable.c -@@ -921,13 +921,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - */ - stage2_put_pte(ptep, mmu, addr, level, mm_ops); - -- if (need_flush) { -- kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops); -- -- dcache_clean_inval_poc((unsigned long)pte_follow, -- (unsigned long)pte_follow + -- kvm_granule_size(level)); -- } -+ if (need_flush && mm_ops->dcache_clean_inval_poc) -+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops), -+ kvm_granule_size(level)); - - if (childp) - mm_ops->put_page(childp); -@@ -1089,15 +1085,13 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, - struct kvm_pgtable *pgt = arg; - struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; - kvm_pte_t pte = *ptep; -- kvm_pte_t *pte_follow; - - if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) - return 0; - -- pte_follow = kvm_pte_follow(pte, mm_ops); -- dcache_clean_inval_poc((unsigned long)pte_follow, -- (unsigned long)pte_follow + -- kvm_granule_size(level)); -+ if (mm_ops->dcache_clean_inval_poc) -+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops), -+ kvm_granule_size(level)); - return 0; - } - -diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c -index 
ded2c66675f06..813e6e2178c16 100644 ---- a/arch/arm64/kvm/hyp/vhe/switch.c -+++ b/arch/arm64/kvm/hyp/vhe/switch.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -25,6 +26,7 @@ - #include - #include - #include -+#include - - /* VHE specific context */ - DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); -@@ -68,7 +70,7 @@ NOKPROBE_SYMBOL(__activate_traps); - - static void __deactivate_traps(struct kvm_vcpu *vcpu) - { -- extern char vectors[]; /* kernel exception vectors */ -+ const char *host_vectors = vectors; - - ___deactivate_traps(vcpu); - -@@ -82,7 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) - asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); - - write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); -- write_sysreg(vectors, vbar_el1); -+ -+ if (!arm64_kernel_unmapped_at_el0()) -+ host_vectors = __this_cpu_read(this_cpu_vector); -+ write_sysreg(host_vectors, vbar_el1); - } - NOKPROBE_SYMBOL(__deactivate_traps); - -@@ -215,5 +220,5 @@ void __noreturn hyp_panic(void) - - asmlinkage void kvm_unexpected_el2_exception(void) - { -- return __kvm_unexpected_el2_exception(); -+ __kvm_unexpected_el2_exception(); - } -diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c -index 30da78f72b3b3..202b8c455724b 100644 ---- a/arch/arm64/kvm/hypercalls.c -+++ b/arch/arm64/kvm/hypercalls.c -@@ -107,6 +107,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) - break; - } - break; -+ case ARM_SMCCC_ARCH_WORKAROUND_3: -+ switch (arm64_get_spectre_bhb_state()) { -+ case SPECTRE_VULNERABLE: -+ break; -+ case SPECTRE_MITIGATED: -+ val[0] = SMCCC_RET_SUCCESS; -+ break; -+ case SPECTRE_UNAFFECTED: -+ val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; -+ break; -+ } -+ break; - case ARM_SMCCC_HV_PV_TIME_FEATURES: - val[0] = SMCCC_RET_SUCCESS; - break; -diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c -index 69bd1732a299f..38a8095744a07 100644 ---- a/arch/arm64/kvm/mmu.c -+++ b/arch/arm64/kvm/mmu.c -@@ -468,14 +468,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr) - CONFIG_PGTABLE_LEVELS), - .mm_ops = &kvm_user_mm_ops, - }; -+ unsigned long flags; - kvm_pte_t pte = 0; /* Keep GCC quiet... */ - u32 level = ~0; - int ret; - -+ /* -+ * Disable IRQs so that we hazard against a concurrent -+ * teardown of the userspace page tables (which relies on -+ * IPI-ing threads). -+ */ -+ local_irq_save(flags); - ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level); -- VM_BUG_ON(ret); -- VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS); -- VM_BUG_ON(!(pte & PTE_VALID)); -+ local_irq_restore(flags); -+ -+ if (ret) -+ return ret; -+ -+ /* -+ * Not seeing an error, but not updating level? Something went -+ * deeply wrong... -+ */ -+ if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS)) -+ return -EFAULT; -+ -+ /* Oops, the userspace PTs are gone... Replay the fault */ -+ if (!kvm_pte_valid(pte)) -+ return -EAGAIN; - - return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); - } -@@ -826,7 +845,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, - * - * Returns the size of the mapping. - */ --static unsigned long -+static long - transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, - unsigned long hva, kvm_pfn_t *pfnp, - phys_addr_t *ipap) -@@ -838,8 +857,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, - * sure that the HVA and IPA are sufficiently aligned and that the - * block map is contained within the memslot. 
- */ -- if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && -- get_user_mapping_size(kvm, hva) >= PMD_SIZE) { -+ if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { -+ int sz = get_user_mapping_size(kvm, hva); -+ -+ if (sz < 0) -+ return sz; -+ -+ if (sz < PMD_SIZE) -+ return PAGE_SIZE; -+ - /* - * The address we faulted on is backed by a transparent huge - * page. However, because we map the compound huge page and -@@ -957,7 +983,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - kvm_pfn_t pfn; - bool logging_active = memslot_is_logging(memslot); - unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); -- unsigned long vma_pagesize, fault_granule; -+ long vma_pagesize, fault_granule; - enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; - struct kvm_pgtable *pgt; - -@@ -971,6 +997,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - return -EFAULT; - } - -+ /* -+ * Permission faults just need to update the existing leaf entry, -+ * and so normally don't require allocations from the memcache. The -+ * only exception to this is when dirty logging is enabled at runtime -+ * and a write fault needs to collapse a block entry into a table. -+ */ -+ if (fault_status != FSC_PERM || -+ (logging_active && write_fault)) { -+ ret = kvm_mmu_topup_memory_cache(memcache, -+ kvm_mmu_cache_min_pages(kvm)); -+ if (ret) -+ return ret; -+ } -+ - /* - * Let's check if we will get back a huge page backed by hugetlbfs, or - * get block mapping for device MMIO region. -@@ -1025,36 +1065,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - fault_ipa &= ~(vma_pagesize - 1); - - gfn = fault_ipa >> PAGE_SHIFT; -- mmap_read_unlock(current->mm); -- -- /* -- * Permission faults just need to update the existing leaf entry, -- * and so normally don't require allocations from the memcache. The -- * only exception to this is when dirty logging is enabled at runtime -- * and a write fault needs to collapse a block entry into a table. -- */ -- if (fault_status != FSC_PERM || (logging_active && write_fault)) { -- ret = kvm_mmu_topup_memory_cache(memcache, -- kvm_mmu_cache_min_pages(kvm)); -- if (ret) -- return ret; -- } - -- mmu_seq = vcpu->kvm->mmu_notifier_seq; - /* -- * Ensure the read of mmu_notifier_seq happens before we call -- * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk -- * the page we just got a reference to gets unmapped before we have a -- * chance to grab the mmu_lock, which ensure that if the page gets -- * unmapped afterwards, the call to kvm_unmap_gfn will take it away -- * from us again properly. This smp_rmb() interacts with the smp_wmb() -- * in kvm_mmu_notifier_invalidate_. -+ * Read mmu_notifier_seq so that KVM can detect if the results of -+ * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to -+ * acquiring kvm->mmu_lock. - * -- * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is -- * used to avoid unnecessary overhead introduced to locate the memory -- * slot because it's always fixed even @gfn is adjusted for huge pages. -+ * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs -+ * with the smp_wmb() in kvm_dec_notifier_count(). 
- */ -- smp_rmb(); -+ mmu_seq = vcpu->kvm->mmu_notifier_seq; -+ mmap_read_unlock(current->mm); - - pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, - write_fault, &writable, NULL); -@@ -1104,6 +1125,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - vma_pagesize = transparent_hugepage_adjust(kvm, memslot, - hva, &pfn, - &fault_ipa); -+ -+ if (vma_pagesize < 0) { -+ ret = vma_pagesize; -+ goto out_unlock; -+ } - } - - if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) { -diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c -index 2af3c37445e00..886048c083638 100644 ---- a/arch/arm64/kvm/pmu-emul.c -+++ b/arch/arm64/kvm/pmu-emul.c -@@ -554,6 +554,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); - } - } -+ kvm_vcpu_pmu_restore_guest(vcpu); - } - - /** -diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c -index 74c47d4202534..be03ea3e775a8 100644 ---- a/arch/arm64/kvm/psci.c -+++ b/arch/arm64/kvm/psci.c -@@ -406,7 +406,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) - - int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) - { -- return 3; /* PSCI version and two workaround registers */ -+ return 4; /* PSCI version and three workaround registers */ - } - - int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -@@ -420,6 +420,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) - if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++)) - return -EFAULT; - -+ if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++)) -+ return -EFAULT; -+ - return 0; - } - -@@ -459,6 +462,17 @@ static int get_kernel_wa_level(u64 regid) - case SPECTRE_VULNERABLE: - return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; - } -+ break; -+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: -+ switch (arm64_get_spectre_bhb_state()) { -+ case SPECTRE_VULNERABLE: -+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; -+ case SPECTRE_MITIGATED: -+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL; -+ case SPECTRE_UNAFFECTED: -+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED; -+ } -+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; - } - - return -EINVAL; -@@ -475,6 +489,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) - break; - case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: - case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: -+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: - val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; - break; - default: -@@ -493,6 +508,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) - u64 val; - int wa_level; - -+ if (KVM_REG_SIZE(reg->id) != sizeof(val)) -+ return -ENOENT; - if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id))) - return -EFAULT; - -@@ -520,6 +537,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) - } - - case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: -+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: - if (val & ~KVM_REG_FEATURE_LEVEL_MASK) - return -EINVAL; - -diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c -index 1d46e185f31e1..d00170d7ddf5e 100644 ---- a/arch/arm64/kvm/sys_regs.c -+++ b/arch/arm64/kvm/sys_regs.c -@@ -649,7 +649,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) - */ - val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) - | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); -- if (!system_supports_32bit_el0()) -+ if 
(!kvm_supports_32bit_el0()) - val |= ARMV8_PMU_PMCR_LC; - __vcpu_sys_reg(vcpu, r->reg) = val; - } -@@ -698,11 +698,10 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, - val = __vcpu_sys_reg(vcpu, PMCR_EL0); - val &= ~ARMV8_PMU_PMCR_MASK; - val |= p->regval & ARMV8_PMU_PMCR_MASK; -- if (!system_supports_32bit_el0()) -+ if (!kvm_supports_32bit_el0()) - val |= ARMV8_PMU_PMCR_LC; - __vcpu_sys_reg(vcpu, PMCR_EL0) = val; - kvm_pmu_handle_pmcr(vcpu, val); -- kvm_vcpu_pmu_restore_guest(vcpu); - } else { - /* PMCR.P & PMCR.C are RAZ */ - val = __vcpu_sys_reg(vcpu, PMCR_EL0) -@@ -1518,7 +1517,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { - /* CRm=6 */ - ID_SANITISED(ID_AA64ISAR0_EL1), - ID_SANITISED(ID_AA64ISAR1_EL1), -- ID_UNALLOCATED(6,2), -+ ID_SANITISED(ID_AA64ISAR2_EL1), - ID_UNALLOCATED(6,3), - ID_UNALLOCATED(6,4), - ID_UNALLOCATED(6,5), -diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c -index 61728c543eb9c..1d534283378a3 100644 ---- a/arch/arm64/kvm/vgic/vgic-its.c -+++ b/arch/arm64/kvm/vgic/vgic-its.c -@@ -2096,7 +2096,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, - - memset(entry, 0, esz); - -- while (len > 0) { -+ while (true) { - int next_offset; - size_t byte_offset; - -@@ -2109,6 +2109,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, - return next_offset; - - byte_offset = next_offset * esz; -+ if (byte_offset >= len) -+ break; -+ - id += next_offset; - gpa += byte_offset; - len -= byte_offset; -diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c -index 5f9014ae595b7..508aee9f88535 100644 ---- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c -+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c -@@ -418,11 +418,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, - vgic_mmio_read_pending, vgic_mmio_write_spending, -- NULL, vgic_uaccess_write_spending, 1, -+ vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, - vgic_mmio_read_pending, vgic_mmio_write_cpending, -- NULL, vgic_uaccess_write_cpending, 1, -+ vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1, - VGIC_ACCESS_32bit), - REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, - vgic_mmio_read_active, vgic_mmio_write_sactive, -diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c -index 48c6067fc5ecb..55630ca2c325b 100644 ---- a/arch/arm64/kvm/vgic/vgic-mmio.c -+++ b/arch/arm64/kvm/vgic/vgic-mmio.c -@@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, - return 0; - } - --unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, -- gpa_t addr, unsigned int len) -+static unsigned long __read_pending(struct kvm_vcpu *vcpu, -+ gpa_t addr, unsigned int len, -+ bool is_user) - { - u32 intid = VGIC_ADDR_TO_INTID(addr, 1); - u32 value = 0; -@@ -248,6 +249,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - IRQCHIP_STATE_PENDING, - &val); - WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); -+ } else if (!is_user && vgic_irq_is_mapped_level(irq)) { -+ val = vgic_get_phys_line_level(irq); - } else { - val = irq_is_pending(irq); - } -@@ -261,6 +264,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - return value; - } - -+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, -+ gpa_t addr, unsigned int len) -+{ -+ return 
__read_pending(vcpu, addr, len, false); -+} -+ -+unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, -+ gpa_t addr, unsigned int len) -+{ -+ return __read_pending(vcpu, addr, len, true); -+} -+ - static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) - { - return (vgic_irq_is_sgi(irq->intid) && -diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h -index fefcca2b14dc7..dcea440159855 100644 ---- a/arch/arm64/kvm/vgic/vgic-mmio.h -+++ b/arch/arm64/kvm/vgic/vgic-mmio.h -@@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, - unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len); - -+unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, -+ gpa_t addr, unsigned int len); -+ - void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len, - unsigned long val); -diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c -index 21a6207fb2eed..8eb70451323b6 100644 ---- a/arch/arm64/kvm/vgic/vgic-v3.c -+++ b/arch/arm64/kvm/vgic/vgic-v3.c -@@ -347,26 +347,23 @@ retry: - * The deactivation of the doorbell interrupt will trigger the - * unmapping of the associated vPE. - */ --static void unmap_all_vpes(struct vgic_dist *dist) -+static void unmap_all_vpes(struct kvm *kvm) - { -- struct irq_desc *desc; -+ struct vgic_dist *dist = &kvm->arch.vgic; - int i; - -- for (i = 0; i < dist->its_vm.nr_vpes; i++) { -- desc = irq_to_desc(dist->its_vm.vpes[i]->irq); -- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); -- } -+ for (i = 0; i < dist->its_vm.nr_vpes; i++) -+ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i)); - } - --static void map_all_vpes(struct vgic_dist *dist) -+static void map_all_vpes(struct kvm *kvm) - { -- struct irq_desc *desc; -+ struct vgic_dist *dist = &kvm->arch.vgic; - int i; - -- for (i = 0; i < dist->its_vm.nr_vpes; i++) { -- desc = irq_to_desc(dist->its_vm.vpes[i]->irq); -- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false); -- } -+ for (i = 0; i < dist->its_vm.nr_vpes; i++) -+ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i), -+ dist->its_vm.vpes[i]->irq)); - } - - /** -@@ -391,7 +388,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) - * and enabling of the doorbells have already been done. 
- */ - if (kvm_vgic_global_state.has_gicv4_1) { -- unmap_all_vpes(dist); -+ unmap_all_vpes(kvm); - vlpi_avail = true; - } - -@@ -441,7 +438,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) - - out: - if (vlpi_avail) -- map_all_vpes(dist); -+ map_all_vpes(kvm); - - return ret; - } -diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c -index c1845d8f5f7e7..f507e3fcffce3 100644 ---- a/arch/arm64/kvm/vgic/vgic-v4.c -+++ b/arch/arm64/kvm/vgic/vgic-v4.c -@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val) - *val = !!(*ptr & mask); - } - -+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq) -+{ -+ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu); -+} -+ - /** - * vgic_v4_init - Initialize the GICv4 data structures - * @kvm: Pointer to the VM being initialized -@@ -282,8 +287,7 @@ int vgic_v4_init(struct kvm *kvm) - irq_flags &= ~IRQ_NOAUTOEN; - irq_set_status_flags(irq, irq_flags); - -- ret = request_irq(irq, vgic_v4_doorbell_handler, -- 0, "vcpu", vcpu); -+ ret = vgic_v4_request_vpe_irq(vcpu, irq); - if (ret) { - kvm_err("failed to allocate vcpu IRQ%d\n", irq); - /* -diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h -index 14a9218641f57..36021c31a706a 100644 ---- a/arch/arm64/kvm/vgic/vgic.h -+++ b/arch/arm64/kvm/vgic/vgic.h -@@ -321,5 +321,6 @@ int vgic_v4_init(struct kvm *kvm); - void vgic_v4_teardown(struct kvm *kvm); - void vgic_v4_configure_vsgis(struct kvm *kvm); - void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); -+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); - - #endif -diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S -index b84b179edba3a..1fd5d790ab800 100644 ---- a/arch/arm64/lib/clear_page.S -+++ b/arch/arm64/lib/clear_page.S -@@ -16,6 +16,7 @@ - */ - SYM_FUNC_START_PI(clear_page) - mrs x1, dczid_el0 -+ tbnz x1, #4, 2f /* Branch if DC ZVA is prohibited */ - and w1, w1, #0xf - mov x2, #4 - lsl x1, x2, x1 -@@ -25,5 +26,14 @@ SYM_FUNC_START_PI(clear_page) - tst x0, #(PAGE_SIZE - 1) - b.ne 1b - ret -+ -+2: stnp xzr, xzr, [x0] -+ stnp xzr, xzr, [x0, #16] -+ stnp xzr, xzr, [x0, #32] -+ stnp xzr, xzr, [x0, #48] -+ add x0, x0, #64 -+ tst x0, #(PAGE_SIZE - 1) -+ b.ne 2b -+ ret - SYM_FUNC_END_PI(clear_page) - EXPORT_SYMBOL(clear_page) -diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c -index 78b87a64ca0a3..2432683e48a61 100644 ---- a/arch/arm64/lib/csum.c -+++ b/arch/arm64/lib/csum.c -@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len) - const u64 *ptr; - u64 data, sum64 = 0; - -- if (unlikely(len == 0)) -+ if (unlikely(len <= 0)) - return 0; - - offset = (unsigned long)buff & 7; -diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S -index e83643b3995f4..f531dcb95174a 100644 ---- a/arch/arm64/lib/mte.S -+++ b/arch/arm64/lib/mte.S -@@ -43,17 +43,23 @@ SYM_FUNC_END(mte_clear_page_tags) - * x0 - address to the beginning of the page - */ - SYM_FUNC_START(mte_zero_clear_page_tags) -+ and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag - mrs x1, dczid_el0 -+ tbnz x1, #4, 2f // Branch if DC GZVA is prohibited - and w1, w1, #0xf - mov x2, #4 - lsl x1, x2, x1 -- and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag - - 1: dc gzva, x0 - add x0, x0, x1 - tst x0, #(PAGE_SIZE - 1) - b.ne 1b - ret -+ -+2: stz2g x0, [x0], #(MTE_GRANULE_SIZE * 2) -+ tst x0, #(PAGE_SIZE - 1) -+ b.ne 2b -+ ret - SYM_FUNC_END(mte_zero_clear_page_tags) - - /* -diff --git a/arch/arm64/lib/strncmp.S 
b/arch/arm64/lib/strncmp.S -index e42bcfcd37e6f..a4884b97e9a81 100644 ---- a/arch/arm64/lib/strncmp.S -+++ b/arch/arm64/lib/strncmp.S -@@ -1,9 +1,9 @@ - /* SPDX-License-Identifier: GPL-2.0-only */ - /* -- * Copyright (c) 2013-2021, Arm Limited. -+ * Copyright (c) 2013-2022, Arm Limited. - * - * Adapted from the original at: -- * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S -+ * https://github.com/ARM-software/optimized-routines/blob/189dfefe37d54c5b/string/aarch64/strncmp.S - */ - - #include -@@ -11,14 +11,14 @@ - - /* Assumptions: - * -- * ARMv8-a, AArch64 -+ * ARMv8-a, AArch64. -+ * MTE compatible. - */ - - #define L(label) .L ## label - - #define REP8_01 0x0101010101010101 - #define REP8_7f 0x7f7f7f7f7f7f7f7f --#define REP8_80 0x8080808080808080 - - /* Parameters and result. */ - #define src1 x0 -@@ -39,10 +39,24 @@ - #define tmp3 x10 - #define zeroones x11 - #define pos x12 --#define limit_wd x13 --#define mask x14 --#define endloop x15 -+#define mask x13 -+#define endloop x14 - #define count mask -+#define offset pos -+#define neg_offset x15 -+ -+/* Define endian dependent shift operations. -+ On big-endian early bytes are at MSB and on little-endian LSB. -+ LS_FW means shifting towards early bytes. -+ LS_BK means shifting towards later bytes. -+ */ -+#ifdef __AARCH64EB__ -+#define LS_FW lsl -+#define LS_BK lsr -+#else -+#define LS_FW lsr -+#define LS_BK lsl -+#endif - - SYM_FUNC_START_WEAK_PI(strncmp) - cbz limit, L(ret0) -@@ -52,9 +66,6 @@ SYM_FUNC_START_WEAK_PI(strncmp) - and count, src1, #7 - b.ne L(misaligned8) - cbnz count, L(mutual_align) -- /* Calculate the number of full and partial words -1. */ -- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ -- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */ - - /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 - (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and -@@ -64,56 +75,52 @@ L(loop_aligned): - ldr data1, [src1], #8 - ldr data2, [src2], #8 - L(start_realigned): -- subs limit_wd, limit_wd, #1 -+ subs limit, limit, #8 - sub tmp1, data1, zeroones - orr tmp2, data1, #REP8_7f - eor diff, data1, data2 /* Non-zero if differences found. */ -- csinv endloop, diff, xzr, pl /* Last Dword or differences. */ -+ csinv endloop, diff, xzr, hi /* Last Dword or differences. */ - bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ - ccmp endloop, #0, #0, eq - b.eq L(loop_aligned) - /* End of main loop */ - -- /* Not reached the limit, must have found the end or a diff. */ -- tbz limit_wd, #63, L(not_limit) -- -- /* Limit % 8 == 0 => all bytes significant. */ -- ands limit, limit, #7 -- b.eq L(not_limit) -- -- lsl limit, limit, #3 /* Bits -> bytes. */ -- mov mask, #~0 --#ifdef __AARCH64EB__ -- lsr mask, mask, limit --#else -- lsl mask, mask, limit --#endif -- bic data1, data1, mask -- bic data2, data2, mask -- -- /* Make sure that the NUL byte is marked in the syndrome. */ -- orr has_nul, has_nul, mask -- --L(not_limit): -+L(full_check): -+#ifndef __AARCH64EB__ - orr syndrome, diff, has_nul -- --#ifndef __AARCH64EB__ -+ add limit, limit, 8 /* Rewind limit to before last subs. */ -+L(syndrome_check): -+ /* Limit was reached. Check if the NUL byte or the difference -+ is before the limit. */ - rev syndrome, syndrome - rev data1, data1 -- /* The MS-non-zero bit of the syndrome marks either the first bit -- that is different, or the top bit of the first zero byte. -- Shifting left now will bring the critical information into the -- top bits. 
*/ - clz pos, syndrome - rev data2, data2 - lsl data1, data1, pos -+ cmp limit, pos, lsr #3 - lsl data2, data2, pos - /* But we need to zero-extend (char is unsigned) the value and then - perform a signed 32-bit subtraction. */ - lsr data1, data1, #56 - sub result, data1, data2, lsr #56 -+ csel result, result, xzr, hi - ret - #else -+ /* Not reached the limit, must have found the end or a diff. */ -+ tbz limit, #63, L(not_limit) -+ add tmp1, limit, 8 -+ cbz limit, L(not_limit) -+ -+ lsl limit, tmp1, #3 /* Bits -> bytes. */ -+ mov mask, #~0 -+ lsr mask, mask, limit -+ bic data1, data1, mask -+ bic data2, data2, mask -+ -+ /* Make sure that the NUL byte is marked in the syndrome. */ -+ orr has_nul, has_nul, mask -+ -+L(not_limit): - /* For big-endian we cannot use the trick with the syndrome value - as carry-propagation can corrupt the upper bits if the trailing - bytes in the string contain 0x01. */ -@@ -134,10 +141,11 @@ L(not_limit): - rev has_nul, has_nul - orr syndrome, diff, has_nul - clz pos, syndrome -- /* The MS-non-zero bit of the syndrome marks either the first bit -- that is different, or the top bit of the first zero byte. -+ /* The most-significant-non-zero bit of the syndrome marks either the -+ first bit that is different, or the top bit of the first zero byte. - Shifting left now will bring the critical information into the - top bits. */ -+L(end_quick): - lsl data1, data1, pos - lsl data2, data2, pos - /* But we need to zero-extend (char is unsigned) the value and then -@@ -159,22 +167,12 @@ L(mutual_align): - neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */ - ldr data2, [src2], #8 - mov tmp2, #~0 -- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */ --#ifdef __AARCH64EB__ -- /* Big-endian. Early bytes are at MSB. */ -- lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */ --#else -- /* Little-endian. Early bytes are at LSB. */ -- lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */ --#endif -- and tmp3, limit_wd, #7 -- lsr limit_wd, limit_wd, #3 -- /* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */ -- add limit, limit, count -- add tmp3, tmp3, count -+ LS_FW tmp2, tmp2, tmp3 /* Shift (count & 63). */ -+ /* Adjust the limit and ensure it doesn't overflow. */ -+ adds limit, limit, count -+ csinv limit, limit, xzr, lo - orr data1, data1, tmp2 - orr data2, data2, tmp2 -- add limit_wd, limit_wd, tmp3, lsr #3 - b L(start_realigned) - - .p2align 4 -@@ -197,13 +195,11 @@ L(done): - /* Align the SRC1 to a dword by doing a bytewise compare and then do - the dword loop. */ - L(try_misaligned_words): -- lsr limit_wd, limit, #3 -- cbz count, L(do_misaligned) -+ cbz count, L(src1_aligned) - - neg count, count - and count, count, #7 - sub limit, limit, count -- lsr limit_wd, limit, #3 - - L(page_end_loop): - ldrb data1w, [src1], #1 -@@ -214,48 +210,100 @@ L(page_end_loop): - subs count, count, #1 - b.hi L(page_end_loop) - --L(do_misaligned): -- /* Prepare ourselves for the next page crossing. Unlike the aligned -- loop, we fetch 1 less dword because we risk crossing bounds on -- SRC2. */ -- mov count, #8 -- subs limit_wd, limit_wd, #1 -- b.lo L(done_loop) --L(loop_misaligned): -- and tmp2, src2, #0xff8 -- eor tmp2, tmp2, #0xff8 -- cbz tmp2, L(page_end_loop) -+ /* The following diagram explains the comparison of misaligned strings. -+ The bytes are shown in natural order. For little-endian, it is -+ reversed in the registers. The "x" bytes are before the string. -+ The "|" separates data that is loaded at one time. -+ src1 | a a a a a a a a | b b b c c c c c | . 
. . -+ src2 | x x x x x a a a a a a a a b b b | c c c c c . . . -+ -+ After shifting in each step, the data looks like this: -+ STEP_A STEP_B STEP_C -+ data1 a a a a a a a a b b b c c c c c b b b c c c c c -+ data2 a a a a a a a a b b b 0 0 0 0 0 0 0 0 c c c c c - -+ The bytes with "0" are eliminated from the syndrome via mask. -+ -+ Align SRC2 down to 16 bytes. This way we can read 16 bytes at a -+ time from SRC2. The comparison happens in 3 steps. After each step -+ the loop can exit, or read from SRC1 or SRC2. */ -+L(src1_aligned): -+ /* Calculate offset from 8 byte alignment to string start in bits. No -+ need to mask offset since shifts are ignoring upper bits. */ -+ lsl offset, src2, #3 -+ bic src2, src2, #0xf -+ mov mask, -1 -+ neg neg_offset, offset - ldr data1, [src1], #8 -- ldr data2, [src2], #8 -- sub tmp1, data1, zeroones -- orr tmp2, data1, #REP8_7f -- eor diff, data1, data2 /* Non-zero if differences found. */ -- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ -- ccmp diff, #0, #0, eq -- b.ne L(not_limit) -- subs limit_wd, limit_wd, #1 -- b.pl L(loop_misaligned) -+ ldp tmp1, tmp2, [src2], #16 -+ LS_BK mask, mask, neg_offset -+ and neg_offset, neg_offset, #63 /* Need actual value for cmp later. */ -+ /* Skip the first compare if data in tmp1 is irrelevant. */ -+ tbnz offset, 6, L(misaligned_mid_loop) - --L(done_loop): -- /* We found a difference or a NULL before the limit was reached. */ -- and limit, limit, #7 -- cbz limit, L(not_limit) -- /* Read the last word. */ -- sub src1, src1, 8 -- sub src2, src2, 8 -- ldr data1, [src1, limit] -- ldr data2, [src2, limit] -- sub tmp1, data1, zeroones -- orr tmp2, data1, #REP8_7f -+L(loop_misaligned): -+ /* STEP_A: Compare full 8 bytes when there is enough data from SRC2.*/ -+ LS_FW data2, tmp1, offset -+ LS_BK tmp1, tmp2, neg_offset -+ subs limit, limit, #8 -+ orr data2, data2, tmp1 /* 8 bytes from SRC2 combined from two regs.*/ -+ sub has_nul, data1, zeroones - eor diff, data1, data2 /* Non-zero if differences found. */ -- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ -- ccmp diff, #0, #0, eq -- b.ne L(not_limit) -+ orr tmp3, data1, #REP8_7f -+ csinv endloop, diff, xzr, hi /* If limit, set to all ones. */ -+ bic has_nul, has_nul, tmp3 /* Non-zero if NUL byte found in SRC1. */ -+ orr tmp3, endloop, has_nul -+ cbnz tmp3, L(full_check) -+ -+ ldr data1, [src1], #8 -+L(misaligned_mid_loop): -+ /* STEP_B: Compare first part of data1 to second part of tmp2. */ -+ LS_FW data2, tmp2, offset -+#ifdef __AARCH64EB__ -+ /* For big-endian we do a byte reverse to avoid carry-propagation -+ problem described above. This way we can reuse the has_nul in the -+ next step and also use syndrome value trick at the end. */ -+ rev tmp3, data1 -+ #define data1_fixed tmp3 -+#else -+ #define data1_fixed data1 -+#endif -+ sub has_nul, data1_fixed, zeroones -+ orr tmp3, data1_fixed, #REP8_7f -+ eor diff, data2, data1 /* Non-zero if differences found. */ -+ bic has_nul, has_nul, tmp3 /* Non-zero if NUL terminator. */ -+#ifdef __AARCH64EB__ -+ rev has_nul, has_nul -+#endif -+ cmp limit, neg_offset, lsr #3 -+ orr syndrome, diff, has_nul -+ bic syndrome, syndrome, mask /* Ignore later bytes. */ -+ csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. */ -+ cbnz tmp3, L(syndrome_check) -+ -+ /* STEP_C: Compare second part of data1 to first part of tmp1. */ -+ ldp tmp1, tmp2, [src2], #16 -+ cmp limit, #8 -+ LS_BK data2, tmp1, neg_offset -+ eor diff, data2, data1 /* Non-zero if differences found. 
*/ -+ orr syndrome, diff, has_nul -+ and syndrome, syndrome, mask /* Ignore earlier bytes. */ -+ csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. */ -+ cbnz tmp3, L(syndrome_check) -+ -+ ldr data1, [src1], #8 -+ sub limit, limit, #8 -+ b L(loop_misaligned) -+ -+#ifdef __AARCH64EB__ -+L(syndrome_check): -+ clz pos, syndrome -+ cmp pos, limit, lsl #3 -+ b.lo L(end_quick) -+#endif - - L(ret0): - mov result, #0 - ret -- - SYM_FUNC_END_PI(strncmp) - EXPORT_SYMBOL_NOHWKASAN(strncmp) -diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S -index 5051b3c1a4f12..79164e4390369 100644 ---- a/arch/arm64/mm/cache.S -+++ b/arch/arm64/mm/cache.S -@@ -231,8 +231,6 @@ SYM_FUNC_END_PI(__dma_flush_area) - */ - SYM_FUNC_START_PI(__dma_map_area) - add x1, x0, x1 -- cmp w2, #DMA_FROM_DEVICE -- b.eq __dma_inv_area - b __dma_clean_area - SYM_FUNC_END_PI(__dma_map_area) - -diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c -index b5447e53cd73e..b44931deb227b 100644 ---- a/arch/arm64/mm/copypage.c -+++ b/arch/arm64/mm/copypage.c -@@ -16,14 +16,15 @@ - - void copy_highpage(struct page *to, struct page *from) - { -- struct page *kto = page_address(to); -- struct page *kfrom = page_address(from); -+ void *kto = page_address(to); -+ void *kfrom = page_address(from); - - copy_page(kto, kfrom); - -+ page_kasan_tag_reset(to); -+ - if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { - set_bit(PG_mte_tagged, &to->flags); -- page_kasan_tag_reset(to); - /* - * We need smp_wmb() in between setting the flags and clearing the - * tags because if another thread reads page->flags and builds a -diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c -index aa0060178343a..60a8b6a8a42b5 100644 ---- a/arch/arm64/mm/extable.c -+++ b/arch/arm64/mm/extable.c -@@ -9,14 +9,19 @@ - int fixup_exception(struct pt_regs *regs) - { - const struct exception_table_entry *fixup; -+ unsigned long addr; - -- fixup = search_exception_tables(instruction_pointer(regs)); -- if (!fixup) -- return 0; -+ addr = instruction_pointer(regs); - -- if (in_bpf_jit(regs)) -+ /* Search the BPF tables first, these are formatted differently */ -+ fixup = search_bpf_extables(addr); -+ if (fixup) - return arm64_bpf_fixup_exception(fixup, regs); - -+ fixup = search_exception_tables(addr); -+ if (!fixup) -+ return 0; -+ - regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; - return 1; - } -diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c -index 9ae24e3b72be1..6327620397142 100644 ---- a/arch/arm64/mm/fault.c -+++ b/arch/arm64/mm/fault.c -@@ -43,7 +43,7 @@ - #include - - struct fault_info { -- int (*fn)(unsigned long far, unsigned int esr, -+ int (*fn)(unsigned long far, unsigned long esr, - struct pt_regs *regs); - int sig; - int code; -@@ -53,17 +53,17 @@ struct fault_info { - static const struct fault_info fault_info[]; - static struct fault_info debug_fault_info[]; - --static inline const struct fault_info *esr_to_fault_info(unsigned int esr) -+static inline const struct fault_info *esr_to_fault_info(unsigned long esr) - { - return fault_info + (esr & ESR_ELx_FSC); - } - --static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) -+static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr) - { - return debug_fault_info + DBG_ESR_EVT(esr); - } - --static void data_abort_decode(unsigned int esr) -+static void data_abort_decode(unsigned long esr) - { - pr_alert("Data abort info:\n"); - -@@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr) - (esr 
& ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); - } - --static void mem_abort_decode(unsigned int esr) -+static void mem_abort_decode(unsigned long esr) - { - pr_alert("Mem abort info:\n"); - -- pr_alert(" ESR = 0x%08x\n", esr); -+ pr_alert(" ESR = 0x%016lx\n", esr); - pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", - ESR_ELx_EC(esr), esr_get_class_string(esr), - (esr & ESR_ELx_IL) ? 32 : 16); -@@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr) - pr_alert(" EA = %lu, S1PTW = %lu\n", - (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, - (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); -- pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), -+ pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC), - esr_to_fault_info(esr)->name); - - if (esr_is_data_abort(esr)) -@@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma, - return 1; - } - --static bool is_el1_instruction_abort(unsigned int esr) -+static bool is_el1_instruction_abort(unsigned long esr) - { - return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; - } - --static bool is_el1_data_abort(unsigned int esr) -+static bool is_el1_data_abort(unsigned long esr) - { - return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; - } - --static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, -+static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { -- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; -+ unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE; - - if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) - return false; -@@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, - } - - static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, -- unsigned int esr, -+ unsigned long esr, - struct pt_regs *regs) - { - unsigned long flags; -@@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, - } - - static void die_kernel_fault(const char *msg, unsigned long addr, -- unsigned int esr, struct pt_regs *regs) -+ unsigned long esr, struct pt_regs *regs) - { - bust_spinlocks(1); - -@@ -302,11 +302,11 @@ static void die_kernel_fault(const char *msg, unsigned long addr, - show_pte(addr); - die("Oops", regs, esr); - bust_spinlocks(0); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - #ifdef CONFIG_KASAN_HW_TAGS --static void report_tag_fault(unsigned long addr, unsigned int esr, -+static void report_tag_fault(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { - /* -@@ -318,11 +318,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr, - } - #else - /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. 
*/ --static inline void report_tag_fault(unsigned long addr, unsigned int esr, -+static inline void report_tag_fault(unsigned long addr, unsigned long esr, - struct pt_regs *regs) { } - #endif - --static void do_tag_recovery(unsigned long addr, unsigned int esr, -+static void do_tag_recovery(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { - -@@ -337,9 +337,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, - isb(); - } - --static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) -+static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) - { -- unsigned int fsc = esr & ESR_ELx_FSC; -+ unsigned long fsc = esr & ESR_ELx_FSC; - - if (!is_el1_data_abort(esr)) - return false; -@@ -350,7 +350,12 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) - return false; - } - --static void __do_kernel_fault(unsigned long addr, unsigned int esr, -+static bool is_translation_fault(unsigned long esr) -+{ -+ return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT; -+} -+ -+static void __do_kernel_fault(unsigned long addr, unsigned long esr, - struct pt_regs *regs) - { - const char *msg; -@@ -382,7 +387,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, - } else if (addr < PAGE_SIZE) { - msg = "NULL pointer dereference"; - } else { -- if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) -+ if (is_translation_fault(esr) && -+ kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) - return; - - msg = "paging request"; -@@ -391,7 +397,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, - die_kernel_fault(msg, addr, esr, regs); - } - --static void set_thread_esr(unsigned long address, unsigned int esr) -+static void set_thread_esr(unsigned long address, unsigned long esr) - { - current->thread.fault_address = address; - -@@ -439,7 +445,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) - * exception level). Fail safe by not providing an ESR - * context record at all. - */ -- WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); -+ WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr); - esr = 0; - break; - } -@@ -448,7 +454,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) - current->thread.fault_code = esr; - } - --static void do_bad_area(unsigned long far, unsigned int esr, -+static void do_bad_area(unsigned long far, unsigned long esr, - struct pt_regs *regs) - { - unsigned long addr = untagged_addr(far); -@@ -467,8 +473,8 @@ static void do_bad_area(unsigned long far, unsigned int esr, - } - } - --#define VM_FAULT_BADMAP 0x010000 --#define VM_FAULT_BADACCESS 0x020000 -+#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) -+#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) - - static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, - unsigned int mm_flags, unsigned long vm_flags, -@@ -499,7 +505,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, - return handle_mm_fault(vma, addr, mm_flags, regs); - } - --static bool is_el0_instruction_abort(unsigned int esr) -+static bool is_el0_instruction_abort(unsigned long esr) - { - return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; - } -@@ -508,12 +514,12 @@ static bool is_el0_instruction_abort(unsigned int esr) - * Note: not valid for EL1 DC IVAC, but we never use that such that it - * should fault. EL0 cannot issue DC IVAC (undef). 
- */ --static bool is_write_abort(unsigned int esr) -+static bool is_write_abort(unsigned long esr) - { - return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); - } - --static int __kprobes do_page_fault(unsigned long far, unsigned int esr, -+static int __kprobes do_page_fault(unsigned long far, unsigned long esr, - struct pt_regs *regs) - { - const struct fault_info *inf; -@@ -671,7 +677,7 @@ no_context: - } - - static int __kprobes do_translation_fault(unsigned long far, -- unsigned int esr, -+ unsigned long esr, - struct pt_regs *regs) - { - unsigned long addr = untagged_addr(far); -@@ -683,19 +689,19 @@ static int __kprobes do_translation_fault(unsigned long far, - return 0; - } - --static int do_alignment_fault(unsigned long far, unsigned int esr, -+static int do_alignment_fault(unsigned long far, unsigned long esr, - struct pt_regs *regs) - { - do_bad_area(far, esr, regs); - return 0; - } - --static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) -+static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) - { - return 1; /* "fault" */ - } - --static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) -+static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) - { - const struct fault_info *inf; - unsigned long siaddr; -@@ -725,7 +731,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) - return 0; - } - --static int do_tag_check_fault(unsigned long far, unsigned int esr, -+static int do_tag_check_fault(unsigned long far, unsigned long esr, - struct pt_regs *regs) - { - /* -@@ -805,7 +811,7 @@ static const struct fault_info fault_info[] = { - { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, - }; - --void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) -+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) - { - const struct fault_info *inf = esr_to_fault_info(esr); - unsigned long addr = untagged_addr(far); -@@ -828,14 +834,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) - } - NOKPROBE_SYMBOL(do_mem_abort); - --void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) -+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) - { - arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, - addr, esr); - } - NOKPROBE_SYMBOL(do_sp_pc_abort); - --int __init early_brk64(unsigned long addr, unsigned int esr, -+int __init early_brk64(unsigned long addr, unsigned long esr, - struct pt_regs *regs); - - /* -@@ -855,7 +861,7 @@ static struct fault_info __refdata debug_fault_info[] = { - }; - - void __init hook_debug_fault_code(int nr, -- int (*fn)(unsigned long, unsigned int, struct pt_regs *), -+ int (*fn)(unsigned long, unsigned long, struct pt_regs *), - int sig, int code, const char *name) - { - BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); -@@ -888,7 +894,7 @@ static void debug_exception_exit(struct pt_regs *regs) - } - NOKPROBE_SYMBOL(debug_exception_exit); - --void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, -+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, - struct pt_regs *regs) - { - const struct fault_info *inf = esr_to_debug_fault_info(esr); -diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c -index 37a81754d9b61..3b269c7567984 100644 ---- a/arch/arm64/mm/init.c -+++ b/arch/arm64/mm/init.c -@@ -61,8 +61,34 @@ EXPORT_SYMBOL(memstart_addr); - * unless restricted on specific 
platforms (e.g. 30-bit on Raspberry Pi 4). - * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory, - * otherwise it is empty. -+ * -+ * Memory reservation for crash kernel either done early or deferred -+ * depending on DMA memory zones configs (ZONE_DMA) -- -+ * -+ * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized -+ * here instead of max_zone_phys(). This lets early reservation of -+ * crash kernel memory which has a dependency on arm64_dma_phys_limit. -+ * Reserving memory early for crash kernel allows linear creation of block -+ * mappings (greater than page-granularity) for all the memory bank rangs. -+ * In this scheme a comparatively quicker boot is observed. -+ * -+ * If ZONE_DMA configs are defined, crash kernel memory reservation -+ * is delayed until DMA zone memory range size initilazation performed in -+ * zone_sizes_init(). The defer is necessary to steer clear of DMA zone -+ * memory range to avoid overlap allocation. So crash kernel memory boundaries -+ * are not known when mapping all bank memory ranges, which otherwise means -+ * not possible to exclude crash kernel range from creating block mappings -+ * so page-granularity mappings are created for the entire memory range. -+ * Hence a slightly slower boot is observed. -+ * -+ * Note: Page-granularity mapppings are necessary for crash kernel memory -+ * range for shrinking its size via /sys/kernel/kexec_crash_size interface. - */ --phys_addr_t arm64_dma_phys_limit __ro_after_init; -+#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32) -+phys_addr_t __ro_after_init arm64_dma_phys_limit; -+#else -+phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; -+#endif - - #ifdef CONFIG_KEXEC_CORE - /* -@@ -153,50 +179,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) - if (!arm64_dma_phys_limit) - arm64_dma_phys_limit = dma32_phys_limit; - #endif -- if (!arm64_dma_phys_limit) -- arm64_dma_phys_limit = PHYS_MASK + 1; - max_zone_pfns[ZONE_NORMAL] = max; - - free_area_init(max_zone_pfns); - } - --int pfn_valid(unsigned long pfn) --{ -- phys_addr_t addr = PFN_PHYS(pfn); -- struct mem_section *ms; -- -- /* -- * Ensure the upper PAGE_SHIFT bits are clear in the -- * pfn. Else it might lead to false positives when -- * some of the upper bits are set, but the lower bits -- * match a valid pfn. -- */ -- if (PHYS_PFN(addr) != pfn) -- return 0; -- -- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) -- return 0; -- -- ms = __pfn_to_section(pfn); -- if (!valid_section(ms)) -- return 0; -- -- /* -- * ZONE_DEVICE memory does not have the memblock entries. -- * memblock_is_map_memory() check for ZONE_DEVICE based -- * addresses will always fail. Even the normal hotplugged -- * memory will never have MEMBLOCK_NOMAP flag set in their -- * memblock entries. Skip memblock search for all non early -- * memory sections covering all of hotplug memory including -- * both normal and ZONE_DEVICE based. 
-- */ -- if (!early_section(ms)) -- return pfn_section_valid(ms, pfn); -- -- return memblock_is_memory(addr); --} --EXPORT_SYMBOL(pfn_valid); -- - int pfn_is_map_memory(unsigned long pfn) - { - phys_addr_t addr = PFN_PHYS(pfn); -@@ -352,6 +339,9 @@ void __init arm64_memblock_init(void) - - early_init_fdt_scan_reserved_mem(); - -+ if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) -+ reserve_crashkernel(); -+ - high_memory = __va(memblock_end_of_DRAM() - 1) + 1; - } - -@@ -398,7 +388,8 @@ void __init bootmem_init(void) - * request_standard_resources() depends on crashkernel's memory being - * reserved, so do it here. - */ -- reserve_crashkernel(); -+ if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)) -+ reserve_crashkernel(); - - memblock_dump_all(); - } -diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c -index b7c81dacabf07..b21f91cd830db 100644 ---- a/arch/arm64/mm/ioremap.c -+++ b/arch/arm64/mm/ioremap.c -@@ -99,3 +99,11 @@ void __init early_ioremap_init(void) - { - early_ioremap_setup(); - } -+ -+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, -+ unsigned long flags) -+{ -+ unsigned long pfn = PHYS_PFN(offset); -+ -+ return pfn_is_map_memory(pfn); -+} -diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c -index a38f54cd638c2..77ada00280d93 100644 ---- a/arch/arm64/mm/mmap.c -+++ b/arch/arm64/mm/mmap.c -@@ -7,8 +7,10 @@ - - #include - #include -+#include - #include - -+#include - #include - - /* -@@ -38,3 +40,18 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) - { - return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); - } -+ -+static int __init adjust_protection_map(void) -+{ -+ /* -+ * With Enhanced PAN we can honour the execute-only permissions as -+ * there is no PAN override with such mappings. -+ */ -+ if (cpus_have_const_cap(ARM64_HAS_EPAN)) { -+ protection_map[VM_EXEC] = PAGE_EXECONLY; -+ protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY; -+ } -+ -+ return 0; -+} -+arch_initcall(adjust_protection_map); -diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c -index cfd9deb347c38..6680689242df3 100644 ---- a/arch/arm64/mm/mmu.c -+++ b/arch/arm64/mm/mmu.c -@@ -63,6 +63,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; - static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; - - static DEFINE_SPINLOCK(swapper_pgdir_lock); -+static DEFINE_MUTEX(fixmap_lock); - - void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) - { -@@ -328,6 +329,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, - } - BUG_ON(p4d_bad(p4d)); - -+ /* -+ * No need for locking during early boot. And it doesn't work as -+ * expected with KASLR enabled. 
-+ */ -+ if (system_state != SYSTEM_BOOTING) -+ mutex_lock(&fixmap_lock); - pudp = pud_set_fixmap_offset(p4dp, addr); - do { - pud_t old_pud = READ_ONCE(*pudp); -@@ -358,6 +365,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, - } while (pudp++, addr = next, addr != end); - - pud_clear_fixmap(); -+ if (system_state != SYSTEM_BOOTING) -+ mutex_unlock(&fixmap_lock); - } - - static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, -@@ -516,7 +525,7 @@ static void __init map_mem(pgd_t *pgdp) - */ - BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end)); - -- if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE)) -+ if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE)) - flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; - - /* -@@ -527,6 +536,17 @@ static void __init map_mem(pgd_t *pgdp) - */ - memblock_mark_nomap(kernel_start, kernel_end - kernel_start); - -+#ifdef CONFIG_KEXEC_CORE -+ if (crash_mem_map) { -+ if (IS_ENABLED(CONFIG_ZONE_DMA) || -+ IS_ENABLED(CONFIG_ZONE_DMA32)) -+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; -+ else if (crashk_res.end) -+ memblock_mark_nomap(crashk_res.start, -+ resource_size(&crashk_res)); -+ } -+#endif -+ - /* map all the memory banks */ - for_each_mem_range(i, &start, &end) { - if (start >= end) -@@ -553,6 +573,25 @@ static void __init map_mem(pgd_t *pgdp) - __map_memblock(pgdp, kernel_start, kernel_end, - PAGE_KERNEL, NO_CONT_MAPPINGS); - memblock_clear_nomap(kernel_start, kernel_end - kernel_start); -+ -+ /* -+ * Use page-level mappings here so that we can shrink the region -+ * in page granularity and put back unused memory to buddy system -+ * through /sys/kernel/kexec_crash_size interface. -+ */ -+#ifdef CONFIG_KEXEC_CORE -+ if (crash_mem_map && -+ !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) { -+ if (crashk_res.end) { -+ __map_memblock(pgdp, crashk_res.start, -+ crashk_res.end + 1, -+ PAGE_KERNEL, -+ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); -+ memblock_clear_nomap(crashk_res.start, -+ resource_size(&crashk_res)); -+ } -+ } -+#endif - } - - void mark_rodata_ro(void) -@@ -616,6 +655,8 @@ early_param("rodata", parse_rodata); - #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - static int __init map_entry_trampoline(void) - { -+ int i; -+ - pgprot_t prot = rodata_enabled ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; - phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); - -@@ -624,11 +665,15 @@ static int __init map_entry_trampoline(void) - - /* Map only the text into the trampoline page table */ - memset(tramp_pg_dir, 0, PGD_SIZE); -- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, -- prot, __pgd_pgtable_alloc, 0); -+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, -+ entry_tramp_text_size(), prot, -+ __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS); - - /* Map both the text and data into the kernel page table */ -- __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); -+ for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) -+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, -+ pa_start + i * PAGE_SIZE, prot); -+ - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { - extern char __entry_tramp_data_start[]; - -@@ -1499,6 +1544,11 @@ int arch_add_memory(int nid, u64 start, u64 size, - if (ret) - __remove_pgd_mapping(swapper_pg_dir, - __phys_to_virt(start), size); -+ else { -+ max_pfn = PFN_UP(start + size); -+ max_low_pfn = max_pfn; -+ } -+ - return ret; - } - -diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c -index 7c4ef56265ee1..fd6cabc6d033a 100644 ---- a/arch/arm64/mm/mteswap.c -+++ b/arch/arm64/mm/mteswap.c -@@ -62,7 +62,12 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page) - * the new page->flags are visible before the tags were updated. - */ - smp_wmb(); -- mte_restore_page_tags(page_address(page), tags); -+ /* -+ * Test PG_mte_tagged again in case it was racing with another -+ * set_pte_at(). -+ */ -+ if (!test_and_set_bit(PG_mte_tagged, &page->flags)) -+ mte_restore_page_tags(page_address(page), tags); - - return true; - } -diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S -index d35c90d2e47ad..1a9684b114745 100644 ---- a/arch/arm64/mm/proc.S -+++ b/arch/arm64/mm/proc.S -@@ -46,18 +46,20 @@ - #endif - - #ifdef CONFIG_KASAN_HW_TAGS --#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1 --#else -+#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1 -+#elif defined(CONFIG_ARM64_MTE) - /* - * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on - * TBI being enabled at EL1. - */ - #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1 -+#else -+#define TCR_MTE_FLAGS 0 - #endif - - /* - * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and -- * changed during __cpu_setup to Normal Tagged if the system supports MTE. -+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE. - */ - #define MAIR_EL1_SET \ - (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ -@@ -421,46 +423,8 @@ SYM_FUNC_START(__cpu_setup) - mov_q mair, MAIR_EL1_SET - mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ -- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS -- --#ifdef CONFIG_ARM64_MTE -- /* -- * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported -- * (ID_AA64PFR1_EL1[11:8] > 1). 
-- */ -- mrs x10, ID_AA64PFR1_EL1 -- ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 -- cmp x10, #ID_AA64PFR1_MTE -- b.lt 1f -- -- /* Normal Tagged memory type at the corresponding MAIR index */ -- mov x10, #MAIR_ATTR_NORMAL_TAGGED -- bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8 -+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS - -- mov x10, #KERNEL_GCR_EL1 -- msr_s SYS_GCR_EL1, x10 -- -- /* -- * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then -- * RGSR_EL1.SEED must be non-zero for IRG to produce -- * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we -- * must initialize it. -- */ -- mrs x10, CNTVCT_EL0 -- ands x10, x10, #SYS_RGSR_EL1_SEED_MASK -- csinc x10, x10, xzr, ne -- lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT -- msr_s SYS_RGSR_EL1, x10 -- -- /* clear any pending tag check faults in TFSR*_EL1 */ -- msr_s SYS_TFSR_EL1, xzr -- msr_s SYS_TFSRE0_EL1, xzr -- -- /* set the TCR_EL1 bits */ -- mov_q x10, TCR_MTE_FLAGS -- orr tcr, tcr, x10 --1: --#endif - tcr_clear_errata_bits tcr, x9, x5 - - #ifdef CONFIG_ARM64_VA_BITS_52 -diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c -index 1c403536c9bb0..9bc4066c5bf33 100644 ---- a/arch/arm64/mm/ptdump.c -+++ b/arch/arm64/mm/ptdump.c -@@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = { - { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, - { KASAN_SHADOW_END, "Kasan shadow end" }, - #endif -- { BPF_JIT_REGION_START, "BPF start" }, -- { BPF_JIT_REGION_END, "BPF end" }, - { MODULES_VADDR, "Modules start" }, - { MODULES_END, "Modules end" }, - { VMALLOC_START, "vmalloc() area" }, -diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c -index 803e7773fa869..4895b4d7e150f 100644 ---- a/arch/arm64/net/bpf_jit_comp.c -+++ b/arch/arm64/net/bpf_jit_comp.c -@@ -788,7 +788,10 @@ emit_cond_jmp: - u64 imm64; - - imm64 = (u64)insn1.imm << 32 | (u32)imm; -- emit_a64_mov_i64(dst, imm64, ctx); -+ if (bpf_pseudo_func(insn)) -+ emit_addr_mov_i64(dst, imm64, ctx); -+ else -+ emit_a64_mov_i64(dst, imm64, ctx); - - return 1; - } -@@ -1042,15 +1045,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) - goto out_off; - } - -- /* 1. Initial fake pass to compute ctx->idx. */ -- -- /* Fake pass to fill in ctx->offset. */ -- if (build_body(&ctx, extra_pass)) { -+ /* -+ * 1. Initial fake pass to compute ctx->idx and ctx->offset. -+ * -+ * BPF line info needs ctx->offset[i] to be the offset of -+ * instruction[i] in jited image, so build prologue first. 
-+ */ -+ if (build_prologue(&ctx, was_classic)) { - prog = orig_prog; - goto out_off; - } - -- if (build_prologue(&ctx, was_classic)) { -+ if (build_body(&ctx, extra_pass)) { - prog = orig_prog; - goto out_off; - } -@@ -1110,6 +1116,7 @@ skip_init_ctx: - bpf_jit_binary_free(header); - prog->bpf_func = NULL; - prog->jited = 0; -+ prog->jited_len = 0; - goto out_off; - } - bpf_jit_binary_lock_ro(header); -@@ -1123,6 +1130,11 @@ skip_init_ctx: - prog->jited_len = prog_size; - - if (!prog->is_func || extra_pass) { -+ int i; -+ -+ /* offset[prog->len] is the size of program */ -+ for (i = 0; i <= prog->len; i++) -+ ctx.offset[i] *= AARCH64_INSN_SIZE; - bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); - out_off: - kfree(ctx.offset); -@@ -1138,15 +1150,12 @@ out: - - u64 bpf_jit_alloc_exec_limit(void) - { -- return BPF_JIT_REGION_SIZE; -+ return VMALLOC_END - VMALLOC_START; - } - - void *bpf_jit_alloc_exec(unsigned long size) - { -- return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START, -- BPF_JIT_REGION_END, GFP_KERNEL, -- PAGE_KERNEL, 0, NUMA_NO_NODE, -- __builtin_return_address(0)); -+ return vmalloc(size); - } - - void bpf_jit_free_exec(void *addr) -diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps -index 49305c2e6dfd3..fcaeec5a51258 100644 ---- a/arch/arm64/tools/cpucaps -+++ b/arch/arm64/tools/cpucaps -@@ -42,6 +42,7 @@ MTE - SPECTRE_V2 - SPECTRE_V3A - SPECTRE_V4 -+SPECTRE_BHB - SSBS - SVE - UNMAP_KERNEL_AT_EL0 -@@ -53,6 +54,11 @@ WORKAROUND_1418040 - WORKAROUND_1463225 - WORKAROUND_1508412 - WORKAROUND_1542419 -+WORKAROUND_1742098 -+WORKAROUND_2457168 -+WORKAROUND_TRBE_OVERWRITE_FILL_MODE -+WORKAROUND_TSB_FLUSH_FAILURE -+WORKAROUND_TRBE_WRITE_OUT_OF_RANGE - WORKAROUND_CAVIUM_23154 - WORKAROUND_CAVIUM_27456 - WORKAROUND_CAVIUM_30115 -diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c -index cb2a0d94a144d..2df115d0e2105 100644 ---- a/arch/csky/abiv1/alignment.c -+++ b/arch/csky/abiv1/alignment.c -@@ -294,7 +294,7 @@ bad_area: - __func__, opcode, rz, rx, imm, addr); - show_regs(regs); - bust_spinlocks(0); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr); -diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h -index c40f06ee8d3ef..ac5a54f57d407 100644 ---- a/arch/csky/include/asm/uaccess.h -+++ b/arch/csky/include/asm/uaccess.h -@@ -3,14 +3,13 @@ - #ifndef __ASM_CSKY_UACCESS_H - #define __ASM_CSKY_UACCESS_H - --#define user_addr_max() \ -- (uaccess_kernel() ? 
KERNEL_DS.seg : get_fs().seg) -+#define user_addr_max() (current_thread_info()->addr_limit.seg) - - static inline int __access_ok(unsigned long addr, unsigned long size) - { -- unsigned long limit = current_thread_info()->addr_limit.seg; -+ unsigned long limit = user_addr_max(); - -- return ((addr < limit) && ((addr + size) < limit)); -+ return (size <= limit) && (addr <= (limit - size)); - } - #define __access_ok __access_ok - -diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c -index ab55e98ee8f62..75e1f9df5f604 100644 ---- a/arch/csky/kernel/perf_callchain.c -+++ b/arch/csky/kernel/perf_callchain.c -@@ -49,7 +49,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, - { - struct stackframe buftail; - unsigned long lr = 0; -- unsigned long *user_frame_tail = (unsigned long *)fp; -+ unsigned long __user *user_frame_tail = (unsigned long __user *)fp; - - /* Check accessibility of one struct frame_tail beyond */ - if (!access_ok(user_frame_tail, sizeof(buftail))) -@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, - void perf_callchain_user(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - unsigned long fp = 0; - - /* C-SKY does not support virtualization. */ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) -+ if (guest_cbs && guest_cbs->is_in_guest()) - return; - - fp = regs->regs[4]; -@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, - void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct stackframe fr; - - /* C-SKY does not support virtualization. 
*/ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - pr_warn("C-SKY does not support perf in guest mode!"); - return; - } -diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c -index 8fffa34d4e1c5..bd92ac376e157 100644 ---- a/arch/csky/kernel/probes/kprobes.c -+++ b/arch/csky/kernel/probes/kprobes.c -@@ -1,5 +1,7 @@ - // SPDX-License-Identifier: GPL-2.0+ - -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -28,7 +30,7 @@ static int __kprobes patch_text_cb(void *priv) - struct csky_insn_patch *param = priv; - unsigned int addr = (unsigned int)param->addr; - -- if (atomic_inc_return(¶m->cpu_count) == 1) { -+ if (atomic_inc_return(¶m->cpu_count) == num_online_cpus()) { - *(u16 *) addr = cpu_to_le16(param->opcode); - dcache_wb_range(addr, addr + 2); - atomic_inc(¶m->cpu_count); -@@ -77,10 +79,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) - { - unsigned long probe_addr = (unsigned long)p->addr; - -- if (probe_addr & 0x1) { -- pr_warn("Address not aligned.\n"); -- return -EINVAL; -- } -+ if (probe_addr & 0x1) -+ return -EILSEQ; - - /* copy instruction */ - p->opcode = le32_to_cpu(*p->addr); -@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) - - void __kprobes arch_remove_kprobe(struct kprobe *p) - { -+ if (p->ainsn.api.insn) { -+ free_insn_slot(p->ainsn.api.insn, 0); -+ p->ainsn.api.insn = NULL; -+ } - } - - static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) -@@ -225,7 +229,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, - break; - case KPROBE_HIT_SS: - case KPROBE_REENTER: -- pr_warn("Unrecoverable kprobe detected.\n"); -+ pr_warn("Failed to recover from reentered kprobes.\n"); - dump_kprobe(p); - BUG(); - break; -diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c -index c7b763d2f526e..8867ddf3e6c77 100644 ---- a/arch/csky/kernel/signal.c -+++ b/arch/csky/kernel/signal.c -@@ -136,7 +136,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, - static int - setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) - { -- struct rt_sigframe *frame; -+ struct rt_sigframe __user *frame; - int err = 0; - - frame = get_sigframe(ksig, regs, sizeof(*frame)); -diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c -index e5fbf8653a215..6e426fba01193 100644 ---- a/arch/csky/kernel/traps.c -+++ b/arch/csky/kernel/traps.c -@@ -109,7 +109,7 @@ void die(struct pt_regs *regs, const char *str) - if (panic_on_oops) - panic("Fatal exception"); - if (ret != NOTIFY_STOP) -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) -@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs) - - asmlinkage void do_trap_fpe(struct pt_regs *regs) - { --#ifdef CONFIG_CPU_HAS_FP -+#ifdef CONFIG_CPU_HAS_FPU - return fpu_fpe(regs); - #else - do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, -@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs) - - asmlinkage void do_trap_priv(struct pt_regs *regs) - { --#ifdef CONFIG_CPU_HAS_FP -+#ifdef CONFIG_CPU_HAS_FPU - if (user_mode(regs) && fpu_libc_helper(regs)) - return; - #endif -diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c -index 466ad949818a6..7215a46b6b8eb 100644 ---- a/arch/csky/mm/fault.c -+++ b/arch/csky/mm/fault.c -@@ -67,7 +67,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr) - pr_alert("Unable to handle 
kernel paging request at virtual " - "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc); - die(regs, "Oops"); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) -diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c -index bdbe988d8dbcf..a92c39e03802e 100644 ---- a/arch/h8300/kernel/traps.c -+++ b/arch/h8300/kernel/traps.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -106,7 +107,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err) - dump(fp); - - spin_unlock_irq(&die_lock); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - static int kstack_depth_to_print = 24; -diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c -index d4bc9c16f2df9..b465441f490df 100644 ---- a/arch/h8300/mm/fault.c -+++ b/arch/h8300/mm/fault.c -@@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, - printk(" at virtual address %08lx\n", address); - if (!user_mode(regs)) - die("Oops", regs, error_code); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - - return 1; - } -diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h -deleted file mode 100644 -index ee6c61423a058..0000000000000 ---- a/arch/hexagon/include/asm/timer-regs.h -+++ /dev/null -@@ -1,26 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-only */ --/* -- * Timer support for Hexagon -- * -- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. -- */ -- --#ifndef _ASM_TIMER_REGS_H --#define _ASM_TIMER_REGS_H -- --/* This stuff should go into a platform specific file */ --#define TCX0_CLK_RATE 19200 --#define TIMER_ENABLE 0 --#define TIMER_CLR_ON_MATCH 1 -- --/* -- * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until -- * release 1.1, and then it's "adjustable" and probably not defaulted. -- */ --#define RTOS_TIMER_INT 3 --#ifdef CONFIG_HEXAGON_COMET --#define RTOS_TIMER_REGS_ADDR 0xAB000000UL --#endif --#define SLEEP_CLK_RATE 32000 -- --#endif -diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h -index 8d4ec76fceb45..dfe69e118b2be 100644 ---- a/arch/hexagon/include/asm/timex.h -+++ b/arch/hexagon/include/asm/timex.h -@@ -7,11 +7,10 @@ - #define _ASM_TIMEX_H - - #include --#include - #include - - /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */ --#define CLOCK_TICK_RATE TCX0_CLK_RATE -+#define CLOCK_TICK_RATE 19200 - - #define ARCH_HAS_READ_CURRENT_TIMER - -diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h -index ef5bfef8d490c..719ba3f3c45cd 100644 ---- a/arch/hexagon/include/asm/uaccess.h -+++ b/arch/hexagon/include/asm/uaccess.h -@@ -25,17 +25,17 @@ - * Returns true (nonzero) if the memory block *may* be valid, false (zero) - * if it is definitely invalid. - * -- * User address space in Hexagon, like x86, goes to 0xbfffffff, so the -- * simple MSB-based tests used by MIPS won't work. Some further -- * optimization is probably possible here, but for now, keep it -- * reasonably simple and not *too* slow. After all, we've got the -- * MMU for backup. - */ -+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -+#define user_addr_max() (uaccess_kernel() ? 
~0UL : TASK_SIZE) - --#define __access_ok(addr, size) \ -- ((get_fs().seg == KERNEL_DS.seg) || \ -- (((unsigned long)addr < get_fs().seg) && \ -- (unsigned long)size < (get_fs().seg - (unsigned long)addr))) -+static inline int __access_ok(unsigned long addr, unsigned long size) -+{ -+ unsigned long limit = TASK_SIZE; -+ -+ return (size <= limit) && (addr <= (limit - size)); -+} -+#define __access_ok __access_ok - - /* - * When a kernel-mode page fault is taken, the faulting instruction -diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c -index feffe527ac929..febc95714d756 100644 ---- a/arch/hexagon/kernel/time.c -+++ b/arch/hexagon/kernel/time.c -@@ -17,9 +17,10 @@ - #include - #include - --#include - #include - -+#define TIMER_ENABLE BIT(0) -+ - /* - * For the clocksource we need: - * pcycle frequency (600MHz) -@@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz; - cycles_t thread_freq_mhz; - cycles_t sleep_clk_freq; - -+/* -+ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until -+ * release 1.1, and then it's "adjustable" and probably not defaulted. -+ */ -+#define RTOS_TIMER_INT 3 -+#define RTOS_TIMER_REGS_ADDR 0xAB000000UL -+ - static struct resource rtos_timer_resources[] = { - { - .start = RTOS_TIMER_REGS_ADDR, -@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt) - iowrite32(0, &rtos_timer->clear); - - iowrite32(delta, &rtos_timer->match); -- iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable); -+ iowrite32(TIMER_ENABLE, &rtos_timer->enable); - return 0; - } - -diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c -index edfc35dafeb19..1240f038cce02 100644 ---- a/arch/hexagon/kernel/traps.c -+++ b/arch/hexagon/kernel/traps.c -@@ -214,7 +214,7 @@ int die(const char *str, struct pt_regs *regs, long err) - panic("Fatal exception"); - - oops_exit(); -- do_exit(err); -+ make_task_dead(err); - return 0; - } - -diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c -index d35d69d6588c4..55f75392857b0 100644 ---- a/arch/hexagon/lib/io.c -+++ b/arch/hexagon/lib/io.c -@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len) - *dst++ = *src; - - } -+EXPORT_SYMBOL(__raw_readsw); - - /* - * __raw_writesw - read words a short at a time -@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len) - - - } -+EXPORT_SYMBOL(__raw_writesw); - - /* Pretty sure len is pre-adjusted for the length of the access already */ - void __raw_readsl(const void __iomem *addr, void *data, int len) -@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len) - - - } -+EXPORT_SYMBOL(__raw_readsl); - - void __raw_writesl(void __iomem *addr, const void *data, int len) - { -@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len) - - - } -+EXPORT_SYMBOL(__raw_writesl); -diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig -index 1e33666fa679b..89869aff8ca29 100644 ---- a/arch/ia64/Kconfig -+++ b/arch/ia64/Kconfig -@@ -8,6 +8,7 @@ menu "Processor type and features" - - config IA64 - bool -+ select ARCH_HAS_CPU_FINALIZE_INIT - select ARCH_HAS_DMA_MARK_CLEAN - select ARCH_HAS_STRNCPY_FROM_USER - select ARCH_HAS_STRNLEN_USER -@@ -323,7 +324,7 @@ config ARCH_PROC_KCORE_TEXT - depends on PROC_KCORE - - config IA64_MCA_RECOVERY -- tristate "MCA recovery from errors other than TLB." -+ bool "MCA recovery from errors other than TLB." 
- - config IA64_PALINFO - tristate "/proc/pal support" -diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug -index 40ca23bd228d6..2ce008e2d1644 100644 ---- a/arch/ia64/Kconfig.debug -+++ b/arch/ia64/Kconfig.debug -@@ -39,7 +39,7 @@ config DISABLE_VHPT - - config IA64_DEBUG_CMPXCHG - bool "Turn on compare-and-exchange bug checking (slow!)" -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && PRINTK - help - Selecting this option turns on bug checking for the IA-64 - compare-and-exchange instructions. This is slow! Itaniums -diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h -deleted file mode 100644 -index 0d6b9bded56c6..0000000000000 ---- a/arch/ia64/include/asm/bugs.h -+++ /dev/null -@@ -1,20 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --/* -- * This is included by init/main.c to check for architecture-dependent bugs. -- * -- * Needs: -- * void check_bugs(void); -- * -- * Based on . -- * -- * Modified 1998, 1999, 2003 -- * David Mosberger-Tang , Hewlett-Packard Co. -- */ --#ifndef _ASM_IA64_BUGS_H --#define _ASM_IA64_BUGS_H -- --#include -- --extern void check_bugs (void); -- --#endif /* _ASM_IA64_BUGS_H */ -diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h -index 2d8bcdc27d7f8..05e7c9ad1a965 100644 ---- a/arch/ia64/include/asm/processor.h -+++ b/arch/ia64/include/asm/processor.h -@@ -542,7 +542,7 @@ ia64_get_irr(unsigned int vector) - { - unsigned int reg = vector / 64; - unsigned int bit = vector % 64; -- u64 irr; -+ unsigned long irr; - - switch (reg) { - case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break; -diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h -index 869a3ac6bf23a..7ccc077a60bed 100644 ---- a/arch/ia64/include/asm/timex.h -+++ b/arch/ia64/include/asm/timex.h -@@ -39,6 +39,7 @@ get_cycles (void) - ret = ia64_getreg(_IA64_REG_AR_ITC); - return ret; - } -+#define get_cycles get_cycles - - extern void ia64_cpu_local_tick (void); - extern unsigned long long ia64_native_sched_clock (void); -diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c -index 35adcf89035ad..99300850abc19 100644 ---- a/arch/ia64/kernel/iosapic.c -+++ b/arch/ia64/kernel/iosapic.c -@@ -834,7 +834,7 @@ iosapic_unregister_intr (unsigned int gsi) - if (iosapic_intr_info[irq].count == 0) { - #ifdef CONFIG_SMP - /* Clear affinity */ -- cpumask_setall(irq_get_affinity_mask(irq)); -+ irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask); - #endif - /* Clear the interrupt information */ - iosapic_intr_info[irq].dest = 0; -diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c -index ecef17c7c35b1..275b9ea58c643 100644 ---- a/arch/ia64/kernel/irq.c -+++ b/arch/ia64/kernel/irq.c -@@ -57,8 +57,8 @@ static char irq_redir [NR_IRQS]; // = { [0 ... 
NR_IRQS-1] = 1 }; - void set_irq_affinity_info (unsigned int irq, int hwid, int redir) - { - if (irq < NR_IRQS) { -- cpumask_copy(irq_get_affinity_mask(irq), -- cpumask_of(cpu_logical_id(hwid))); -+ irq_data_update_affinity(irq_get_irq_data(irq), -+ cpumask_of(cpu_logical_id(hwid))); - irq_redir[irq] = (char) (redir & 0xff); - } - } -diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c -index 441ed04b10378..d4048518a1d7d 100644 ---- a/arch/ia64/kernel/kprobes.c -+++ b/arch/ia64/kernel/kprobes.c -@@ -398,7 +398,8 @@ static void kretprobe_trampoline(void) - - int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) - { -- regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL); -+ regs->cr_iip = __kretprobe_trampoline_handler(regs, -+ dereference_function_descriptor(kretprobe_trampoline), NULL); - /* - * By returning a non-zero value, we are telling - * kprobe_handler() that we don't want the post_handler -@@ -414,7 +415,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, - ri->fp = NULL; - - /* Replace the return addr with trampoline addr */ -- regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip; -+ regs->b0 = (unsigned long)dereference_function_descriptor(kretprobe_trampoline); - } - - /* Check the instruction in the slot is break */ -@@ -902,14 +903,14 @@ static struct kprobe trampoline_p = { - int __init arch_init_kprobes(void) - { - trampoline_p.addr = -- (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip; -+ dereference_function_descriptor(kretprobe_trampoline); - return register_kprobe(&trampoline_p); - } - - int __kprobes arch_trampoline_kprobe(struct kprobe *p) - { - if (p->addr == -- (kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip) -+ dereference_function_descriptor(kretprobe_trampoline)) - return 1; - - return 0; -diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c -index 5bfc79be4cefe..23c203639a968 100644 ---- a/arch/ia64/kernel/mca_drv.c -+++ b/arch/ia64/kernel/mca_drv.c -@@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) - spin_unlock(&mca_bh_lock); - - /* This process is about to be killed itself */ -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - /** -diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c -index df5c28f252e3d..025e5133c860c 100644 ---- a/arch/ia64/kernel/msi_ia64.c -+++ b/arch/ia64/kernel/msi_ia64.c -@@ -37,7 +37,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata, - msg.data = data; - - pci_write_msi_msg(irq, &msg); -- cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu)); -+ irq_data_update_affinity(idata, cpumask_of(cpu)); - - return 0; - } -@@ -132,7 +132,7 @@ static int dmar_msi_set_affinity(struct irq_data *data, - msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); - - dmar_msi_write(irq, &msg); -- cpumask_copy(irq_data_get_affinity_mask(data), mask); -+ irq_data_update_affinity(data, mask); - - return 0; - } -diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c -index a25ab9b37953e..bb99b543dc672 100644 ---- a/arch/ia64/kernel/salinfo.c -+++ b/arch/ia64/kernel/salinfo.c -@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu) - * 'data' contains an integer that corresponds to the feature we're - * testing - */ --static int proc_salinfo_show(struct seq_file *m, void *v) -+static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v) - { - unsigned long data = (unsigned long)v; - seq_puts(m, 
(sal_platform_features & data) ? "1\n" : "0\n"); -diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c -index 31fb84de2d214..041681e5de472 100644 ---- a/arch/ia64/kernel/setup.c -+++ b/arch/ia64/kernel/setup.c -@@ -1070,8 +1070,7 @@ cpu_init (void) - } - } - --void __init --check_bugs (void) -+void __init arch_cpu_finalize_init(void) - { - ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, - (unsigned long) __end___mckinley_e9_bundles); -diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c -index e13cb905930fb..753642366e12e 100644 ---- a/arch/ia64/kernel/traps.c -+++ b/arch/ia64/kernel/traps.c -@@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err) - if (panic_on_oops) - panic("Fatal exception"); - -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - return 0; - } - -diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c -index 42e025cfbd088..9817caba07026 100644 ---- a/arch/ia64/mm/contig.c -+++ b/arch/ia64/mm/contig.c -@@ -77,7 +77,7 @@ skip: - return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; - } - --static inline void -+static inline __init void - alloc_per_cpu_data(void) - { - size_t size = PERCPU_PAGE_SIZE * num_possible_cpus(); -diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c -index 02de2e70c5874..4796cccbf74f3 100644 ---- a/arch/ia64/mm/fault.c -+++ b/arch/ia64/mm/fault.c -@@ -259,7 +259,7 @@ retry: - regs = NULL; - bust_spinlocks(0); - if (regs) -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - return; - - out_of_memory: -diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c -index f993cb36c0626..921db957d2e67 100644 ---- a/arch/ia64/mm/hugetlbpage.c -+++ b/arch/ia64/mm/hugetlbpage.c -@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz) - - pgd = pgd_offset(mm, taddr); - if (pgd_present(*pgd)) { -- p4d = p4d_offset(pgd, addr); -+ p4d = p4d_offset(pgd, taddr); - if (p4d_present(*p4d)) { - pud = pud_offset(p4d, taddr); - if (pud_present(*pud)) { -diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c -index d6579ec3ea324..4c7b1f50e3b7d 100644 ---- a/arch/ia64/mm/numa.c -+++ b/arch/ia64/mm/numa.c -@@ -75,5 +75,6 @@ int memory_add_physaddr_to_nid(u64 addr) - return 0; - return nid; - } -+EXPORT_SYMBOL(memory_add_physaddr_to_nid); - #endif - #endif -diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c -index acb55a41260dd..2bcdd7d3a1ada 100644 ---- a/arch/ia64/pci/fixup.c -+++ b/arch/ia64/pci/fixup.c -@@ -76,5 +76,5 @@ static void pci_fixup_video(struct pci_dev *pdev) - } - } - } --DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, -- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); -+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, -+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); -diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S -index 997b549330156..7d63e2f1555a0 100644 ---- a/arch/m68k/68000/entry.S -+++ b/arch/m68k/68000/entry.S -@@ -45,6 +45,8 @@ do_trace: - jbsr syscall_trace_enter - RESTORE_SWITCH_STACK - addql #4,%sp -+ addql #1,%d0 -+ jeq ret_from_exception - movel %sp@(PT_OFF_ORIG_D0),%d1 - movel #-ENOSYS,%d0 - cmpl #NR_syscalls,%d1 -diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig -index 0b50da08a9c56..810056d8ea678 100644 ---- a/arch/m68k/Kconfig -+++ b/arch/m68k/Kconfig -@@ -4,6 +4,7 @@ config M68K - default y - select ARCH_32BIT_OFF_T - select ARCH_HAS_BINFMT_FLAT -+ select ARCH_HAS_CPU_FINALIZE_INIT if MMU - select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE - select 
ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA - select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS -diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu -index 277d61a094637..29558055c71bf 100644 ---- a/arch/m68k/Kconfig.cpu -+++ b/arch/m68k/Kconfig.cpu -@@ -338,7 +338,7 @@ comment "Processor Specific Options" - - config M68KFPU_EMU - bool "Math emulation support" -- depends on MMU -+ depends on M68KCLASSIC && FPU - help - At some point in the future, this will cause floating-point math - instructions to be emulated by the kernel on machines that lack a -diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices -index 6a87b4a5fcac2..e6e3efac18407 100644 ---- a/arch/m68k/Kconfig.devices -+++ b/arch/m68k/Kconfig.devices -@@ -19,6 +19,7 @@ config HEARTBEAT - # We have a dedicated heartbeat LED. :-) - config PROC_HARDWARE - bool "/proc/hardware support" -+ depends on PROC_FS - help - Say Y here to support the /proc/hardware file, which gives you - access to information about the machine you're running on, -diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine -index 36fa0c3ef1296..946853a08502e 100644 ---- a/arch/m68k/Kconfig.machine -+++ b/arch/m68k/Kconfig.machine -@@ -203,6 +203,7 @@ config INIT_LCD - config MEMORY_RESERVE - int "Memory reservation (MiB)" - depends on (UCSIMM || UCDIMM) -+ default 0 - help - Reserve certain memory regions on 68x328 based boards. - -@@ -334,6 +335,7 @@ comment "Machine Options" - - config UBOOT - bool "Support for U-Boot command line parameters" -+ depends on COLDFIRE - help - If you say Y here kernel will try to collect command - line parameters from the initial u-boot stack. -diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c -index 0386252e9d043..7dab46728aeda 100644 ---- a/arch/m68k/coldfire/device.c -+++ b/arch/m68k/coldfire/device.c -@@ -480,7 +480,7 @@ static struct platform_device mcf_i2c5 = { - #endif /* MCFI2C_BASE5 */ - #endif /* IS_ENABLED(CONFIG_I2C_IMX) */ - --#if IS_ENABLED(CONFIG_MCF_EDMA) -+#ifdef MCFEDMA_BASE - - static const struct dma_slave_map mcf_edma_map[] = { - { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) }, -@@ -552,7 +552,7 @@ static struct platform_device mcf_edma = { - .platform_data = &mcf_edma_data, - } - }; --#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */ -+#endif /* MCFEDMA_BASE */ - - #ifdef MCFSDHC_BASE - static struct mcf_esdhc_platform_data mcf_esdhc_data = { -@@ -581,7 +581,7 @@ static struct platform_device mcf_esdhc = { - }; - #endif /* MCFSDHC_BASE */ - --#if IS_ENABLED(CONFIG_CAN_FLEXCAN) -+#ifdef MCFFLEXCAN_SIZE - - #include - -@@ -620,7 +620,7 @@ static struct platform_device mcf_flexcan0 = { - .resource = mcf5441x_flexcan0_resource, - .dev.platform_data = &mcf5441x_flexcan_info, - }; --#endif /* IS_ENABLED(CONFIG_CAN_FLEXCAN) */ -+#endif /* MCFFLEXCAN_SIZE */ - - static struct platform_device *mcf_devices[] __initdata = { - &mcf_uart, -@@ -651,13 +651,13 @@ static struct platform_device *mcf_devices[] __initdata = { - &mcf_i2c5, - #endif - #endif --#if IS_ENABLED(CONFIG_MCF_EDMA) -+#ifdef MCFEDMA_BASE - &mcf_edma, - #endif - #ifdef MCFSDHC_BASE - &mcf_esdhc, - #endif --#if IS_ENABLED(CONFIG_CAN_FLEXCAN) -+#ifdef MCFFLEXCAN_SIZE - &mcf_flexcan0, - #endif - }; -diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S -index 9f337c70243a3..35104c5417ff4 100644 ---- a/arch/m68k/coldfire/entry.S -+++ b/arch/m68k/coldfire/entry.S -@@ -90,6 +90,8 @@ ENTRY(system_call) - jbsr syscall_trace_enter - RESTORE_SWITCH_STACK - addql #4,%sp -+ addql #1,%d0 -+ jeq ret_from_exception - 
movel %d3,%a0 - jbsr %a0@ - movel %d0,%sp@(PT_OFF_D0) /* save the return value */ -diff --git a/arch/m68k/fpsp040/skeleton.S b/arch/m68k/fpsp040/skeleton.S -index 439395aa6fb42..081922c72daaa 100644 ---- a/arch/m68k/fpsp040/skeleton.S -+++ b/arch/m68k/fpsp040/skeleton.S -@@ -499,13 +499,13 @@ in_ea: - dbf %d0,morein - rts - -- .section .fixup,#alloc,#execinstr -+ .section .fixup,"ax" - .even - 1: - jbsr fpsp040_die - jbra .Lnotkern - -- .section __ex_table,#alloc -+ .section __ex_table,"a" - .align 4 - - .long in_ea,1b -diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S -index 7a0d6e4280665..89e2ec224ab6c 100644 ---- a/arch/m68k/ifpsp060/os.S -+++ b/arch/m68k/ifpsp060/os.S -@@ -379,11 +379,11 @@ _060_real_access: - - - | Execption handling for movs access to illegal memory -- .section .fixup,#alloc,#execinstr -+ .section .fixup,"ax" - .even - 1: moveq #-1,%d1 - rts --.section __ex_table,#alloc -+.section __ex_table,"a" - .align 4 - .long dmrbuae,1b - .long dmrwuae,1b -diff --git a/arch/m68k/include/asm/bugs.h b/arch/m68k/include/asm/bugs.h -deleted file mode 100644 -index 745530651e0bf..0000000000000 ---- a/arch/m68k/include/asm/bugs.h -+++ /dev/null -@@ -1,21 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --/* -- * include/asm-m68k/bugs.h -- * -- * Copyright (C) 1994 Linus Torvalds -- */ -- --/* -- * This is included by init/main.c to check for architecture-dependent bugs. -- * -- * Needs: -- * void check_bugs(void); -- */ -- --#ifdef CONFIG_MMU --extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */ --#else --static void check_bugs(void) --{ --} --#endif -diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h -index 87151d67d91e7..bce5ca56c3883 100644 ---- a/arch/m68k/include/asm/pgtable_no.h -+++ b/arch/m68k/include/asm/pgtable_no.h -@@ -42,7 +42,8 @@ extern void paging_init(void); - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ --#define ZERO_PAGE(vaddr) (virt_to_page(0)) -+extern void *empty_zero_page; -+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) - - /* - * All 32bit addresses are effectively valid for vmalloc... 
-diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h -index 80eb2396d01eb..3ba40bc1dfaa9 100644 ---- a/arch/m68k/include/asm/raw_io.h -+++ b/arch/m68k/include/asm/raw_io.h -@@ -80,14 +80,14 @@ - ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; }) - - #define rom_out_8(addr, b) \ -- ({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \ -+ (void)({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \ - __w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); }) - #define rom_out_be16(addr, w) \ -- ({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ -+ (void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ - __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \ - __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); }) - #define rom_out_le16(addr, w) \ -- ({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ -+ (void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \ - __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \ - __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); }) - -diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h -index 6a21d93582805..f4a7a340f4cae 100644 ---- a/arch/m68k/include/asm/timex.h -+++ b/arch/m68k/include/asm/timex.h -@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void) - { - if (mach_random_get_entropy) - return mach_random_get_entropy(); -- return 0; -+ return random_get_entropy_fallback(); - } - #define random_get_entropy random_get_entropy - -diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h -index ba670523885c8..60b786eb2254e 100644 ---- a/arch/m68k/include/asm/uaccess.h -+++ b/arch/m68k/include/asm/uaccess.h -@@ -12,14 +12,17 @@ - #include - - /* We let the MMU do all checking */ --static inline int access_ok(const void __user *addr, -+static inline int access_ok(const void __user *ptr, - unsigned long size) - { -- /* -- * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check -- * for TASK_SIZE! 
-- */ -- return 1; -+ unsigned long limit = TASK_SIZE; -+ unsigned long addr = (unsigned long)ptr; -+ -+ if (IS_ENABLED(CONFIG_CPU_HAS_ADDRESS_SPACES) || -+ !IS_ENABLED(CONFIG_MMU)) -+ return 1; -+ -+ return (size <= limit) && (addr <= (limit - size)); - } - - /* -diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S -index 9434fca68de5d..9f3663facaa0e 100644 ---- a/arch/m68k/kernel/entry.S -+++ b/arch/m68k/kernel/entry.S -@@ -184,9 +184,12 @@ do_trace_entry: - jbsr syscall_trace - RESTORE_SWITCH_STACK - addql #4,%sp -+ addql #1,%d0 | optimization for cmpil #-1,%d0 -+ jeq ret_from_syscall - movel %sp@(PT_OFF_ORIG_D0),%d0 - cmpl #NR_syscalls,%d0 - jcs syscall -+ jra ret_from_syscall - badsys: - movel #-ENOSYS,%sp@(PT_OFF_D0) - jra ret_from_syscall -diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S -index ab0f1e7d46535..f7667079e08e9 100644 ---- a/arch/m68k/kernel/relocate_kernel.S -+++ b/arch/m68k/kernel/relocate_kernel.S -@@ -26,7 +26,7 @@ ENTRY(relocate_new_kernel) - lea %pc@(.Lcopy),%a4 - 2: addl #0x00000000,%a4 /* virt_to_phys() */ - -- .section ".m68k_fixup","aw" -+ .section .m68k_fixup,"aw" - .long M68K_FIXUP_MEMOFFSET, 2b+2 - .previous - -@@ -49,7 +49,7 @@ ENTRY(relocate_new_kernel) - lea %pc@(.Lcont040),%a4 - 5: addl #0x00000000,%a4 /* virt_to_phys() */ - -- .section ".m68k_fixup","aw" -+ .section .m68k_fixup,"aw" - .long M68K_FIXUP_MEMOFFSET, 5b+2 - .previous - -diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c -index 4b51bfd38e5f2..868641a536236 100644 ---- a/arch/m68k/kernel/setup_mm.c -+++ b/arch/m68k/kernel/setup_mm.c -@@ -10,6 +10,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -87,15 +88,8 @@ void (*mach_sched_init) (void) __initdata = NULL; - void (*mach_init_IRQ) (void) __initdata = NULL; - void (*mach_get_model) (char *model); - void (*mach_get_hardware_list) (struct seq_file *m); --/* machine dependent timer functions */ --int (*mach_hwclk) (int, struct rtc_time*); --EXPORT_SYMBOL(mach_hwclk); - unsigned int (*mach_get_ss)(void); --int (*mach_get_rtc_pll)(struct rtc_pll_info *); --int (*mach_set_rtc_pll)(struct rtc_pll_info *); - EXPORT_SYMBOL(mach_get_ss); --EXPORT_SYMBOL(mach_get_rtc_pll); --EXPORT_SYMBOL(mach_set_rtc_pll); - void (*mach_reset)( void ); - void (*mach_halt)( void ); - void (*mach_power_off)( void ); -@@ -519,7 +513,7 @@ static int __init proc_hardware_init(void) - module_init(proc_hardware_init); - #endif - --void check_bugs(void) -+void __init arch_cpu_finalize_init(void) - { - #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU) - if (m68k_fputype == 0) { -diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c -index 5e4104f07a443..19eea73d3c170 100644 ---- a/arch/m68k/kernel/setup_no.c -+++ b/arch/m68k/kernel/setup_no.c -@@ -50,7 +50,6 @@ char __initdata command_line[COMMAND_LINE_SIZE]; - - /* machine dependent timer functions */ - void (*mach_sched_init)(void) __initdata = NULL; --int (*mach_hwclk) (int, struct rtc_time*); - - /* machine dependent reboot functions */ - void (*mach_reset)(void); -diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c -index 338817d0cb3fb..6cc68f29ab13c 100644 ---- a/arch/m68k/kernel/signal.c -+++ b/arch/m68k/kernel/signal.c -@@ -625,6 +625,7 @@ static inline void siginfo_build_tests(void) - /* _sigfault._perf */ - BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10); - BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14); -+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_flags) != 0x18); - - /* 
_sigpoll */ - BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c); -@@ -857,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs * - } - - static inline void __user * --get_sigframe(struct ksignal *ksig, size_t frame_size) -+get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size) - { - unsigned long usp = sigsp(rdusp(), ksig); -+ unsigned long gap = 0; - -- return (void __user *)((usp - frame_size) & -8UL); -+ if (CPU_IS_020_OR_030 && tregs->format == 0xb) { -+ /* USP is unreliable so use worst-case value */ -+ gap = 256; -+ } -+ -+ return (void __user *)((usp - gap - frame_size) & -8UL); - } - - static int setup_frame(struct ksignal *ksig, sigset_t *set, -@@ -879,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, - return -EFAULT; - } - -- frame = get_sigframe(ksig, sizeof(*frame) + fsize); -+ frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize); - - if (fsize) - err |= copy_to_user (frame + 1, regs + 1, fsize); -@@ -951,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, - return -EFAULT; - } - -- frame = get_sigframe(ksig, sizeof(*frame)); -+ frame = get_sigframe(ksig, tregs, sizeof(*frame)); - - if (fsize) - err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize); -diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c -index 340ffeea0a9dc..a97600b2af502 100644 ---- a/arch/m68k/kernel/time.c -+++ b/arch/m68k/kernel/time.c -@@ -63,6 +63,15 @@ void timer_heartbeat(void) - #endif /* CONFIG_HEARTBEAT */ - - #ifdef CONFIG_M68KCLASSIC -+/* machine dependent timer functions */ -+int (*mach_hwclk) (int, struct rtc_time*); -+EXPORT_SYMBOL(mach_hwclk); -+ -+int (*mach_get_rtc_pll)(struct rtc_pll_info *); -+int (*mach_set_rtc_pll)(struct rtc_pll_info *); -+EXPORT_SYMBOL(mach_get_rtc_pll); -+EXPORT_SYMBOL(mach_set_rtc_pll); -+ - #if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC) - void read_persistent_clock64(struct timespec64 *ts) - { -diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c -index 9718ce94cc845..6f647742a6ca9 100644 ---- a/arch/m68k/kernel/traps.c -+++ b/arch/m68k/kernel/traps.c -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -544,7 +545,8 @@ static inline void bus_error030 (struct frame *fp) - errorcode |= 2; - - if (mmusr & (MMU_I | MMU_WP)) { -- if (ssw & 4) { -+ /* We might have an exception table for this PC */ -+ if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) { - pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", - ssw & RW ? 
"read" : "write", - fp->un.fmtb.daddr, -@@ -1131,7 +1133,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) - pr_crit("%s: %08x\n", str, nr); - show_registers(fp); - add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - asmlinkage void set_esp0(unsigned long ssp) -@@ -1145,7 +1147,7 @@ asmlinkage void set_esp0(unsigned long ssp) - */ - asmlinkage void fpsp040_die(void) - { -- force_sigsegv(SIGSEGV); -+ force_exit_sig(SIGSEGV); - } - - #ifdef CONFIG_M68KFPU_EMU -diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c -index ef46e77e97a5b..fcb3a0d8421c5 100644 ---- a/arch/m68k/mm/fault.c -+++ b/arch/m68k/mm/fault.c -@@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs) - pr_alert("Unable to handle kernel access"); - pr_cont(" at virtual address %p\n", addr); - die_if_kernel("Oops", regs, 0 /*error_code*/); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - return 1; -diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h -index d2a8ef9f89787..3fe96979d2c62 100644 ---- a/arch/microblaze/include/asm/uaccess.h -+++ b/arch/microblaze/include/asm/uaccess.h -@@ -39,24 +39,13 @@ - - # define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - --static inline int access_ok(const void __user *addr, unsigned long size) -+static inline int __access_ok(unsigned long addr, unsigned long size) - { -- if (!size) -- goto ok; -+ unsigned long limit = user_addr_max(); - -- if ((get_fs().seg < ((unsigned long)addr)) || -- (get_fs().seg < ((unsigned long)addr + size - 1))) { -- pr_devel("ACCESS fail at 0x%08x (size 0x%x), seg 0x%08x\n", -- (__force u32)addr, (u32)size, -- (u32)get_fs().seg); -- return 0; -- } --ok: -- pr_devel("ACCESS OK at 0x%08x (size 0x%x), seg 0x%08x\n", -- (__force u32)addr, (u32)size, -- (u32)get_fs().seg); -- return 1; -+ return (size <= limit) && (addr <= (limit - size)); - } -+#define access_ok(addr, size) __access_ok((unsigned long)addr, size) - - # define __FIXUP_SECTION ".section .fixup,\"ax\"\n" - # define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n" -@@ -141,27 +130,27 @@ extern long __user_bad(void); - - #define __get_user(x, ptr) \ - ({ \ -- unsigned long __gu_val = 0; \ - long __gu_err; \ - switch (sizeof(*(ptr))) { \ - case 1: \ -- __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \ -+ __get_user_asm("lbu", (ptr), x, __gu_err); \ - break; \ - case 2: \ -- __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \ -+ __get_user_asm("lhu", (ptr), x, __gu_err); \ - break; \ - case 4: \ -- __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ -+ __get_user_asm("lw", (ptr), x, __gu_err); \ - break; \ -- case 8: \ -- __gu_err = __copy_from_user(&__gu_val, ptr, 8); \ -- if (__gu_err) \ -- __gu_err = -EFAULT; \ -+ case 8: { \ -+ __u64 __x = 0; \ -+ __gu_err = raw_copy_from_user(&__x, ptr, 8) ? 
\ -+ -EFAULT : 0; \ -+ (x) = (typeof(x))(typeof((x) - (x)))__x; \ - break; \ -+ } \ - default: \ - /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ - } \ -- x = (__force __typeof__(*(ptr))) __gu_val; \ - __gu_err; \ - }) - -diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c -index 908788497b287..fd153d5fab982 100644 ---- a/arch/microblaze/kernel/exceptions.c -+++ b/arch/microblaze/kernel/exceptions.c -@@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err) - pr_warn("Oops: %s, sig: %ld\n", str, err); - show_regs(fp); - spin_unlock_irq(&die_lock); -- /* do_exit() should take care of panic'ing from an interrupt -+ /* make_task_dead() should take care of panic'ing from an interrupt - * context so we don't handle it here - */ -- do_exit(err); -+ make_task_dead(err); - } - - /* for user application debugging */ -diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms -index 584081df89c28..6e3f36c841e5d 100644 ---- a/arch/mips/Kbuild.platforms -+++ b/arch/mips/Kbuild.platforms -@@ -38,4 +38,4 @@ platform-$(CONFIG_MACH_TX49XX) += txx9/ - platform-$(CONFIG_MACH_VR41XX) += vr41xx/ - - # include the platform specific files --include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platform-y)) -+include $(patsubst %/, $(srctree)/arch/mips/%/Platform, $(platform-y)) -diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index 6b8f591c5054c..13b09c7516e91 100644 ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -4,6 +4,7 @@ config MIPS - default y - select ARCH_32BIT_OFF_T if !64BIT - select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT -+ select ARCH_HAS_CPU_FINALIZE_INIT - select ARCH_HAS_DEBUG_VIRTUAL if !64BIT - select ARCH_HAS_FORTIFY_SOURCE - select ARCH_HAS_KCOV -@@ -81,6 +82,7 @@ config MIPS - select HAVE_LD_DEAD_CODE_DATA_ELIMINATION - select HAVE_MOD_ARCH_SPECIFIC - select HAVE_NMI -+ select HAVE_PATA_PLATFORM - select HAVE_PERF_EVENTS - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -@@ -332,6 +334,9 @@ config BCM63XX - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_HAS_EARLY_PRINTK -+ select SYS_HAS_CPU_BMIPS32_3300 -+ select SYS_HAS_CPU_BMIPS4350 -+ select SYS_HAS_CPU_BMIPS4380 - select SWAP_IO_SPACE - select GPIOLIB - select MIPS_L1_CACHE_SHIFT_4 -@@ -1379,6 +1384,7 @@ config CPU_LOONGSON64 - select MIPS_ASID_BITS_VARIABLE - select MIPS_PGD_C0_CONTEXT - select MIPS_L1_CACHE_SHIFT_6 -+ select MIPS_FP_SUPPORT - select GPIOLIB - select SWIOTLB - select HAVE_KVM -@@ -1989,6 +1995,10 @@ config SYS_HAS_CPU_MIPS64_R1 - config SYS_HAS_CPU_MIPS64_R2 - bool - -+config SYS_HAS_CPU_MIPS64_R5 -+ bool -+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT -+ - config SYS_HAS_CPU_MIPS64_R6 - bool - select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT -@@ -2153,7 +2163,7 @@ config CPU_SUPPORTS_ADDRWINCFG - bool - config CPU_SUPPORTS_HUGEPAGES - bool -- depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) -+ depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA)) - config MIPS_PGD_C0_CONTEXT - bool - depends on 64BIT -@@ -3185,7 +3195,7 @@ config STACKTRACE_SUPPORT - config PGTABLE_LEVELS - int - default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48 -- default 3 if 64BIT && !PAGE_SIZE_64KB -+ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48) - default 2 - - config MIPS_AUTO_PFN_OFFSET -diff --git a/arch/mips/Makefile b/arch/mips/Makefile -index ea3cd080a1c7d..151e98698f763 100644 ---- a/arch/mips/Makefile -+++ b/arch/mips/Makefile -@@ -254,7 +254,9 @@ endif - # - # Board-dependent 
options and extra files - # -+ifdef need-compiler - include $(srctree)/arch/mips/Kbuild.platforms -+endif - - ifdef CONFIG_PHYSICAL_START - load-y = $(CONFIG_PHYSICAL_START) -@@ -277,8 +279,8 @@ ifdef CONFIG_64BIT - endif - endif - -- ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy) -- cflags-y += -msym32 -DKBUILD_64BIT_SYM32 -+ ifeq ($(KBUILD_SYM32), y) -+ cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32 - else - ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y) - $(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32) -@@ -319,7 +321,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables - - KBUILD_LDFLAGS += -m $(ld-emul) - --ifdef CONFIG_MIPS -+ifdef need-compiler - CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ - egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \ - sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') -diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c -index 4ca2c28878e0f..e9ee9ab90a0c6 100644 ---- a/arch/mips/alchemy/common/dbdma.c -+++ b/arch/mips/alchemy/common/dbdma.c -@@ -30,6 +30,7 @@ - * - */ - -+#include /* for dma_default_coherent */ - #include - #include - #include -@@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) - dp->dscr_cmd0 &= ~DSCR_CMD0_IE; - - /* -- * There is an errata on the Au1200/Au1550 parts that could result -- * in "stale" data being DMA'ed. It has to do with the snoop logic on -- * the cache eviction buffer. DMA_NONCOHERENT is on by default for -- * these parts. If it is fixed in the future, these dma_cache_inv will -- * just be nothing more than empty macros. See io.h. -+ * There is an erratum on certain Au1200/Au1550 revisions that could -+ * result in "stale" data being DMA'ed. It has to do with the snoop -+ * logic on the cache eviction buffer. dma_default_coherent is set -+ * to false on these parts. - */ -- dma_cache_wback_inv((unsigned long)buf, nbytes); -+ if (!dma_default_coherent) -+ dma_cache_wback_inv(KSEG0ADDR(buf), nbytes); - dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ - wmb(); /* drain writebuffer */ - dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); - ctp->chan_ptr->ddma_dbell = 0; -+ wmb(); /* force doorbell write out to dma engine */ - - /* Get next descriptor pointer. */ - ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); -@@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags) - dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1); - #endif - /* -- * There is an errata on the Au1200/Au1550 parts that could result in -- * "stale" data being DMA'ed. It has to do with the snoop logic on the -- * cache eviction buffer. DMA_NONCOHERENT is on by default for these -- * parts. If it is fixed in the future, these dma_cache_inv will just -- * be nothing more than empty macros. See io.h. -+ * There is an erratum on certain Au1200/Au1550 revisions that could -+ * result in "stale" data being DMA'ed. It has to do with the snoop -+ * logic on the cache eviction buffer. dma_default_coherent is set -+ * to false on these parts. - */ -- dma_cache_inv((unsigned long)buf, nbytes); -+ if (!dma_default_coherent) -+ dma_cache_inv(KSEG0ADDR(buf), nbytes); - dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */ - wmb(); /* drain writebuffer */ - dma_cache_wback_inv((unsigned long)dp, sizeof(*dp)); - ctp->chan_ptr->ddma_dbell = 0; -+ wmb(); /* force doorbell write out to dma engine */ - - /* Get next descriptor pointer. 
*/ - ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr)); -diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c -index 2c52ee27b4f25..50de86eb8784c 100644 ---- a/arch/mips/alchemy/devboards/db1000.c -+++ b/arch/mips/alchemy/devboards/db1000.c -@@ -14,7 +14,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -167,12 +166,7 @@ static struct platform_device db1x00_audio_dev = { - - static irqreturn_t db1100_mmc_cd(int irq, void *ptr) - { -- void (*mmc_cd)(struct mmc_host *, unsigned long); -- /* link against CONFIG_MMC=m */ -- mmc_cd = symbol_get(mmc_detect_change); -- mmc_cd(ptr, msecs_to_jiffies(500)); -- symbol_put(mmc_detect_change); -- -+ mmc_detect_change(ptr, msecs_to_jiffies(500)); - return IRQ_HANDLED; - } - -diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c -index 1864eb935ca57..76080c71a2a7b 100644 ---- a/arch/mips/alchemy/devboards/db1200.c -+++ b/arch/mips/alchemy/devboards/db1200.c -@@ -10,7 +10,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -340,14 +339,7 @@ static irqreturn_t db1200_mmc_cd(int irq, void *ptr) - - static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr) - { -- void (*mmc_cd)(struct mmc_host *, unsigned long); -- -- /* link against CONFIG_MMC=m */ -- mmc_cd = symbol_get(mmc_detect_change); -- if (mmc_cd) { -- mmc_cd(ptr, msecs_to_jiffies(200)); -- symbol_put(mmc_detect_change); -- } -+ mmc_detect_change(ptr, msecs_to_jiffies(200)); - - msleep(100); /* debounce */ - if (irq == DB1200_SD0_INSERT_INT) -@@ -431,14 +423,7 @@ static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr) - - static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr) - { -- void (*mmc_cd)(struct mmc_host *, unsigned long); -- -- /* link against CONFIG_MMC=m */ -- mmc_cd = symbol_get(mmc_detect_change); -- if (mmc_cd) { -- mmc_cd(ptr, msecs_to_jiffies(200)); -- symbol_put(mmc_detect_change); -- } -+ mmc_detect_change(ptr, msecs_to_jiffies(200)); - - msleep(100); /* debounce */ - if (irq == PB1200_SD1_INSERT_INT) -diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c -index cd72eaa1168f7..ca71e5ed51abd 100644 ---- a/arch/mips/alchemy/devboards/db1300.c -+++ b/arch/mips/alchemy/devboards/db1300.c -@@ -17,7 +17,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -459,14 +458,7 @@ static irqreturn_t db1300_mmc_cd(int irq, void *ptr) - - static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr) - { -- void (*mmc_cd)(struct mmc_host *, unsigned long); -- -- /* link against CONFIG_MMC=m. We can only be called once MMC core has -- * initialized the controller, so symbol_get() should always succeed. 
-- */ -- mmc_cd = symbol_get(mmc_detect_change); -- mmc_cd(ptr, msecs_to_jiffies(200)); -- symbol_put(mmc_detect_change); -+ mmc_detect_change(ptr, msecs_to_jiffies(200)); - - msleep(100); /* debounce */ - if (irq == DB1300_SD1_INSERT_INT) -diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c -index 0a63721d0fbf3..5a33d6b48d779 100644 ---- a/arch/mips/bcm47xx/prom.c -+++ b/arch/mips/bcm47xx/prom.c -@@ -86,7 +86,7 @@ static __init void prom_init_mem(void) - pr_debug("Assume 128MB RAM\n"); - break; - } -- if (!memcmp(prom_init, prom_init + mem, 32)) -+ if (!memcmp((void *)prom_init, (void *)prom_init + mem, 32)) - break; - } - lowmem = mem; -@@ -159,7 +159,7 @@ void __init bcm47xx_prom_highmem_init(void) - - off = EXTVBASE + __pa(off); - for (extmem = 128 << 20; extmem < 512 << 20; extmem <<= 1) { -- if (!memcmp(prom_init, (void *)(off + extmem), 16)) -+ if (!memcmp((void *)prom_init, (void *)(off + extmem), 16)) - break; - } - extmem -= lowmem; -diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c -index 5a3e325275d0d..86a6e25908664 100644 ---- a/arch/mips/bcm63xx/clk.c -+++ b/arch/mips/bcm63xx/clk.c -@@ -361,6 +361,8 @@ static struct clk clk_periph = { - */ - int clk_enable(struct clk *clk) - { -+ if (!clk) -+ return 0; - mutex_lock(&clocks_mutex); - clk_enable_unlocked(clk); - mutex_unlock(&clocks_mutex); -@@ -381,6 +383,18 @@ void clk_disable(struct clk *clk) - - EXPORT_SYMBOL(clk_disable); - -+struct clk *clk_get_parent(struct clk *clk) -+{ -+ return NULL; -+} -+EXPORT_SYMBOL(clk_get_parent); -+ -+int clk_set_parent(struct clk *clk, struct clk *parent) -+{ -+ return 0; -+} -+EXPORT_SYMBOL(clk_set_parent); -+ - unsigned long clk_get_rate(struct clk *clk) - { - if (!clk) -diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c -index 915ce4b189c15..76c5d8e4d6e2d 100644 ---- a/arch/mips/bmips/dma.c -+++ b/arch/mips/bmips/dma.c -@@ -64,6 +64,8 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr) - return dma_addr; - } - -+bool bmips_rac_flush_disable; -+ - void arch_sync_dma_for_cpu_all(void) - { - void __iomem *cbr = BMIPS_GET_CBR(); -@@ -74,6 +76,9 @@ void arch_sync_dma_for_cpu_all(void) - boot_cpu_type() != CPU_BMIPS4380) - return; - -+ if (unlikely(bmips_rac_flush_disable)) -+ return; -+ - /* Flush stale data out of the readahead cache */ - cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); - __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); -diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c -index 31bcfa4e08b99..45c7cf582348e 100644 ---- a/arch/mips/bmips/setup.c -+++ b/arch/mips/bmips/setup.c -@@ -34,6 +34,8 @@ - #define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c)) - #define BCM6328_TP1_DISABLED BIT(9) - -+extern bool bmips_rac_flush_disable; -+ - static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000; - - struct bmips_quirk { -@@ -103,6 +105,12 @@ static void bcm6358_quirks(void) - * disable SMP for now - */ - bmips_smp_enabled = 0; -+ -+ /* -+ * RAC flush causes kernel panics on BCM6358 when booting from TP1 -+ * because the bootloader is not initializing it properly. 
-+ */ -+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)); - } - - static void bcm6368_quirks(void) -diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile -index 3548b3b452699..705b9e7f8035a 100644 ---- a/arch/mips/boot/compressed/Makefile -+++ b/arch/mips/boot/compressed/Makefile -@@ -56,6 +56,8 @@ $(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c - - vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o - -+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o $(obj)/clz_ctz.o -+ - extra-y += ashldi3.c - $(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE - $(call if_changed,shipped) -@@ -64,6 +66,10 @@ extra-y += bswapsi.c - $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE - $(call if_changed,shipped) - -+extra-y += bswapdi.c -+$(obj)/bswapdi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE -+ $(call if_changed,shipped) -+ - targets := $(notdir $(vmlinuzobjs-y)) - - targets += vmlinux.bin -diff --git a/arch/mips/boot/compressed/clz_ctz.c b/arch/mips/boot/compressed/clz_ctz.c -new file mode 100644 -index 0000000000000..b4a1b6eb2f8ad ---- /dev/null -+++ b/arch/mips/boot/compressed/clz_ctz.c -@@ -0,0 +1,2 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+#include "../../../../lib/clz_ctz.c" -diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts -index a688809beebca..74d49dc134384 100644 ---- a/arch/mips/boot/dts/ingenic/ci20.dts -+++ b/arch/mips/boot/dts/ingenic/ci20.dts -@@ -99,7 +99,7 @@ - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - -- gpio = <&gpf 14 GPIO_ACTIVE_LOW>; -+ gpio = <&gpf 15 GPIO_ACTIVE_LOW>; - enable-active-high; - }; - }; -diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi -index 9e34f433b9b58..efbbddaf0fde5 100644 ---- a/arch/mips/boot/dts/ingenic/jz4780.dtsi -+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi -@@ -450,7 +450,7 @@ - #address-cells = <1>; - #size-cells = <1>; - -- eth0_addr: eth-mac-addr@0x22 { -+ eth0_addr: eth-mac-addr@22 { - reg = <0x22 0x6>; - }; - }; -diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c -index 1daa0c6b6f4ea..572a053e30ed5 100644 ---- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c -+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c -@@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port) - { - union cvmx_helper_link_info result; - -- WARN(!octeon_is_simulation(), -+ WARN_ONCE(!octeon_is_simulation(), - "Using deprecated link status - please update your DT"); - - /* Unless we fix it later, all links are defaulted to down */ -diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c -index 6044ff4710022..a18ad2daf0052 100644 ---- a/arch/mips/cavium-octeon/executive/cvmx-helper.c -+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c -@@ -1100,7 +1100,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) - if (index == 0) - result = __cvmx_helper_rgmii_link_get(ipd_port); - else { -- WARN(1, "Using deprecated link status - please update your DT"); -+ WARN_ONCE(1, "Using deprecated link status - please update your DT"); - result.s.full_duplex = 1; - result.s.link_up = 1; - result.s.speed = 1000; -diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c -index be5d4afcd30f9..353dfeee0a6d3 100644 ---- 
a/arch/mips/cavium-octeon/octeon-irq.c -+++ b/arch/mips/cavium-octeon/octeon-irq.c -@@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) - static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, - int irq, int line, int bit) - { -+ struct device_node *of_node; -+ int ret; -+ -+ of_node = irq_domain_get_of_node(domain); -+ if (!of_node) -+ return -EINVAL; -+ ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node)); -+ if (ret < 0) -+ return ret; -+ - return irq_domain_associate(domain, irq, line << 6 | bit); - } - -diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S -index 0a515cde1c183..25860fba6218d 100644 ---- a/arch/mips/cavium-octeon/octeon-memcpy.S -+++ b/arch/mips/cavium-octeon/octeon-memcpy.S -@@ -74,7 +74,7 @@ - #define EXC(inst_reg,addr,handler) \ - 9: inst_reg, addr; \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous - - /* -diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c -index d56e9b9d2e434..ce05c0dd3acd7 100644 ---- a/arch/mips/cavium-octeon/octeon-platform.c -+++ b/arch/mips/cavium-octeon/octeon-platform.c -@@ -86,11 +86,12 @@ static void octeon2_usb_clocks_start(struct device *dev) - "refclk-frequency", &clock_rate); - if (i) { - dev_err(dev, "No UCTL \"refclk-frequency\"\n"); -+ of_node_put(uctl_node); - goto exit; - } - i = of_property_read_string(uctl_node, - "refclk-type", &clock_type); -- -+ of_node_put(uctl_node); - if (!i && strcmp("crystal", clock_type) == 0) - is_crystal_clock = true; - } -@@ -328,6 +329,7 @@ static int __init octeon_ehci_device_init(void) - - pd->dev.platform_data = &octeon_ehci_pdata; - octeon_ehci_hw_start(&pd->dev); -+ put_device(&pd->dev); - - return ret; - } -@@ -391,6 +393,7 @@ static int __init octeon_ohci_device_init(void) - - pd->dev.platform_data = &octeon_ohci_pdata; - octeon_ohci_hw_start(&pd->dev); -+ put_device(&pd->dev); - - return ret; - } -diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c -index 6e4d3619137af..4df919d26b082 100644 ---- a/arch/mips/cavium-octeon/octeon-usb.c -+++ b/arch/mips/cavium-octeon/octeon-usb.c -@@ -537,6 +537,7 @@ static int __init dwc3_octeon_device_init(void) - devm_iounmap(&pdev->dev, base); - devm_release_mem_region(&pdev->dev, res->start, - resource_size(res)); -+ put_device(&pdev->dev); - } - } while (node != NULL); - -diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig -index 85f1955b4b004..4a81297e21a72 100644 ---- a/arch/mips/configs/decstation_64_defconfig -+++ b/arch/mips/configs/decstation_64_defconfig -@@ -53,8 +53,6 @@ CONFIG_IPV6_SUBTREES=y - CONFIG_NETWORK_SECMARK=y - CONFIG_IP_SCTP=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m --CONFIG_DECNET_ROUTER=y - # CONFIG_WIRELESS is not set - # CONFIG_UEVENT_HELPER is not set - # CONFIG_FW_LOADER is not set -diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig -index 30a6eafdb1d01..fd35454bae4ce 100644 ---- a/arch/mips/configs/decstation_defconfig -+++ b/arch/mips/configs/decstation_defconfig -@@ -49,8 +49,6 @@ CONFIG_IPV6_SUBTREES=y - CONFIG_NETWORK_SECMARK=y - CONFIG_IP_SCTP=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m --CONFIG_DECNET_ROUTER=y - # CONFIG_WIRELESS is not set - # CONFIG_UEVENT_HELPER is not set - # CONFIG_FW_LOADER is not set -diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig -index 
e2b58dbf4aa9a..7ed8f4c7cbdd9 100644 ---- a/arch/mips/configs/decstation_r4k_defconfig -+++ b/arch/mips/configs/decstation_r4k_defconfig -@@ -48,8 +48,6 @@ CONFIG_IPV6_SUBTREES=y - CONFIG_NETWORK_SECMARK=y - CONFIG_IP_SCTP=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m --CONFIG_DECNET_ROUTER=y - # CONFIG_WIRELESS is not set - # CONFIG_UEVENT_HELPER is not set - # CONFIG_FW_LOADER is not set -diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig -index 5c24ac7fdf56d..ba47c5e929b7f 100644 ---- a/arch/mips/configs/fuloong2e_defconfig -+++ b/arch/mips/configs/fuloong2e_defconfig -@@ -206,7 +206,6 @@ CONFIG_NFSD_V3_ACL=y - CONFIG_NFSD_V4=y - CONFIG_CIFS=m - CONFIG_CIFS_STATS2=y --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_CIFS_DEBUG2=y -diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig -index 5cb91509bb7cf..fc2e9b50d2862 100644 ---- a/arch/mips/configs/gpr_defconfig -+++ b/arch/mips/configs/gpr_defconfig -@@ -69,7 +69,6 @@ CONFIG_IP_NF_RAW=m - CONFIG_IP_NF_ARPTABLES=m - CONFIG_IP_NF_ARPFILTER=m - CONFIG_IP_NF_ARP_MANGLE=m --CONFIG_DECNET_NF_GRABULATOR=m - CONFIG_BRIDGE_NF_EBTABLES=m - CONFIG_BRIDGE_EBT_BROUTE=m - CONFIG_BRIDGE_EBT_T_FILTER=m -@@ -99,7 +98,6 @@ CONFIG_ATM_MPOA=m - CONFIG_ATM_BR2684=m - CONFIG_BRIDGE=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m - CONFIG_LLC2=m - CONFIG_ATALK=m - CONFIG_DEV_APPLETALK=m -diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig -index 614af02d83e6e..6fb9bc29f4a03 100644 ---- a/arch/mips/configs/malta_qemu_32r6_defconfig -+++ b/arch/mips/configs/malta_qemu_32r6_defconfig -@@ -165,7 +165,6 @@ CONFIG_TMPFS=y - CONFIG_NFS_FS=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig -index 9c051f8fd3300..eb72df528243a 100644 ---- a/arch/mips/configs/maltaaprp_defconfig -+++ b/arch/mips/configs/maltaaprp_defconfig -@@ -166,7 +166,6 @@ CONFIG_TMPFS=y - CONFIG_NFS_FS=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig -index 2e90d97551d6f..1fb40d310f49c 100644 ---- a/arch/mips/configs/maltasmvp_defconfig -+++ b/arch/mips/configs/maltasmvp_defconfig -@@ -167,7 +167,6 @@ CONFIG_TMPFS=y - CONFIG_NFS_FS=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig -index d1f7fdb27284b..75cb778c61496 100644 ---- a/arch/mips/configs/maltasmvp_eva_defconfig -+++ b/arch/mips/configs/maltasmvp_eva_defconfig -@@ -169,7 +169,6 @@ CONFIG_TMPFS=y - CONFIG_NFS_FS=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig -index 48e5bd4924522..7b4f247dc60cc 100644 ---- a/arch/mips/configs/maltaup_defconfig -+++ b/arch/mips/configs/maltaup_defconfig -@@ -165,7 +165,6 @@ CONFIG_TMPFS=y - CONFIG_NFS_FS=y - CONFIG_ROOT_NFS=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y - CONFIG_NLS_CODEPAGE_437=m -diff --git 
a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig -index 205d3b34528c3..42e75cb72e6be 100644 ---- a/arch/mips/configs/mtx1_defconfig -+++ b/arch/mips/configs/mtx1_defconfig -@@ -116,7 +116,6 @@ CONFIG_IP6_NF_FILTER=m - CONFIG_IP6_NF_TARGET_REJECT=m - CONFIG_IP6_NF_MANGLE=m - CONFIG_IP6_NF_RAW=m --CONFIG_DECNET_NF_GRABULATOR=m - CONFIG_BRIDGE_NF_EBTABLES=m - CONFIG_BRIDGE_EBT_BROUTE=m - CONFIG_BRIDGE_EBT_T_FILTER=m -@@ -146,7 +145,6 @@ CONFIG_ATM_MPOA=m - CONFIG_ATM_BR2684=m - CONFIG_BRIDGE=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m - CONFIG_LLC2=m - CONFIG_ATALK=m - CONFIG_DEV_APPLETALK=m -diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig -index 32c2906117232..3700c6ccd80ba 100644 ---- a/arch/mips/configs/nlm_xlp_defconfig -+++ b/arch/mips/configs/nlm_xlp_defconfig -@@ -200,7 +200,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m - CONFIG_IP6_NF_MANGLE=m - CONFIG_IP6_NF_RAW=m - CONFIG_IP6_NF_SECURITY=m --CONFIG_DECNET_NF_GRABULATOR=m - CONFIG_BRIDGE_NF_EBTABLES=m - CONFIG_BRIDGE_EBT_BROUTE=m - CONFIG_BRIDGE_EBT_T_FILTER=m -@@ -234,7 +233,6 @@ CONFIG_ATM_BR2684=m - CONFIG_BRIDGE=m - CONFIG_VLAN_8021Q=m - CONFIG_VLAN_8021Q_GVRP=y --CONFIG_DECNET=m - CONFIG_LLC2=m - CONFIG_ATALK=m - CONFIG_DEV_APPLETALK=m -diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig -index bf9b9244929ec..00e7264789a4c 100644 ---- a/arch/mips/configs/nlm_xlr_defconfig -+++ b/arch/mips/configs/nlm_xlr_defconfig -@@ -198,7 +198,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m - CONFIG_IP6_NF_MANGLE=m - CONFIG_IP6_NF_RAW=m - CONFIG_IP6_NF_SECURITY=m --CONFIG_DECNET_NF_GRABULATOR=m - CONFIG_BRIDGE_NF_EBTABLES=m - CONFIG_BRIDGE_EBT_BROUTE=m - CONFIG_BRIDGE_EBT_T_FILTER=m -@@ -232,7 +231,6 @@ CONFIG_ATM_BR2684=m - CONFIG_BRIDGE=m - CONFIG_VLAN_8021Q=m - CONFIG_VLAN_8021Q_GVRP=y --CONFIG_DECNET=m - CONFIG_LLC2=m - CONFIG_ATALK=m - CONFIG_DEV_APPLETALK=m -diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig -index 3dc2da2bee0df..aa02825b3ed87 100644 ---- a/arch/mips/configs/rm200_defconfig -+++ b/arch/mips/configs/rm200_defconfig -@@ -116,7 +116,6 @@ CONFIG_IP6_NF_FILTER=m - CONFIG_IP6_NF_TARGET_REJECT=m - CONFIG_IP6_NF_MANGLE=m - CONFIG_IP6_NF_RAW=m --CONFIG_DECNET_NF_GRABULATOR=m - CONFIG_BRIDGE_NF_EBTABLES=m - CONFIG_BRIDGE_EBT_BROUTE=m - CONFIG_BRIDGE_EBT_T_FILTER=m -@@ -137,7 +136,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m - CONFIG_BRIDGE_EBT_SNAT=m - CONFIG_BRIDGE_EBT_LOG=m - CONFIG_BRIDGE=m --CONFIG_DECNET=m - CONFIG_NET_SCHED=y - CONFIG_NET_SCH_CBQ=m - CONFIG_NET_SCH_HTB=m -diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S -index ea5b5a83f1e11..011d1d678840a 100644 ---- a/arch/mips/dec/int-handler.S -+++ b/arch/mips/dec/int-handler.S -@@ -131,7 +131,7 @@ - */ - mfc0 t0,CP0_CAUSE # get pending interrupts - mfc0 t1,CP0_STATUS --#ifdef CONFIG_32BIT -+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) - lw t2,cpu_fpu_mask - #endif - andi t0,ST0_IM # CAUSE.CE may be non-zero! 
-@@ -139,7 +139,7 @@ - - beqz t0,spurious - --#ifdef CONFIG_32BIT -+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) - and t2,t0 - bnez t2,fpu # handle FPU immediately - #endif -@@ -280,7 +280,7 @@ handle_it: - j dec_irq_dispatch - nop - --#ifdef CONFIG_32BIT -+#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) - fpu: - lw t0,fpu_kstat_irq - nop -diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile -index d95016016b42b..2bad87551203b 100644 ---- a/arch/mips/dec/prom/Makefile -+++ b/arch/mips/dec/prom/Makefile -@@ -6,4 +6,4 @@ - - lib-y += init.o memory.o cmdline.o identify.o console.o - --lib-$(CONFIG_32BIT) += locore.o -+lib-$(CONFIG_CPU_R3000) += locore.o -diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c -index eaad0ed4b523b..99b9b29750db3 100644 ---- a/arch/mips/dec/setup.c -+++ b/arch/mips/dec/setup.c -@@ -746,7 +746,8 @@ void __init arch_init_irq(void) - dec_interrupt[DEC_IRQ_HALT] = -1; - - /* Register board interrupts: FPU and cascade. */ -- if (dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { -+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) && -+ dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { - struct irq_desc *desc_fpu; - int irq_fpu; - -diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c -index f24cbb4a39b50..892765b742bbc 100644 ---- a/arch/mips/fw/lib/cmdline.c -+++ b/arch/mips/fw/lib/cmdline.c -@@ -53,7 +53,7 @@ char *fw_getenv(char *envname) - { - char *result = NULL; - -- if (_fw_envp != NULL) { -+ if (_fw_envp != NULL && fw_envp(0) != NULL) { - /* - * Return a pointer to the given environment variable. - * YAMON uses "name", "value" pairs, while U-Boot uses -diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c -index a3aa22c77cadc..a07a5edbcda78 100644 ---- a/arch/mips/generic/yamon-dt.c -+++ b/arch/mips/generic/yamon-dt.c -@@ -75,7 +75,7 @@ static unsigned int __init gen_fdt_mem_array( - __init int yamon_dt_append_memory(void *fdt, - const struct yamon_mem_region *regions) - { -- unsigned long phys_memsize, memsize; -+ unsigned long phys_memsize = 0, memsize; - __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES]; - unsigned int mem_entries; - int i, err, mem_off; -diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h -index 2f8ce94ebaafe..cc69f1deb1ca8 100644 ---- a/arch/mips/include/asm/asm.h -+++ b/arch/mips/include/asm/asm.h -@@ -276,7 +276,7 @@ symbol = value - - #define PTR_SCALESHIFT 2 - --#define PTR .word -+#define PTR_WD .word - #define PTRSIZE 4 - #define PTRLOG 2 - #endif -@@ -301,7 +301,7 @@ symbol = value - - #define PTR_SCALESHIFT 3 - --#define PTR .dword -+#define PTR_WD .dword - #define PTRSIZE 8 - #define PTRLOG 3 - #endif -diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h -index d72dc6e1cf3cd..8d4cf29861b87 100644 ---- a/arch/mips/include/asm/bugs.h -+++ b/arch/mips/include/asm/bugs.h -@@ -1,17 +1,11 @@ - /* SPDX-License-Identifier: GPL-2.0 */ - /* -- * This is included by init/main.c to check for architecture-dependent bugs. -- * - * Copyright (C) 2007 Maciej W. 
Rozycki -- * -- * Needs: -- * void check_bugs(void); - */ - #ifndef _ASM_BUGS_H - #define _ASM_BUGS_H - - #include --#include - #include - - #include -@@ -30,17 +24,6 @@ static inline void check_bugs_early(void) - check_bugs64_early(); - } - --static inline void check_bugs(void) --{ -- unsigned int cpu = smp_processor_id(); -- -- cpu_data[cpu].udelay_val = loops_per_jiffy; -- check_bugs32(); -- -- if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) -- check_bugs64(); --} -- - static inline int r4k_daddiu_bug(void) - { - if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) -diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h -index 0b983800f48b7..66a8b293fd80b 100644 ---- a/arch/mips/include/asm/cmpxchg.h -+++ b/arch/mips/include/asm/cmpxchg.h -@@ -249,6 +249,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr, - /* Load 64 bits from ptr */ - " " __SYNC(full, loongson3_war) " \n" - "1: lld %L0, %3 # __cmpxchg64 \n" -+ " .set pop \n" - /* - * Split the 64 bit value we loaded into the 2 registers that hold the - * ret variable. -@@ -276,12 +277,14 @@ static inline unsigned long __cmpxchg64(volatile void *ptr, - " or %L1, %L1, $at \n" - " .set at \n" - # endif -+ " .set push \n" -+ " .set " MIPS_ISA_ARCH_LEVEL " \n" - /* Attempt to store new at ptr */ - " scd %L1, %2 \n" - /* If we failed, loop! */ - "\t" __SC_BEQZ "%L1, 1b \n" -- " .set pop \n" - "2: " __SYNC(full, loongson3_war) " \n" -+ " .set pop \n" - : "=&r"(ret), - "=&r"(tmp), - "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr) -diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h -index 3d71081afc55f..e69833213e792 100644 ---- a/arch/mips/include/asm/cpu-features.h -+++ b/arch/mips/include/asm/cpu-features.h -@@ -124,7 +124,24 @@ - #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE) - #endif - #ifndef cpu_has_octeon_cache --#define cpu_has_octeon_cache 0 -+#define cpu_has_octeon_cache \ -+({ \ -+ int __res; \ -+ \ -+ switch (boot_cpu_type()) { \ -+ case CPU_CAVIUM_OCTEON: \ -+ case CPU_CAVIUM_OCTEON_PLUS: \ -+ case CPU_CAVIUM_OCTEON2: \ -+ case CPU_CAVIUM_OCTEON3: \ -+ __res = 1; \ -+ break; \ -+ \ -+ default: \ -+ __res = 0; \ -+ } \ -+ \ -+ __res; \ -+}) - #endif - /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */ - #ifndef cpu_has_fpu -@@ -351,7 +368,7 @@ - ({ \ - int __res; \ - \ -- switch (current_cpu_type()) { \ -+ switch (boot_cpu_type()) { \ - case CPU_M14KC: \ - case CPU_74K: \ - case CPU_1074K: \ -diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h -index 62c7dfb90e06c..908e96e3a3117 100644 ---- a/arch/mips/include/asm/dec/prom.h -+++ b/arch/mips/include/asm/dec/prom.h -@@ -43,16 +43,11 @@ - */ - #define REX_PROM_MAGIC 0x30464354 - --#ifdef CONFIG_64BIT -- --#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */ -- --#else /* !CONFIG_64BIT */ -- --#define prom_is_rex(magic) ((magic) == REX_PROM_MAGIC) -- --#endif /* !CONFIG_64BIT */ -- -+/* KN04 and KN05 are REX PROMs, so only do the check for R3k systems. 
*/ -+static inline bool prom_is_rex(u32 magic) -+{ -+ return !IS_ENABLED(CONFIG_CPU_R3000) || magic == REX_PROM_MAGIC; -+} - - /* - * 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and -@@ -75,7 +70,7 @@ - */ - typedef struct { - int pagesize; -- unsigned char bitmap[0]; -+ unsigned char bitmap[]; - } memmap; - - -diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h -index b463f2aa5a613..db497a8167da2 100644 ---- a/arch/mips/include/asm/ftrace.h -+++ b/arch/mips/include/asm/ftrace.h -@@ -32,7 +32,7 @@ do { \ - ".previous\n" \ - \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR) "\t1b, 3b\n\t" \ -+ STR(PTR_WD) "\t1b, 3b\n\t" \ - ".previous\n" \ - \ - : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\ -@@ -54,7 +54,7 @@ do { \ - ".previous\n" \ - \ - ".section\t__ex_table,\"a\"\n\t"\ -- STR(PTR) "\t1b, 3b\n\t" \ -+ STR(PTR_WD) "\t1b, 3b\n\t" \ - ".previous\n" \ - \ - : [tmp_err] "=r" (error) \ -diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h -index d0ef8b4892bbe..d0494ce4b3373 100644 ---- a/arch/mips/include/asm/fw/fw.h -+++ b/arch/mips/include/asm/fw/fw.h -@@ -26,6 +26,6 @@ extern char *fw_getcmdline(void); - extern void fw_meminit(void); - extern char *fw_getenv(char *name); - extern unsigned long fw_getenvl(char *name); --extern void fw_init_early_console(char port); -+extern void fw_init_early_console(void); - - #endif /* __ASM_FW_H_ */ -diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h -index 696f6b0093776..cfd9e15817774 100644 ---- a/arch/mips/include/asm/kvm_host.h -+++ b/arch/mips/include/asm/kvm_host.h -@@ -318,7 +318,7 @@ struct kvm_vcpu_arch { - unsigned int aux_inuse; - - /* COP0 State */ -- struct mips_coproc *cop0; -+ struct mips_coproc cop0; - - /* Resume PC after MMIO completion */ - unsigned long io_pc; -@@ -699,7 +699,7 @@ static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu) - static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) - { - return kvm_mips_guest_can_have_fpu(vcpu) && -- kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; -+ kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP; - } - - static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) -@@ -711,7 +711,7 @@ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) - static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) - { - return kvm_mips_guest_can_have_msa(vcpu) && -- kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA; -+ kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA; - } - - struct kvm_mips_callbacks { -diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h -index ecda7295ddcd1..3fa6340903882 100644 ---- a/arch/mips/include/asm/local.h -+++ b/arch/mips/include/asm/local.h -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -39,7 +40,7 @@ static __inline__ long local_add_return(long i, local_t * l) - " .set arch=r4000 \n" - __SYNC(full, loongson3_war) " \n" - "1:" __LL "%1, %2 # local_add_return \n" -- " addu %0, %1, %3 \n" -+ __stringify(LONG_ADDU) " %0, %1, %3 \n" - __SC "%0, %2 \n" - " beqzl %0, 1b \n" - " addu %0, %1, %3 \n" -@@ -55,7 +56,7 @@ static __inline__ long local_add_return(long i, local_t * l) - " .set "MIPS_ISA_ARCH_LEVEL" \n" - __SYNC(full, loongson3_war) " \n" - "1:" __LL "%1, %2 # local_add_return \n" -- " addu %0, %1, %3 \n" -+ __stringify(LONG_ADDU) " %0, %1, %3 \n" - __SC "%0, %2 \n" - " beqz %0, 1b \n" - " addu %0, %1, %3 
\n" -@@ -88,7 +89,7 @@ static __inline__ long local_sub_return(long i, local_t * l) - " .set arch=r4000 \n" - __SYNC(full, loongson3_war) " \n" - "1:" __LL "%1, %2 # local_sub_return \n" -- " subu %0, %1, %3 \n" -+ __stringify(LONG_SUBU) " %0, %1, %3 \n" - __SC "%0, %2 \n" - " beqzl %0, 1b \n" - " subu %0, %1, %3 \n" -@@ -104,7 +105,7 @@ static __inline__ long local_sub_return(long i, local_t * l) - " .set "MIPS_ISA_ARCH_LEVEL" \n" - __SYNC(full, loongson3_war) " \n" - "1:" __LL "%1, %2 # local_sub_return \n" -- " subu %0, %1, %3 \n" -+ __stringify(LONG_SUBU) " %0, %1, %3 \n" - __SC "%0, %2 \n" - " beqz %0, 1b \n" - " subu %0, %1, %3 \n" -diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h -index 58f829c9b6c70..79d6fd249583f 100644 ---- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h -+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h -@@ -26,7 +26,6 @@ - #define cpu_has_3k_cache 0 - #define cpu_has_4k_cache 1 - #define cpu_has_tx39_cache 0 --#define cpu_has_fpu 1 - #define cpu_has_nofpuex 0 - #define cpu_has_32fpr 1 - #define cpu_has_counter 1 -diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h -index 49a93e82c2528..2635b6ba1cb54 100644 ---- a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h -+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h -@@ -29,7 +29,6 @@ - #define cpu_has_3k_cache 0 - #define cpu_has_4k_cache 1 - #define cpu_has_tx39_cache 0 --#define cpu_has_fpu 1 - #define cpu_has_nofpuex 0 - #define cpu_has_32fpr 1 - #define cpu_has_counter 1 -diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h -index 13373c5144f89..efb41b3519747 100644 ---- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h -+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h -@@ -32,7 +32,7 @@ - nop - /* Loongson-3A R2/R3 */ - andi t0, (PRID_IMP_MASK | PRID_REV_MASK) -- slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) -+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) - bnez t0, 2f - nop - 1: -@@ -63,7 +63,7 @@ - nop - /* Loongson-3A R2/R3 */ - andi t0, (PRID_IMP_MASK | PRID_REV_MASK) -- slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) -+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) - bnez t0, 2f - nop - 1: -diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h -index 6bbf082dd149e..79d5bb0e06d63 100644 ---- a/arch/mips/include/asm/mach-ralink/mt7621.h -+++ b/arch/mips/include/asm/mach-ralink/mt7621.h -@@ -7,10 +7,12 @@ - #ifndef _MT7621_REGS_H_ - #define _MT7621_REGS_H_ - -+#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(x))) -+ - #define MT7621_PALMBUS_BASE 0x1C000000 - #define MT7621_PALMBUS_SIZE 0x03FFFFFF - --#define MT7621_SYSC_BASE 0x1E000000 -+#define MT7621_SYSC_BASE IOMEM(0x1E000000) - - #define SYSC_REG_CHIP_NAME0 0x00 - #define SYSC_REG_CHIP_NAME1 0x04 -diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h -index 9a6eefd127571..3eb767c8a4eec 100644 ---- a/arch/mips/include/asm/mach-rc32434/pci.h -+++ b/arch/mips/include/asm/mach-rc32434/pci.h -@@ -374,7 +374,7 @@ struct pci_msu { - PCI_CFG04_STAT_SSE | \ - PCI_CFG04_STAT_PE) - --#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD) -+#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD) - - #define KORINA_REVID 0 - #define 
KORINA_CLASS_CODE 0 -diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h -index aeae2effa123d..23c67c0871b17 100644 ---- a/arch/mips/include/asm/mips-cm.h -+++ b/arch/mips/include/asm/mips-cm.h -@@ -11,6 +11,7 @@ - #ifndef __MIPS_ASM_MIPS_CM_H__ - #define __MIPS_ASM_MIPS_CM_H__ - -+#include - #include - #include - -@@ -153,8 +154,8 @@ GCR_ACCESSOR_RO(32, 0x030, rev) - #define CM_GCR_REV_MINOR GENMASK(7, 0) - - #define CM_ENCODE_REV(major, minor) \ -- (((major) << __ffs(CM_GCR_REV_MAJOR)) | \ -- ((minor) << __ffs(CM_GCR_REV_MINOR))) -+ (FIELD_PREP(CM_GCR_REV_MAJOR, major) | \ -+ FIELD_PREP(CM_GCR_REV_MINOR, minor)) - - #define CM_REV_CM2 CM_ENCODE_REV(6, 0) - #define CM_REV_CM2_5 CM_ENCODE_REV(7, 0) -@@ -362,10 +363,10 @@ static inline int mips_cm_revision(void) - static inline unsigned int mips_cm_max_vp_width(void) - { - extern int smp_num_siblings; -- uint32_t cfg; - - if (mips_cm_revision() >= CM_REV_CM3) -- return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW; -+ return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW, -+ read_gcr_sys_config2()); - - if (mips_cm_present()) { - /* -@@ -373,8 +374,7 @@ static inline unsigned int mips_cm_max_vp_width(void) - * number of VP(E)s, and if that ever changes then this will - * need revisiting. - */ -- cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE; -- return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1; -+ return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1; - } - - if (IS_ENABLED(CONFIG_SMP)) -diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h -index 0e6bf220db618..6c61e0a639249 100644 ---- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h -+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h -@@ -318,7 +318,7 @@ enum cvmx_chip_types_enum { - - /* Functions to return string based on type */ - #define ENUM_BRD_TYPE_CASE(x) \ -- case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */ -+ case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */ - static inline const char *cvmx_board_type_to_string(enum - cvmx_board_types_enum type) - { -@@ -410,7 +410,7 @@ static inline const char *cvmx_board_type_to_string(enum - } - - #define ENUM_CHIP_TYPE_CASE(x) \ -- case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */ -+ case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */ - static inline const char *cvmx_chip_type_to_string(enum - cvmx_chip_types_enum type) - { -diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h -index c7925d0e98746..867e9c3db76e9 100644 ---- a/arch/mips/include/asm/pgalloc.h -+++ b/arch/mips/include/asm/pgalloc.h -@@ -15,6 +15,7 @@ - - #define __HAVE_ARCH_PMD_ALLOC_ONE - #define __HAVE_ARCH_PUD_ALLOC_ONE -+#define __HAVE_ARCH_PGD_FREE - #include - - static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, -@@ -48,6 +49,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - extern void pgd_init(unsigned long page); - extern pgd_t *pgd_alloc(struct mm_struct *mm); - -+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -+{ -+ free_pages((unsigned long)pgd, PGD_ORDER); -+} -+ - #define __pte_free_tlb(tlb,pte,address) \ - do { \ - pgtable_pte_page_dtor(pte); \ -diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h -index af3788589ee6d..431a1c9d53fc7 100644 ---- a/arch/mips/include/asm/r4kcache.h -+++ b/arch/mips/include/asm/r4kcache.h -@@ -119,7 +119,7 @@ static inline void flush_scache_line(unsigned long addr) - " j 2b \n" \ - " .previous \n" \ - " .section 
__ex_table,\"a\" \n" \ -- " "STR(PTR)" 1b, 3b \n" \ -+ " "STR(PTR_WD)" 1b, 3b \n" \ - " .previous" \ - : "+r" (__err) \ - : "i" (op), "r" (addr), "i" (-EFAULT)); \ -@@ -142,7 +142,7 @@ static inline void flush_scache_line(unsigned long addr) - " j 2b \n" \ - " .previous \n" \ - " .section __ex_table,\"a\" \n" \ -- " "STR(PTR)" 1b, 3b \n" \ -+ " "STR(PTR_WD)" 1b, 3b \n" \ - " .previous" \ - : "+r" (__err) \ - : "i" (op), "r" (addr), "i" (-EFAULT)); \ -diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h -index bb36a400203df..8c56b862fd9c2 100644 ---- a/arch/mips/include/asm/setup.h -+++ b/arch/mips/include/asm/setup.h -@@ -16,7 +16,7 @@ static inline void setup_8250_early_printk_port(unsigned long base, - unsigned int reg_shift, unsigned int timeout) {} - #endif - --extern void set_handler(unsigned long offset, void *addr, unsigned long len); -+void set_handler(unsigned long offset, const void *addr, unsigned long len); - extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); - - typedef void (*vi_handler_t)(void); -diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h -index 25fa651c937d5..ebdf4d910af2f 100644 ---- a/arch/mips/include/asm/syscall.h -+++ b/arch/mips/include/asm/syscall.h -@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task, - static inline long syscall_get_nr(struct task_struct *task, - struct pt_regs *regs) - { -- return current_thread_info()->syscall; -+ return task_thread_info(task)->syscall; - } - - static inline void mips_syscall_update_nr(struct task_struct *task, -diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h -index b05bb70a2e46f..2e107886f97ac 100644 ---- a/arch/mips/include/asm/timex.h -+++ b/arch/mips/include/asm/timex.h -@@ -40,9 +40,9 @@ - typedef unsigned int cycles_t; - - /* -- * On R4000/R4400 before version 5.0 an erratum exists such that if the -- * cycle counter is read in the exact moment that it is matching the -- * compare register, no interrupt will be generated. -+ * On R4000/R4400 an erratum exists such that if the cycle counter is -+ * read in the exact moment that it is matching the compare register, -+ * no interrupt will be generated. - * - * There is a suggested workaround and also the erratum can't strike if - * the compare interrupt isn't being used as the clock source device. -@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid) - if (!__builtin_constant_p(cpu_has_counter)) - asm volatile("" : "=m" (cpu_data[0].options)); - if (likely(cpu_has_counter && -- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0)))) -+ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)))) - return 1; - else - return 0; -@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void) - else - return 0; /* no usable counter */ - } -+#define get_cycles get_cycles - - /* - * Like get_cycles - but where c0_count is not available we desperately - * use c0_random in an attempt to get at least a little bit of entropy. -- * -- * R6000 and R6000A neither have a count register nor a random register. -- * That leaves no entropy source in the CPU itself. 
- */ - static inline unsigned long random_get_entropy(void) - { -- unsigned int prid = read_c0_prid(); -- unsigned int imp = prid & PRID_IMP_MASK; -+ unsigned int c0_random; - -- if (can_use_mips_counter(prid)) -+ if (can_use_mips_counter(read_c0_prid())) - return read_c0_count(); -- else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A)) -- return read_c0_random(); -+ -+ if (cpu_has_3kex) -+ c0_random = (read_c0_random() >> 8) & 0x3f; - else -- return 0; /* no usable register */ -+ c0_random = read_c0_random() & 0x3f; -+ return (random_get_entropy_fallback() << 6) | (0x3f - c0_random); - } - #define random_get_entropy random_get_entropy - -diff --git a/arch/mips/include/asm/unaligned-emul.h b/arch/mips/include/asm/unaligned-emul.h -index 2022b18944b97..9af0f4d3d288c 100644 ---- a/arch/mips/include/asm/unaligned-emul.h -+++ b/arch/mips/include/asm/unaligned-emul.h -@@ -20,8 +20,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -41,8 +41,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -74,10 +74,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -102,8 +102,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -125,8 +125,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -145,8 +145,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -178,10 +178,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -223,14 +223,14 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -- STR(PTR)"\t5b, 11b\n\t" \ -- STR(PTR)"\t6b, 11b\n\t" \ -- STR(PTR)"\t7b, 11b\n\t" \ 
-- STR(PTR)"\t8b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t5b, 11b\n\t" \ -+ STR(PTR_WD)"\t6b, 11b\n\t" \ -+ STR(PTR_WD)"\t7b, 11b\n\t" \ -+ STR(PTR_WD)"\t8b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -255,8 +255,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT));\ -@@ -276,8 +276,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); \ -@@ -296,8 +296,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); \ -@@ -325,10 +325,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT) \ -@@ -365,14 +365,14 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -- STR(PTR)"\t5b, 11b\n\t" \ -- STR(PTR)"\t6b, 11b\n\t" \ -- STR(PTR)"\t7b, 11b\n\t" \ -- STR(PTR)"\t8b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t5b, 11b\n\t" \ -+ STR(PTR_WD)"\t6b, 11b\n\t" \ -+ STR(PTR_WD)"\t7b, 11b\n\t" \ -+ STR(PTR_WD)"\t8b, 11b\n\t" \ - ".previous" \ - : "=&r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT) \ -@@ -398,8 +398,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -419,8 +419,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -452,10 +452,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -481,8 +481,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ 
STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -504,8 +504,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -524,8 +524,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -557,10 +557,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -602,14 +602,14 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -- STR(PTR)"\t5b, 11b\n\t" \ -- STR(PTR)"\t6b, 11b\n\t" \ -- STR(PTR)"\t7b, 11b\n\t" \ -- STR(PTR)"\t8b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t5b, 11b\n\t" \ -+ STR(PTR_WD)"\t6b, 11b\n\t" \ -+ STR(PTR_WD)"\t7b, 11b\n\t" \ -+ STR(PTR_WD)"\t8b, 11b\n\t" \ - ".previous" \ - : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); \ -@@ -632,8 +632,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT));\ -@@ -653,8 +653,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); \ -@@ -673,8 +673,8 @@ do { \ - "j\t3b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 4b\n\t" \ -- STR(PTR)"\t2b, 4b\n\t" \ -+ STR(PTR_WD)"\t1b, 4b\n\t" \ -+ STR(PTR_WD)"\t2b, 4b\n\t" \ - ".previous" \ - : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); \ -@@ -703,10 +703,10 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ - ".previous" \ - : "=&r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT) \ -@@ -743,14 +743,14 @@ do { \ - "j\t10b\n\t" \ - ".previous\n\t" \ - ".section\t__ex_table,\"a\"\n\t" \ -- STR(PTR)"\t1b, 11b\n\t" \ -- STR(PTR)"\t2b, 11b\n\t" \ -- STR(PTR)"\t3b, 11b\n\t" \ -- STR(PTR)"\t4b, 11b\n\t" \ -- STR(PTR)"\t5b, 11b\n\t" \ -- STR(PTR)"\t6b, 11b\n\t" \ -- STR(PTR)"\t7b, 11b\n\t" \ -- STR(PTR)"\t8b, 11b\n\t" \ -+ STR(PTR_WD)"\t1b, 11b\n\t" \ -+ 
STR(PTR_WD)"\t2b, 11b\n\t" \ -+ STR(PTR_WD)"\t3b, 11b\n\t" \ -+ STR(PTR_WD)"\t4b, 11b\n\t" \ -+ STR(PTR_WD)"\t5b, 11b\n\t" \ -+ STR(PTR_WD)"\t6b, 11b\n\t" \ -+ STR(PTR_WD)"\t7b, 11b\n\t" \ -+ STR(PTR_WD)"\t8b, 11b\n\t" \ - ".previous" \ - : "=&r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT) \ -diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c -index 630fcb4cb30e7..8ebcc298bf759 100644 ---- a/arch/mips/kernel/cpu-probe.c -+++ b/arch/mips/kernel/cpu-probe.c -@@ -1734,9 +1734,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c) - - static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) - { -- decode_configs(c); -+ c->cputype = CPU_LOONGSON64; - - /* All Loongson processors covered here define ExcCode 16 as GSExc. */ -+ decode_configs(c); - c->options |= MIPS_CPU_GSEXCEX; - - switch (c->processor_id & PRID_IMP_MASK) { -@@ -1746,7 +1747,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) - case PRID_REV_LOONGSON2K_R1_1: - case PRID_REV_LOONGSON2K_R1_2: - case PRID_REV_LOONGSON2K_R1_3: -- c->cputype = CPU_LOONGSON64; - __cpu_name[cpu] = "Loongson-2K"; - set_elf_platform(cpu, "gs264e"); - set_isa(c, MIPS_CPU_ISA_M64R2); -@@ -1759,14 +1759,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) - switch (c->processor_id & PRID_REV_MASK) { - case PRID_REV_LOONGSON3A_R2_0: - case PRID_REV_LOONGSON3A_R2_1: -- c->cputype = CPU_LOONGSON64; - __cpu_name[cpu] = "ICT Loongson-3"; - set_elf_platform(cpu, "loongson3a"); - set_isa(c, MIPS_CPU_ISA_M64R2); - break; - case PRID_REV_LOONGSON3A_R3_0: - case PRID_REV_LOONGSON3A_R3_1: -- c->cputype = CPU_LOONGSON64; - __cpu_name[cpu] = "ICT Loongson-3"; - set_elf_platform(cpu, "loongson3a"); - set_isa(c, MIPS_CPU_ISA_M64R2); -@@ -1786,7 +1784,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) - c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ - break; - case PRID_IMP_LOONGSON_64G: -- c->cputype = CPU_LOONGSON64; - __cpu_name[cpu] = "ICT Loongson-3"; - set_elf_platform(cpu, "loongson3a"); - set_isa(c, MIPS_CPU_ISA_M64R2); -diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c -index 662c8db9f45ba..9f5b1247b4ba4 100644 ---- a/arch/mips/kernel/jump_label.c -+++ b/arch/mips/kernel/jump_label.c -@@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e, - * The branch offset must fit in the instruction's 26 - * bit field. 
- */ -- WARN_ON((offset >= BIT(25)) || -+ WARN_ON((offset >= (long)BIT(25)) || - (offset < -(long)BIT(25))); - - insn.j_format.opcode = bc6_op; -diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c -index 75bff0f773198..b0934a0d7aedd 100644 ---- a/arch/mips/kernel/kprobes.c -+++ b/arch/mips/kernel/kprobes.c -@@ -11,6 +11,8 @@ - * Copyright (C) IBM Corporation, 2002, 2004 - */ - -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -80,8 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) - insn = p->addr[0]; - - if (insn_has_ll_or_sc(insn)) { -- pr_notice("Kprobes for ll and sc instructions are not" -- "supported\n"); -+ pr_notice("Kprobes for ll and sc instructions are not supported\n"); - ret = -EINVAL; - goto out; - } -@@ -219,7 +220,7 @@ static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, - return 0; - - unaligned: -- pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); -+ pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm); - force_sig(SIGBUS); - return -EFAULT; - -@@ -238,10 +239,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, - regs->cp0_epc = (unsigned long)p->addr; - else if (insn_has_delayslot(p->opcode)) { - ret = evaluate_branch_instruction(p, regs, kcb); -- if (ret < 0) { -- pr_notice("Kprobes: Error in evaluating branch\n"); -+ if (ret < 0) - return; -- } - } - regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; - } -diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c -index 90f1c3df1f0e4..b4f7d950c8468 100644 ---- a/arch/mips/kernel/mips-cm.c -+++ b/arch/mips/kernel/mips-cm.c -@@ -221,8 +221,7 @@ static void mips_cm_probe_l2sync(void) - phys_addr_t addr; - - /* L2-only sync was introduced with CM major revision 6 */ -- major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >> -- __ffs(CM_GCR_REV_MAJOR); -+ major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev()); - if (major_rev < 6) - return; - -@@ -306,13 +305,13 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, - preempt_disable(); - - if (cm_rev >= CM_REV_CM3) { -- val = core << __ffs(CM3_GCR_Cx_OTHER_CORE); -- val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP); -+ val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) | -+ FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp); - - if (cm_rev >= CM_REV_CM3_5) { - val |= CM_GCR_Cx_OTHER_CLUSTER_EN; -- val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER); -- val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK); -+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster); -+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block); - } else { - WARN_ON(cluster != 0); - WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); -@@ -342,7 +341,7 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, - spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), - per_cpu(cm_core_lock_flags, curr_core)); - -- val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM); -+ val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core); - } - - write_gcr_cl_other(val); -@@ -386,8 +385,8 @@ void mips_cm_error_report(void) - cm_other = read_gcr_error_mult(); - - if (revision < CM_REV_CM3) { /* CM2 */ -- cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE); -- ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); -+ cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error); -+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other); - - if (!cause) - return; -@@ -445,8 +444,8 @@ void mips_cm_error_report(void) - ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits; - ulong 
cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit; - -- cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE); -- ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND); -+ cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error); -+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other); - - if (!cause) - return; -diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c -index 8d2535123f11c..d005be84c482b 100644 ---- a/arch/mips/kernel/mips-cpc.c -+++ b/arch/mips/kernel/mips-cpc.c -@@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void) - cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); - if (cpc_node) { - err = of_address_to_resource(cpc_node, 0, &res); -+ of_node_put(cpc_node); - if (!err) - return res.start; - } -diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c -index a39ec755e4c24..750fe569862b6 100644 ---- a/arch/mips/kernel/mips-r2-to-r6-emul.c -+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c -@@ -1258,10 +1258,10 @@ fpu_emul: - " j 10b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1333,10 +1333,10 @@ fpu_emul: - " j 10b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1404,10 +1404,10 @@ fpu_emul: - " j 9b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1474,10 +1474,10 @@ fpu_emul: - " j 9b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1589,14 +1589,14 @@ fpu_emul: - " j 9b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -- STR(PTR) " 5b,8b\n" -- STR(PTR) " 6b,8b\n" -- STR(PTR) " 7b,8b\n" -- STR(PTR) " 0b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" -+ STR(PTR_WD) " 5b,8b\n" -+ STR(PTR_WD) " 6b,8b\n" -+ STR(PTR_WD) " 7b,8b\n" -+ STR(PTR_WD) " 0b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1708,14 +1708,14 @@ fpu_emul: - " j 9b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -- STR(PTR) " 5b,8b\n" -- STR(PTR) " 6b,8b\n" -- STR(PTR) " 7b,8b\n" -- STR(PTR) " 0b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" -+ STR(PTR_WD) " 5b,8b\n" -+ STR(PTR_WD) " 6b,8b\n" -+ STR(PTR_WD) " 7b,8b\n" -+ STR(PTR_WD) " 0b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1827,14 +1827,14 @@ fpu_emul: - " j 9b\n" - " 
.previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -- STR(PTR) " 5b,8b\n" -- STR(PTR) " 6b,8b\n" -- STR(PTR) " 7b,8b\n" -- STR(PTR) " 0b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" -+ STR(PTR_WD) " 5b,8b\n" -+ STR(PTR_WD) " 6b,8b\n" -+ STR(PTR_WD) " 7b,8b\n" -+ STR(PTR_WD) " 0b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -1945,14 +1945,14 @@ fpu_emul: - " j 9b\n" - " .previous\n" - " .section __ex_table,\"a\"\n" -- STR(PTR) " 1b,8b\n" -- STR(PTR) " 2b,8b\n" -- STR(PTR) " 3b,8b\n" -- STR(PTR) " 4b,8b\n" -- STR(PTR) " 5b,8b\n" -- STR(PTR) " 6b,8b\n" -- STR(PTR) " 7b,8b\n" -- STR(PTR) " 0b,8b\n" -+ STR(PTR_WD) " 1b,8b\n" -+ STR(PTR_WD) " 2b,8b\n" -+ STR(PTR_WD) " 3b,8b\n" -+ STR(PTR_WD) " 4b,8b\n" -+ STR(PTR_WD) " 5b,8b\n" -+ STR(PTR_WD) " 6b,8b\n" -+ STR(PTR_WD) " 7b,8b\n" -+ STR(PTR_WD) " 0b,8b\n" - " .previous\n" - " .set pop\n" - : "+&r"(rt), "=&r"(rs), -@@ -2007,7 +2007,7 @@ fpu_emul: - "j 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" -- STR(PTR) " 1b,3b\n" -+ STR(PTR_WD) " 1b,3b\n" - ".previous\n" - : "=&r"(res), "+&r"(err) - : "r"(vaddr), "i"(SIGSEGV) -@@ -2065,7 +2065,7 @@ fpu_emul: - "j 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" -- STR(PTR) " 1b,3b\n" -+ STR(PTR_WD) " 1b,3b\n" - ".previous\n" - : "+&r"(res), "+&r"(err) - : "r"(vaddr), "i"(SIGSEGV)); -@@ -2126,7 +2126,7 @@ fpu_emul: - "j 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" -- STR(PTR) " 1b,3b\n" -+ STR(PTR_WD) " 1b,3b\n" - ".previous\n" - : "=&r"(res), "+&r"(err) - : "r"(vaddr), "i"(SIGSEGV) -@@ -2189,7 +2189,7 @@ fpu_emul: - "j 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" -- STR(PTR) " 1b,3b\n" -+ STR(PTR_WD) " 1b,3b\n" - ".previous\n" - : "+&r"(res), "+&r"(err) - : "r"(vaddr), "i"(SIGSEGV)); -diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c -index 4184d641f05e0..33a02f3814f58 100644 ---- a/arch/mips/kernel/proc.c -+++ b/arch/mips/kernel/proc.c -@@ -172,7 +172,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) - { - unsigned long i = *pos; - -- return i < NR_CPUS ? (void *) (i + 1) : NULL; -+ return i < nr_cpu_ids ? (void *) (i + 1) : NULL; - } - - static void *c_next(struct seq_file *m, void *v, loff_t *pos) -diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S -index 12e58053544fc..2748c55820c24 100644 ---- a/arch/mips/kernel/r2300_fpu.S -+++ b/arch/mips/kernel/r2300_fpu.S -@@ -23,14 +23,14 @@ - #define EX(a,b) \ - 9: a,##b; \ - .section __ex_table,"a"; \ -- PTR 9b,fault; \ -+ PTR_WD 9b,fault; \ - .previous - - #define EX2(a,b) \ - 9: a,##b; \ - .section __ex_table,"a"; \ -- PTR 9b,bad_stack; \ -- PTR 9b+4,bad_stack; \ -+ PTR_WD 9b,fault; \ -+ PTR_WD 9b+4,fault; \ - .previous - - .set mips1 -diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S -index b91e911064756..2e687c60bc4f1 100644 ---- a/arch/mips/kernel/r4k_fpu.S -+++ b/arch/mips/kernel/r4k_fpu.S -@@ -31,7 +31,7 @@ - .ex\@: \insn \reg, \src - .set pop - .section __ex_table,"a" -- PTR .ex\@, fault -+ PTR_WD .ex\@, fault - .previous - .endm - -diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S -index f3c908abdbb80..f5b2ef979b437 100644 ---- a/arch/mips/kernel/relocate_kernel.S -+++ b/arch/mips/kernel/relocate_kernel.S -@@ -145,12 +145,11 @@ LEAF(kexec_smp_wait) - * kexec_args[0..3] are used to prepare register values. 
- */ - --kexec_args: -- EXPORT(kexec_args) --arg0: PTR 0x0 --arg1: PTR 0x0 --arg2: PTR 0x0 --arg3: PTR 0x0 -+EXPORT(kexec_args) -+arg0: PTR_WD 0x0 -+arg1: PTR_WD 0x0 -+arg2: PTR_WD 0x0 -+arg3: PTR_WD 0x0 - .size kexec_args,PTRSIZE*4 - - #ifdef CONFIG_SMP -@@ -159,31 +158,27 @@ arg3: PTR 0x0 - * their registers a0-a3. secondary_kexec_args[0..3] are used - * to prepare register values. - */ --secondary_kexec_args: -- EXPORT(secondary_kexec_args) --s_arg0: PTR 0x0 --s_arg1: PTR 0x0 --s_arg2: PTR 0x0 --s_arg3: PTR 0x0 -+EXPORT(secondary_kexec_args) -+s_arg0: PTR_WD 0x0 -+s_arg1: PTR_WD 0x0 -+s_arg2: PTR_WD 0x0 -+s_arg3: PTR_WD 0x0 - .size secondary_kexec_args,PTRSIZE*4 - kexec_flag: - LONG 0x1 - - #endif - --kexec_start_address: -- EXPORT(kexec_start_address) -- PTR 0x0 -+EXPORT(kexec_start_address) -+ PTR_WD 0x0 - .size kexec_start_address, PTRSIZE - --kexec_indirection_page: -- EXPORT(kexec_indirection_page) -- PTR 0 -+EXPORT(kexec_indirection_page) -+ PTR_WD 0 - .size kexec_indirection_page, PTRSIZE - - relocate_new_kernel_end: - --relocate_new_kernel_size: -- EXPORT(relocate_new_kernel_size) -- PTR relocate_new_kernel_end - relocate_new_kernel -+EXPORT(relocate_new_kernel_size) -+ PTR_WD relocate_new_kernel_end - relocate_new_kernel - .size relocate_new_kernel_size, PTRSIZE -diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S -index b1b2e106f7118..9bfce5f75f601 100644 ---- a/arch/mips/kernel/scall32-o32.S -+++ b/arch/mips/kernel/scall32-o32.S -@@ -72,10 +72,10 @@ loads_done: - .set pop - - .section __ex_table,"a" -- PTR load_a4, bad_stack_a4 -- PTR load_a5, bad_stack_a5 -- PTR load_a6, bad_stack_a6 -- PTR load_a7, bad_stack_a7 -+ PTR_WD load_a4, bad_stack_a4 -+ PTR_WD load_a5, bad_stack_a5 -+ PTR_WD load_a6, bad_stack_a6 -+ PTR_WD load_a7, bad_stack_a7 - .previous - - lw t0, TI_FLAGS($28) # syscall tracing enabled? 
-@@ -216,7 +216,7 @@ einval: li v0, -ENOSYS - #endif /* CONFIG_MIPS_MT_FPAFF */ - - #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) --#define __SYSCALL(nr, entry) PTR entry -+#define __SYSCALL(nr, entry) PTR_WD entry - .align 2 - .type sys_call_table, @object - EXPORT(sys_call_table) -diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S -index f650c55a17dc5..97456b2ca7dc3 100644 ---- a/arch/mips/kernel/scall64-n32.S -+++ b/arch/mips/kernel/scall64-n32.S -@@ -101,7 +101,7 @@ not_n32_scall: - - END(handle_sysn32) - --#define __SYSCALL(nr, entry) PTR entry -+#define __SYSCALL(nr, entry) PTR_WD entry - .type sysn32_call_table, @object - EXPORT(sysn32_call_table) - #include -diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S -index 5d7bfc65e4d0b..5f6ed4b4c3993 100644 ---- a/arch/mips/kernel/scall64-n64.S -+++ b/arch/mips/kernel/scall64-n64.S -@@ -109,7 +109,7 @@ illegal_syscall: - j n64_syscall_exit - END(handle_sys64) - --#define __SYSCALL(nr, entry) PTR entry -+#define __SYSCALL(nr, entry) PTR_WD entry - .align 3 - .type sys_call_table, @object - EXPORT(sys_call_table) -diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S -index cedc8bd888046..d3c2616cba226 100644 ---- a/arch/mips/kernel/scall64-o32.S -+++ b/arch/mips/kernel/scall64-o32.S -@@ -73,10 +73,10 @@ load_a7: lw a7, 28(t0) # argument #8 from usp - loads_done: - - .section __ex_table,"a" -- PTR load_a4, bad_stack_a4 -- PTR load_a5, bad_stack_a5 -- PTR load_a6, bad_stack_a6 -- PTR load_a7, bad_stack_a7 -+ PTR_WD load_a4, bad_stack_a4 -+ PTR_WD load_a5, bad_stack_a5 -+ PTR_WD load_a6, bad_stack_a6 -+ PTR_WD load_a7, bad_stack_a7 - .previous - - li t1, _TIF_WORK_SYSCALL_ENTRY -@@ -214,7 +214,7 @@ einval: li v0, -ENOSYS - END(sys32_syscall) - - #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) --#define __SYSCALL(nr, entry) PTR entry -+#define __SYSCALL(nr, entry) PTR_WD entry - .align 3 - .type sys32_call_table,@object - EXPORT(sys32_call_table) -diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c -index f979adfd4fc20..145f905fb3623 100644 ---- a/arch/mips/kernel/setup.c -+++ b/arch/mips/kernel/setup.c -@@ -11,6 +11,8 @@ - * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki - */ - #include -+#include -+#include - #include - #include - #include -@@ -156,10 +158,6 @@ static unsigned long __init init_initrd(void) - pr_err("initrd start must be page aligned\n"); - goto disable; - } -- if (initrd_start < PAGE_OFFSET) { -- pr_err("initrd start < PAGE_OFFSET\n"); -- goto disable; -- } - - /* - * Sanitize initrd addresses. 
For example firmware -@@ -172,6 +170,11 @@ static unsigned long __init init_initrd(void) - initrd_end = (unsigned long)__va(end); - initrd_start = (unsigned long)__va(__pa(initrd_start)); - -+ if (initrd_start < PAGE_OFFSET) { -+ pr_err("initrd start < PAGE_OFFSET\n"); -+ goto disable; -+ } -+ - ROOT_DEV = Root_RAM0; - return PFN_UP(end); - disable: -@@ -803,9 +806,20 @@ early_param("coherentio", setcoherentio); - - static int __init setnocoherentio(char *str) - { -- dma_default_coherent = true; -+ dma_default_coherent = false; - pr_info("Software DMA cache coherency (command line)\n"); - return 0; - } - early_param("nocoherentio", setnocoherentio); - #endif -+ -+void __init arch_cpu_finalize_init(void) -+{ -+ unsigned int cpu = smp_processor_id(); -+ -+ cpu_data[cpu].udelay_val = loops_per_jiffy; -+ check_bugs32(); -+ -+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) -+ check_bugs64(); -+} -diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c -index d542fb7af3ba2..1986d13094100 100644 ---- a/arch/mips/kernel/smp.c -+++ b/arch/mips/kernel/smp.c -@@ -351,6 +351,9 @@ asmlinkage void start_secondary(void) - cpu = smp_processor_id(); - cpu_data[cpu].udelay_val = loops_per_jiffy; - -+ set_cpu_sibling_map(cpu); -+ set_cpu_core_map(cpu); -+ - cpumask_set_cpu(cpu, &cpu_coherent_mask); - notify_cpu_starting(cpu); - -@@ -362,9 +365,6 @@ asmlinkage void start_secondary(void) - /* The CPU is running and counters synchronised, now mark it online */ - set_cpu_online(cpu, true); - -- set_cpu_sibling_map(cpu); -- set_cpu_core_map(cpu); -- - calculate_cpu_foreign_map(); - - /* -diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c -index 2afa3eef486a9..ae93a607ddf7e 100644 ---- a/arch/mips/kernel/syscall.c -+++ b/arch/mips/kernel/syscall.c -@@ -122,8 +122,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) - " j 3b \n" - " .previous \n" - " .section __ex_table,\"a\" \n" -- " "STR(PTR)" 1b, 4b \n" -- " "STR(PTR)" 2b, 4b \n" -+ " "STR(PTR_WD)" 1b, 4b \n" -+ " "STR(PTR_WD)" 2b, 4b \n" - " .previous \n" - " .set pop \n" - : [old] "=&r" (old), -@@ -152,8 +152,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) - " j 3b \n" - " .previous \n" - " .section __ex_table,\"a\" \n" -- " "STR(PTR)" 1b, 5b \n" -- " "STR(PTR)" 2b, 5b \n" -+ " "STR(PTR_WD)" 1b, 5b \n" -+ " "STR(PTR_WD)" 2b, 5b \n" - " .previous \n" - " .set pop \n" - : [old] "=&r" (old), -@@ -240,12 +240,3 @@ SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op) - { - return -ENOSYS; - } -- --/* -- * If we ever come here the user sp is bad. Zap the process right away. -- * Due to the bad stack signaling wouldn't work. -- */ --asmlinkage void bad_stack(void) --{ -- do_exit(SIGSEGV); --} -diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c -index caa01457dce60..ed339d7979f3f 100644 ---- a/arch/mips/kernel/time.c -+++ b/arch/mips/kernel/time.c -@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void) - case CPU_R4400MC: - /* - * The published errata for the R4400 up to 3.0 say the CPU -- * has the mfc0 from count bug. -+ * has the mfc0 from count bug. This seems the last version -+ * produced. 
- */ -- if ((current_cpu_data.processor_id & 0xff) <= 0x30) -- return 1; -- -- /* -- * we assume newer revisions are ok -- */ -- return 0; -+ return 1; - } - - return 0; -diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c -index 6f07362de5cec..afb2c955d99ef 100644 ---- a/arch/mips/kernel/traps.c -+++ b/arch/mips/kernel/traps.c -@@ -416,7 +416,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) - if (regs && kexec_should_crash(current)) - crash_kexec(regs); - -- do_exit(sig); -+ make_task_dead(sig); - } - - extern struct exception_table_entry __start___dbe_table[]; -@@ -2085,19 +2085,19 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) - * If no shadow set is selected then use the default handler - * that does normal register saving and standard interrupt exit - */ -- extern char except_vec_vi, except_vec_vi_lui; -- extern char except_vec_vi_ori, except_vec_vi_end; -- extern char rollback_except_vec_vi; -- char *vec_start = using_rollback_handler() ? -- &rollback_except_vec_vi : &except_vec_vi; -+ extern const u8 except_vec_vi[], except_vec_vi_lui[]; -+ extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; -+ extern const u8 rollback_except_vec_vi[]; -+ const u8 *vec_start = using_rollback_handler() ? -+ rollback_except_vec_vi : except_vec_vi; - #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) -- const int lui_offset = &except_vec_vi_lui - vec_start + 2; -- const int ori_offset = &except_vec_vi_ori - vec_start + 2; -+ const int lui_offset = except_vec_vi_lui - vec_start + 2; -+ const int ori_offset = except_vec_vi_ori - vec_start + 2; - #else -- const int lui_offset = &except_vec_vi_lui - vec_start; -- const int ori_offset = &except_vec_vi_ori - vec_start; -+ const int lui_offset = except_vec_vi_lui - vec_start; -+ const int ori_offset = except_vec_vi_ori - vec_start; - #endif -- const int handler_len = &except_vec_vi_end - vec_start; -+ const int handler_len = except_vec_vi_end - vec_start; - - if (handler_len > VECTORSPACING) { - /* -@@ -2305,7 +2305,7 @@ void per_cpu_trap_init(bool is_boot_cpu) - } - - /* Install CPU exception handler */ --void set_handler(unsigned long offset, void *addr, unsigned long size) -+void set_handler(unsigned long offset, const void *addr, unsigned long size) - { - #ifdef CONFIG_CPU_MICROMIPS - memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); -diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c -index 3d0cf471f2fe1..b2cc2c2dd4bfc 100644 ---- a/arch/mips/kernel/vdso.c -+++ b/arch/mips/kernel/vdso.c -@@ -159,7 +159,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - /* Map GIC user page. 
*/ - if (gic_size) { - gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; -- gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; -+ gic_pfn = PFN_DOWN(__pa(gic_base)); - - ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, - pgprot_noncached(vma->vm_page_prot)); -diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S -index 1f98947fe715d..91d6a5360bb9c 100644 ---- a/arch/mips/kernel/vmlinux.lds.S -+++ b/arch/mips/kernel/vmlinux.lds.S -@@ -15,6 +15,8 @@ - #define EMITS_PT_NOTE - #endif - -+#define RUNTIME_DISCARD_EXIT -+ - #include - - #undef mips -diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c -index e673603e11e5d..92140edb3ce3e 100644 ---- a/arch/mips/kernel/vpe-cmp.c -+++ b/arch/mips/kernel/vpe-cmp.c -@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe); - - static void vpe_device_release(struct device *cd) - { -- kfree(cd); - } - - static struct class vpe_class = { -@@ -157,6 +156,7 @@ out_dev: - device_del(&vpe_device); - - out_class: -+ put_device(&vpe_device); - class_unregister(&vpe_class); - - out_chrdev: -@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void) - { - struct vpe *v, *n; - -- device_del(&vpe_device); -+ device_unregister(&vpe_device); - class_unregister(&vpe_class); - unregister_chrdev(major, VPE_MODULE_NAME); - -diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c -index bad6b0891b2b5..84a82b551ec35 100644 ---- a/arch/mips/kernel/vpe-mt.c -+++ b/arch/mips/kernel/vpe-mt.c -@@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe); - - static void vpe_device_release(struct device *cd) - { -- kfree(cd); - } - - static struct class vpe_class = { -@@ -497,6 +496,7 @@ out_dev: - device_del(&vpe_device); - - out_class: -+ put_device(&vpe_device); - class_unregister(&vpe_class); - - out_chrdev: -@@ -509,7 +509,7 @@ void __exit vpe_module_exit(void) - { - struct vpe *v, *n; - -- device_del(&vpe_device); -+ device_unregister(&vpe_device); - class_unregister(&vpe_class); - unregister_chrdev(major, VPE_MODULE_NAME); - -diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c -index 22e745e49b0ab..3e80b0b2deaab 100644 ---- a/arch/mips/kvm/emulate.c -+++ b/arch/mips/kvm/emulate.c -@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) - */ - int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - - return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || - (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); -@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) - */ - static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - ktime_t expires, threshold; - u32 count, compare; - int running; -@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) - */ - u32 kvm_mips_read_count(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - - /* If count disabled just read static copy of count */ - if (kvm_mips_count_disabled(vcpu)) -@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count) - static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, - ktime_t now, u32 count) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - u32 compare; - u64 delta; - ktime_t expire; -@@ -603,7 +603,7 @@ 
resume: - */ - void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - ktime_t now; - - /* Calculate bias */ -@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz) - */ - int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - int dc; - ktime_t now; - u32 count; -@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) - */ - void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - int dc; - u32 old_compare = kvm_read_c0_guest_compare(cop0); - s32 delta = compare - old_compare; -@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) - */ - static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - u32 count; - ktime_t now; - -@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) - */ - void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - - kvm_set_c0_guest_cause(cop0, CAUSEF_DC); - if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) -@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) - */ - void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - u32 count; - - kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); -@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) - */ - int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - s64 changed = count_ctl ^ vcpu->arch.count_ctl; - s64 delta; - ktime_t expire, now; -diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c -index 75c6f264c626c..6b15ac9786583 100644 ---- a/arch/mips/kvm/mips.c -+++ b/arch/mips/kvm/mips.c -@@ -652,7 +652,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) - static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - struct mips_fpu_struct *fpu = &vcpu->arch.fpu; - int ret; - s64 v; -@@ -764,7 +764,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, - static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - struct mips_fpu_struct *fpu = &vcpu->arch.fpu; - s64 v; - s64 vs[2]; -@@ -1104,7 +1104,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) - int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) - { - return kvm_mips_pending_timer(vcpu) || -- kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; -+ kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI; - } - - int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) -@@ -1128,7 +1128,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) - kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); - kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); - -- cop0 = vcpu->arch.cop0; -+ cop0 = 
&vcpu->arch.cop0; - kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n", - kvm_read_c0_guest_status(cop0), - kvm_read_c0_guest_cause(cop0)); -@@ -1250,7 +1250,7 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) - - case EXCCODE_TLBS: - kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n", -- cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, -+ cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc, - badvaddr); - - ++vcpu->stat.tlbmiss_st_exits; -@@ -1322,7 +1322,7 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) - kvm_get_badinstr(opc, vcpu, &inst); - kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", - exccode, opc, inst, badvaddr, -- kvm_read_c0_guest_status(vcpu->arch.cop0)); -+ kvm_read_c0_guest_status(&vcpu->arch.cop0)); - kvm_arch_vcpu_dump_regs(vcpu); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - ret = RESUME_HOST; -@@ -1384,7 +1384,7 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) - /* Enable FPU for guest and restore context */ - void kvm_own_fpu(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - unsigned int sr, cfg5; - - preempt_disable(); -@@ -1428,7 +1428,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) - /* Enable MSA for guest and restore context */ - void kvm_own_msa(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - unsigned int sr, cfg5; - - preempt_disable(); -diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c -index 53f851a615542..3e6682018fbe6 100644 ---- a/arch/mips/kvm/stats.c -+++ b/arch/mips/kvm/stats.c -@@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) - kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); - for (i = 0; i < N_MIPS_COPROC_REGS; i++) { - for (j = 0; j < N_MIPS_COPROC_SEL; j++) { -- if (vcpu->arch.cop0->stat[i][j]) -+ if (vcpu->arch.cop0.stat[i][j]) - kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, -- vcpu->arch.cop0->stat[i][j]); -+ vcpu->arch.cop0.stat[i][j]); - } - } - #endif -diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h -index a8c7fd7bf6d26..136c3535a1cbb 100644 ---- a/arch/mips/kvm/trace.h -+++ b/arch/mips/kvm/trace.h -@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change, - ), - - TP_fast_assign( -- __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0); -+ __entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0); - __entry->pc = vcpu->arch.pc; -- __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0); -- __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0); -- __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0); -+ __entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0); -+ __entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0); -+ __entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0); - ), - - TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx", -diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c -index 4adca5abbc72d..717f883333164 100644 ---- a/arch/mips/kvm/vz.c -+++ b/arch/mips/kvm/vz.c -@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu, - */ - static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - u32 cause, compare; - - compare = kvm_read_sw_gc0_compare(cop0); -@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu, - */ - static void 
kvm_vz_save_timer(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - u32 gctl0, compare, cause; - - gctl0 = read_c0_guestctl0(); -@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val) - - static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - - val &= MIPS_MAARI_INDEX; - if (val == MIPS_MAARI_INDEX) -@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, - u32 *opc, u32 cause, - struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - enum emulation_result er = EMULATE_DONE; - u32 rt, rd, sel; - unsigned long curr_pc; -@@ -1905,7 +1905,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 *v) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - unsigned int idx; - - switch (reg->id) { -@@ -2075,7 +2075,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, - case KVM_REG_MIPS_CP0_MAARI: - if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) - return -EINVAL; -- *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); -+ *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0); - break; - #ifdef CONFIG_64BIT - case KVM_REG_MIPS_CP0_XCONTEXT: -@@ -2129,7 +2129,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg, - s64 v) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - unsigned int idx; - int ret = 0; - unsigned int cur, change; -@@ -2556,7 +2556,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) - - static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - bool migrated, all; - - /* -@@ -2698,7 +2698,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - - static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - - if (current->flags & PF_VCPU) - kvm_vz_vcpu_save_wired(vcpu); -@@ -3070,7 +3070,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu) - - static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu) - { -- struct mips_coproc *cop0 = vcpu->arch.cop0; -+ struct mips_coproc *cop0 = &vcpu->arch.cop0; - unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */ - - /* -diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c -index dd819e31fcbbf..2d5a0bcb0cec1 100644 ---- a/arch/mips/lantiq/clk.c -+++ b/arch/mips/lantiq/clk.c -@@ -50,6 +50,7 @@ struct clk *clk_get_io(void) - { - return &cpu_clk_generic[2]; - } -+EXPORT_SYMBOL_GPL(clk_get_io); - - struct clk *clk_get_ppe(void) - { -@@ -158,6 +159,18 @@ void clk_deactivate(struct clk *clk) - } - EXPORT_SYMBOL(clk_deactivate); - -+struct clk *clk_get_parent(struct clk *clk) -+{ -+ return NULL; -+} -+EXPORT_SYMBOL(clk_get_parent); -+ -+int clk_set_parent(struct clk *clk, struct clk *parent) -+{ -+ return 0; -+} -+EXPORT_SYMBOL(clk_set_parent); -+ - static inline u32 get_counter_resolution(void) - { - u32 res; -diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c -index 42222f849bd25..446a2536999bf 100644 ---- a/arch/mips/lantiq/falcon/sysctrl.c -+++ b/arch/mips/lantiq/falcon/sysctrl.c 
-@@ -167,6 +167,8 @@ static inline void clkdev_add_sys(const char *dev, unsigned int module, - { - struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); - -+ if (!clk) -+ return; - clk->cl.dev_id = dev; - clk->cl.con_id = NULL; - clk->cl.clk = clk; -diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c -index 63dccb2ed08b2..53fcc672a2944 100644 ---- a/arch/mips/lantiq/xway/dma.c -+++ b/arch/mips/lantiq/xway/dma.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -30,6 +31,7 @@ - #define LTQ_DMA_PCTRL 0x44 - #define LTQ_DMA_IRNEN 0xf4 - -+#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */ - #define DMA_DESCPT BIT(3) /* descriptor complete irq */ - #define DMA_TX BIT(8) /* TX channel direction */ - #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ -@@ -39,8 +41,11 @@ - #define DMA_IRQ_ACK 0x7e /* IRQ status register */ - #define DMA_POLL BIT(31) /* turn on channel polling */ - #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ --#define DMA_2W_BURST BIT(1) /* 2 word burst length */ --#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ -+#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */ -+#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */ -+#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */ -+#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */ -+#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */ - #define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */ - #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ - -@@ -191,7 +196,8 @@ ltq_dma_init_port(int p) - break; - - case DMA_PORT_DEU: -- ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), -+ ltq_dma_w32((DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT) | -+ (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT), - LTQ_DMA_PCTRL); - break; - -@@ -206,7 +212,7 @@ ltq_dma_init(struct platform_device *pdev) - { - struct clk *clk; - struct resource *res; -- unsigned id; -+ unsigned int id, nchannels; - int i; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -@@ -222,21 +228,24 @@ ltq_dma_init(struct platform_device *pdev) - clk_enable(clk); - ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); - -+ usleep_range(1, 10); -+ - /* disable all interrupts */ - ltq_dma_w32(0, LTQ_DMA_IRNEN); - - /* reset/configure each channel */ -- for (i = 0; i < DMA_MAX_CHANNEL; i++) { -+ id = ltq_dma_r32(LTQ_DMA_ID); -+ nchannels = ((id & DMA_ID_CHNR) >> 20); -+ for (i = 0; i < nchannels; i++) { - ltq_dma_w32(i, LTQ_DMA_CS); - ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); - ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); - ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); - } - -- id = ltq_dma_r32(LTQ_DMA_ID); - dev_info(&pdev->dev, - "Init done - hw rev: %X, ports: %d, channels: %d\n", -- id & 0x1f, (id >> 16) & 0xf, id >> 20); -+ id & 0x1f, (id >> 16) & 0xf, nchannels); - - return 0; - } -diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c -index 3d5683e75cf1e..200fe9ff641d6 100644 ---- a/arch/mips/lantiq/xway/gptu.c -+++ b/arch/mips/lantiq/xway/gptu.c -@@ -122,6 +122,8 @@ static inline void clkdev_add_gptu(struct device *dev, const char *con, - { - struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); - -+ if (!clk) -+ return; - clk->cl.dev_id = dev_name(dev); - clk->cl.con_id = con; - clk->cl.clk = clk; -diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c -index 917fac1636b71..084f6caba5f23 100644 ---- a/arch/mips/lantiq/xway/sysctrl.c -+++ b/arch/mips/lantiq/xway/sysctrl.c -@@ -315,6 +315,8 
@@ static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate, - { - struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); - -+ if (!clk) -+ return; - clk->cl.dev_id = dev; - clk->cl.con_id = con; - clk->cl.clk = clk; -@@ -338,6 +340,8 @@ static void clkdev_add_cgu(const char *dev, const char *con, - { - struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL); - -+ if (!clk) -+ return; - clk->cl.dev_id = dev; - clk->cl.con_id = con; - clk->cl.clk = clk; -@@ -356,24 +360,28 @@ static void clkdev_add_pci(void) - struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL); - - /* main pci clock */ -- clk->cl.dev_id = "17000000.pci"; -- clk->cl.con_id = NULL; -- clk->cl.clk = clk; -- clk->rate = CLOCK_33M; -- clk->rates = valid_pci_rates; -- clk->enable = pci_enable; -- clk->disable = pmu_disable; -- clk->module = 0; -- clk->bits = PMU_PCI; -- clkdev_add(&clk->cl); -+ if (clk) { -+ clk->cl.dev_id = "17000000.pci"; -+ clk->cl.con_id = NULL; -+ clk->cl.clk = clk; -+ clk->rate = CLOCK_33M; -+ clk->rates = valid_pci_rates; -+ clk->enable = pci_enable; -+ clk->disable = pmu_disable; -+ clk->module = 0; -+ clk->bits = PMU_PCI; -+ clkdev_add(&clk->cl); -+ } - - /* use internal/external bus clock */ -- clk_ext->cl.dev_id = "17000000.pci"; -- clk_ext->cl.con_id = "external"; -- clk_ext->cl.clk = clk_ext; -- clk_ext->enable = pci_ext_enable; -- clk_ext->disable = pci_ext_disable; -- clkdev_add(&clk_ext->cl); -+ if (clk_ext) { -+ clk_ext->cl.dev_id = "17000000.pci"; -+ clk_ext->cl.con_id = "external"; -+ clk_ext->cl.clk = clk_ext; -+ clk_ext->enable = pci_ext_enable; -+ clk_ext->disable = pci_ext_disable; -+ clkdev_add(&clk_ext->cl); -+ } - } - - /* xway socs can generate clocks on gpio pins */ -@@ -393,9 +401,15 @@ static void clkdev_add_clkout(void) - char *name; - - name = kzalloc(sizeof("clkout0"), GFP_KERNEL); -+ if (!name) -+ continue; - sprintf(name, "clkout%d", i); - - clk = kzalloc(sizeof(struct clk), GFP_KERNEL); -+ if (!clk) { -+ kfree(name); -+ continue; -+ } - clk->cl.dev_id = "1f103000.cgu"; - clk->cl.con_id = name; - clk->cl.clk = clk; -diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S -index a46db08071953..7767137c3e49a 100644 ---- a/arch/mips/lib/csum_partial.S -+++ b/arch/mips/lib/csum_partial.S -@@ -347,7 +347,7 @@ EXPORT_SYMBOL(csum_partial) - .if \mode == LEGACY_MODE; \ - 9: insn reg, addr; \ - .section __ex_table,"a"; \ -- PTR 9b, .L_exc; \ -+ PTR_WD 9b, .L_exc; \ - .previous; \ - /* This is enabled in EVA mode */ \ - .else; \ -@@ -356,7 +356,7 @@ EXPORT_SYMBOL(csum_partial) - ((\to == USEROP) && (type == ST_INSN)); \ - 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ - .section __ex_table,"a"; \ -- PTR 9b, .L_exc; \ -+ PTR_WD 9b, .L_exc; \ - .previous; \ - .else; \ - /* EVA without exception */ \ -diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S -index 277c32296636d..18a43f2e29c81 100644 ---- a/arch/mips/lib/memcpy.S -+++ b/arch/mips/lib/memcpy.S -@@ -116,7 +116,7 @@ - .if \mode == LEGACY_MODE; \ - 9: insn reg, addr; \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous; \ - /* This is assembled in EVA mode */ \ - .else; \ -@@ -125,7 +125,7 @@ - ((\to == USEROP) && (type == ST_INSN)); \ - 9: __BUILD_EVA_INSN(insn##e, reg, addr); \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous; \ - .else; \ - /* \ -diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S -index b0baa3c79fad0..0b342bae9a98c 100644 ---- a/arch/mips/lib/memset.S -+++ 
b/arch/mips/lib/memset.S -@@ -52,7 +52,7 @@ - 9: ___BUILD_EVA_INSN(insn, reg, addr); \ - .endif; \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous - - .macro f_fill64 dst, offset, val, fixup, mode -diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S -index 556acf684d7be..13aaa9927ad12 100644 ---- a/arch/mips/lib/strncpy_user.S -+++ b/arch/mips/lib/strncpy_user.S -@@ -15,7 +15,7 @@ - #define EX(insn,reg,addr,handler) \ - 9: insn reg, addr; \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous - - /* -@@ -59,7 +59,7 @@ LEAF(__strncpy_from_user_asm) - jr ra - - .section __ex_table,"a" -- PTR 1b, .Lfault -+ PTR_WD 1b, .Lfault - .previous - - EXPORT_SYMBOL(__strncpy_from_user_asm) -diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S -index 92b63f20ec05f..6de31b616f9c1 100644 ---- a/arch/mips/lib/strnlen_user.S -+++ b/arch/mips/lib/strnlen_user.S -@@ -14,7 +14,7 @@ - #define EX(insn,reg,addr,handler) \ - 9: insn reg, addr; \ - .section __ex_table,"a"; \ -- PTR 9b, handler; \ -+ PTR_WD 9b, handler; \ - .previous - - /* -diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c -index 794c96c2a4cdd..311dc1580bbde 100644 ---- a/arch/mips/loongson32/common/platform.c -+++ b/arch/mips/loongson32/common/platform.c -@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - if (plat_dat->bus_id) { - __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | - GMAC1_USE_UART0, LS1X_MUX_CTRL0); -- switch (plat_dat->interface) { -+ switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); - break; -@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - break; - default: - pr_err("unsupported mii mode %d\n", -- plat_dat->interface); -+ plat_dat->phy_interface); - return -ENOTSUPP; - } - val &= ~GMAC1_SHUT; - } else { -- switch (plat_dat->interface) { -+ switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); - break; -@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - break; - default: - pr_err("unsupported mii mode %d\n", -- plat_dat->interface); -+ plat_dat->phy_interface); - return -ENOTSUPP; - } - val &= ~GMAC0_SHUT; -@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - plat_dat = dev_get_platdata(&pdev->dev); - - val &= ~PHY_INTF_SELI; -- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) -+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) - val |= 0x4 << PHY_INTF_SELI_SHIFT; - __raw_writel(val, LS1X_MUX_CTRL1); - -@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = { - .bus_id = 0, - .phy_addr = -1, - #if defined(CONFIG_LOONGSON1_LS1B) -- .interface = PHY_INTERFACE_MODE_MII, -+ .phy_interface = PHY_INTERFACE_MODE_MII, - #elif defined(CONFIG_LOONGSON1_LS1C) -- .interface = PHY_INTERFACE_MODE_RMII, -+ .phy_interface = PHY_INTERFACE_MODE_RMII, - #endif - .mdio_bus_data = &ls1x_mdio_bus_data, - .dma_cfg = &ls1x_eth_dma_cfg, -@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = { - static struct plat_stmmacenet_data ls1x_eth1_pdata = { - .bus_id = 1, - .phy_addr = -1, -- .interface = PHY_INTERFACE_MODE_MII, -+ .phy_interface = PHY_INTERFACE_MODE_MII, - .mdio_bus_data = &ls1x_mdio_bus_data, - .dma_cfg = &ls1x_eth_dma_cfg, - .has_gmac = 1, -diff --git 
a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c -index e9de6da0ce51f..9dcfe9de55b0a 100644 ---- a/arch/mips/loongson32/ls1c/board.c -+++ b/arch/mips/loongson32/ls1c/board.c -@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = { - static int __init ls1c_platform_init(void) - { - ls1x_serial_set_uartclk(&ls1x_uart_pdev); -- ls1x_rtc_set_extclk(&ls1x_rtc_pdev); - - return platform_add_devices(ls1c_platform_devices, - ARRAY_SIZE(ls1c_platform_devices)); -diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c -index 758d5d26aaaa2..e420800043b08 100644 ---- a/arch/mips/loongson64/reset.c -+++ b/arch/mips/loongson64/reset.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -159,8 +160,17 @@ static int __init mips_reboot_setup(void) - - #ifdef CONFIG_KEXEC - kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); -+ if (WARN_ON(!kexec_argv)) -+ return -ENOMEM; -+ - kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); -+ if (WARN_ON(!kdump_argv)) -+ return -ENOMEM; -+ - kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL); -+ if (WARN_ON(!kexec_envp)) -+ return -ENOMEM; -+ - fw_arg1 = KEXEC_ARGV_ADDR; - memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE); - -diff --git a/arch/mips/loongson64/vbios_quirk.c b/arch/mips/loongson64/vbios_quirk.c -index 9a29e94d3db1d..3115d4de982c5 100644 ---- a/arch/mips/loongson64/vbios_quirk.c -+++ b/arch/mips/loongson64/vbios_quirk.c -@@ -3,7 +3,7 @@ - #include - #include - --static void pci_fixup_radeon(struct pci_dev *pdev) -+static void pci_fixup_video(struct pci_dev *pdev) - { - struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - -@@ -22,8 +22,7 @@ static void pci_fixup_radeon(struct pci_dev *pdev) - res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW | - IORESOURCE_PCI_FIXED; - -- dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n", -- PCI_ROM_RESOURCE, res); -+ dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res); - } --DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615, -- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon); -+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615, -+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); -diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c -index a1ced5e449511..f9b8c85e98433 100644 ---- a/arch/mips/mm/physaddr.c -+++ b/arch/mips/mm/physaddr.c -@@ -5,6 +5,7 @@ - #include - #include - -+#include - #include - #include - #include -@@ -12,15 +13,6 @@ - - static inline bool __debug_virt_addr_valid(unsigned long x) - { -- /* high_memory does not get immediately defined, and there -- * are early callers of __pa() against PAGE_OFFSET -- */ -- if (!high_memory && x >= PAGE_OFFSET) -- return true; -- -- if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) -- return true; -- - /* - * MAX_DMA_ADDRESS is a virtual address that may not correspond to an - * actual physical address. 
Enough code relies on -@@ -30,7 +22,9 @@ static inline bool __debug_virt_addr_valid(unsigned long x) - if (x == MAX_DMA_ADDRESS) - return true; - -- return false; -+ return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 || -+ IS_ENABLED(CONFIG_EVA) || -+ !IS_ENABLED(CONFIG_HIGHMEM)); - } - - phys_addr_t __virt_to_phys(volatile const void *x) -diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c -index 9adad24c2e65e..3471a089bc05f 100644 ---- a/arch/mips/mm/tlbex.c -+++ b/arch/mips/mm/tlbex.c -@@ -634,7 +634,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, - return; - } - -- if (cpu_has_rixi && !!_PAGE_NO_EXEC) { -+ if (cpu_has_rixi && _PAGE_NO_EXEC != 0) { - if (fill_includes_sw_bits) { - UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); - } else { -@@ -2167,16 +2167,14 @@ static void build_r4000_tlb_load_handler(void) - uasm_i_tlbr(&p); - - switch (current_cpu_type()) { -- default: -- if (cpu_has_mips_r2_exec_hazard) { -- uasm_i_ehb(&p); -- fallthrough; -- - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: -- break; -- } -+ break; -+ default: -+ if (cpu_has_mips_r2_exec_hazard) -+ uasm_i_ehb(&p); -+ break; - } - - /* Examine entrylo 0 or 1 based on ptr. */ -@@ -2243,15 +2241,14 @@ static void build_r4000_tlb_load_handler(void) - uasm_i_tlbr(&p); - - switch (current_cpu_type()) { -- default: -- if (cpu_has_mips_r2_exec_hazard) { -- uasm_i_ehb(&p); -- - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: -- break; -- } -+ break; -+ default: -+ if (cpu_has_mips_r2_exec_hazard) -+ uasm_i_ehb(&p); -+ break; - } - - /* Examine entrylo 0 or 1 based on ptr. */ -@@ -2576,7 +2573,7 @@ static void check_pabits(void) - unsigned long entry; - unsigned pabits, fillbits; - -- if (!cpu_has_rixi || !_PAGE_NO_EXEC) { -+ if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) { - /* - * We'll only be making use of the fact that we can rotate bits - * into the fill if the CPU supports RIXI, so don't bother -diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c -index 25372e62783b5..3cd1b408fa1cb 100644 ---- a/arch/mips/pic32/pic32mzda/early_console.c -+++ b/arch/mips/pic32/pic32mzda/early_console.c -@@ -27,7 +27,7 @@ - #define U_BRG(x) (UART_BASE(x) + 0x40) - - static void __iomem *uart_base; --static char console_port = -1; -+static int console_port = -1; - - static int __init configure_uart_pins(int port) - { -@@ -47,7 +47,7 @@ static int __init configure_uart_pins(int port) - return 0; - } - --static void __init configure_uart(char port, int baud) -+static void __init configure_uart(int port, int baud) - { - u32 pbclk; - -@@ -60,7 +60,7 @@ static void __init configure_uart(char port, int baud) - uart_base + PIC32_SET(U_STA(port))); - } - --static void __init setup_early_console(char port, int baud) -+static void __init setup_early_console(int port, int baud) - { - if (configure_uart_pins(port)) - return; -@@ -130,16 +130,15 @@ _out: - return baud; - } - --void __init fw_init_early_console(char port) -+void __init fw_init_early_console(void) - { - char *arch_cmdline = pic32_getcmdline(); -- int baud = -1; -+ int baud, port; - - uart_base = ioremap(PIC32_BASE_UART, 0xc00); - - baud = get_baud_from_cmdline(arch_cmdline); -- if (port == -1) -- port = get_port_from_cmdline(arch_cmdline); -+ port = get_port_from_cmdline(arch_cmdline); - - if (port == -1) - port = EARLY_CONSOLE_PORT; -diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c -index 764f2d022fae4..429830afff54f 100644 
---- a/arch/mips/pic32/pic32mzda/init.c -+++ b/arch/mips/pic32/pic32mzda/init.c -@@ -47,7 +47,7 @@ void __init plat_mem_setup(void) - strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); - - #ifdef CONFIG_EARLY_PRINTK -- fw_init_early_console(-1); -+ fw_init_early_console(); - #endif - pic32_config_init(); - } -diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c -index bdf53807d7c2b..bea857c9da8b7 100644 ---- a/arch/mips/ralink/ill_acc.c -+++ b/arch/mips/ralink/ill_acc.c -@@ -61,6 +61,7 @@ static int __init ill_acc_of_setup(void) - pdev = of_find_device_by_node(np); - if (!pdev) { - pr_err("%pOFn: failed to lookup pdev\n", np); -+ of_node_put(np); - return -EINVAL; - } - -diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c -index bd71f5b142383..0db23bcf2a970 100644 ---- a/arch/mips/ralink/mt7621.c -+++ b/arch/mips/ralink/mt7621.c -@@ -20,31 +20,42 @@ - - #include "common.h" - --static void *detect_magic __initdata = detect_memory_region; -+#define MT7621_MEM_TEST_PATTERN 0xaa5555aa -+ -+static u32 detect_magic __initdata; -+static struct ralink_soc_info *soc_info_ptr; - - phys_addr_t mips_cpc_default_phys_base(void) - { - panic("Cannot detect cpc address"); - } - -+static bool __init mt7621_addr_wraparound_test(phys_addr_t size) -+{ -+ void *dm = (void *)KSEG1ADDR(&detect_magic); -+ -+ if (CPHYSADDR(dm + size) >= MT7621_LOWMEM_MAX_SIZE) -+ return true; -+ __raw_writel(MT7621_MEM_TEST_PATTERN, dm); -+ if (__raw_readl(dm) != __raw_readl(dm + size)) -+ return false; -+ __raw_writel(~MT7621_MEM_TEST_PATTERN, dm); -+ return __raw_readl(dm) == __raw_readl(dm + size); -+} -+ - static void __init mt7621_memory_detect(void) - { -- void *dm = &detect_magic; - phys_addr_t size; - -- for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) { -- if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic))) -- break; -+ for (size = 32 * SZ_1M; size <= 256 * SZ_1M; size <<= 1) { -+ if (mt7621_addr_wraparound_test(size)) { -+ memblock_add(MT7621_LOWMEM_BASE, size); -+ return; -+ } - } - -- if ((size == 256 * SZ_1M) && -- (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) && -- __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) { -- memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE); -- memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE); -- } else { -- memblock_add(MT7621_LOWMEM_BASE, size); -- } -+ memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE); -+ memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE); - } - - void __init ralink_of_remap(void) -@@ -56,41 +67,83 @@ void __init ralink_of_remap(void) - panic("Failed to remap core resources"); - } - --static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) -+static unsigned int __init mt7621_get_soc_name0(void) -+{ -+ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0); -+} -+ -+static unsigned int __init mt7621_get_soc_name1(void) -+{ -+ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1); -+} -+ -+static bool __init mt7621_soc_valid(void) -+{ -+ if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 && -+ mt7621_get_soc_name1() == MT7621_CHIP_NAME1) -+ return true; -+ else -+ return false; -+} -+ -+static const char __init *mt7621_get_soc_id(void) -+{ -+ if (mt7621_soc_valid()) -+ return "MT7621"; -+ else -+ return "invalid"; -+} -+ -+static unsigned int __init mt7621_get_soc_rev(void) -+{ -+ return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_REV); -+} -+ -+static unsigned int __init mt7621_get_soc_ver(void) -+{ -+ return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & 
CHIP_REV_VER_MASK; -+} -+ -+static unsigned int __init mt7621_get_soc_eco(void) -+{ -+ return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK); -+} -+ -+static const char __init *mt7621_get_soc_revision(void) -+{ -+ if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1) -+ return "E2"; -+ else -+ return "E1"; -+} -+ -+static int __init mt7621_soc_dev_init(void) - { - struct soc_device *soc_dev; - struct soc_device_attribute *soc_dev_attr; - - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); - if (!soc_dev_attr) -- return; -+ return -ENOMEM; - - soc_dev_attr->soc_id = "mt7621"; - soc_dev_attr->family = "Ralink"; -+ soc_dev_attr->revision = mt7621_get_soc_revision(); - -- if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 && -- (rev & CHIP_REV_ECO_MASK) == 1) -- soc_dev_attr->revision = "E2"; -- else -- soc_dev_attr->revision = "E1"; -- -- soc_dev_attr->data = soc_info; -+ soc_dev_attr->data = soc_info_ptr; - - soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr); -- return; -+ return PTR_ERR(soc_dev); - } -+ -+ return 0; - } -+device_initcall(mt7621_soc_dev_init); - - void __init prom_soc_init(struct ralink_soc_info *soc_info) - { -- void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); -- unsigned char *name = NULL; -- u32 n0; -- u32 n1; -- u32 rev; -- - /* Early detection of CMP support */ - mips_cm_probe(); - mips_cpc_probe(); -@@ -113,27 +166,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info) - __sync(); - } - -- n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); -- n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); -- -- if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { -- name = "MT7621"; -+ if (mt7621_soc_valid()) - soc_info->compatible = "mediatek,mt7621-soc"; -- } else { -- panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); -- } -+ else -+ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", -+ mt7621_get_soc_name0(), -+ mt7621_get_soc_name1()); - ralink_soc = MT762X_SOC_MT7621AT; -- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); - - snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, - "MediaTek %s ver:%u eco:%u", -- name, -- (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, -- (rev & CHIP_REV_ECO_MASK)); -+ mt7621_get_soc_id(), -+ mt7621_get_soc_ver(), -+ mt7621_get_soc_eco()); - - soc_info->mem_detect = mt7621_memory_detect; - -- soc_dev_init(soc_info, rev); -+ soc_info_ptr = soc_info; - - if (!register_cps_smp_ops()) - return; -diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c -index 04684990e28ef..b7f6f782d9a13 100644 ---- a/arch/mips/rb532/devices.c -+++ b/arch/mips/rb532/devices.c -@@ -301,11 +301,9 @@ static int __init plat_setup_devices(void) - static int __init setup_kmac(char *s) - { - printk(KERN_INFO "korina mac = %s\n", s); -- if (!mac_pton(s, korina_dev0_data.mac)) { -+ if (!mac_pton(s, korina_dev0_data.mac)) - printk(KERN_ERR "Invalid mac\n"); -- return -EINVAL; -- } -- return 0; -+ return 1; - } - - __setup("kmac=", setup_kmac); -diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c -index 000ede156bdc0..5143d1cf8984c 100644 ---- a/arch/mips/sgi-ip27/ip27-xtalk.c -+++ b/arch/mips/sgi-ip27/ip27-xtalk.c -@@ -27,15 +27,18 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) - { - struct xtalk_bridge_platform_data *bd; - struct sgi_w1_platform_data *wd; -- struct platform_device *pdev; -+ struct platform_device *pdev_wd; -+ struct platform_device *pdev_bd; - struct resource w1_res; - unsigned long offset; - - offset = 
NODE_OFFSET(nasid); - - wd = kzalloc(sizeof(*wd), GFP_KERNEL); -- if (!wd) -- goto no_mem; -+ if (!wd) { -+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); -+ return; -+ } - - snprintf(wd->dev_id, sizeof(wd->dev_id), "bridge-%012lx", - offset + (widget << SWIN_SIZE_BITS)); -@@ -46,22 +49,35 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) - w1_res.end = w1_res.start + 3; - w1_res.flags = IORESOURCE_MEM; - -- pdev = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); -- if (!pdev) { -- kfree(wd); -- goto no_mem; -+ pdev_wd = platform_device_alloc("sgi_w1", PLATFORM_DEVID_AUTO); -+ if (!pdev_wd) { -+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); -+ goto err_kfree_wd; -+ } -+ if (platform_device_add_resources(pdev_wd, &w1_res, 1)) { -+ pr_warn("xtalk:n%d/%x bridge failed to add platform resources.\n", nasid, widget); -+ goto err_put_pdev_wd; -+ } -+ if (platform_device_add_data(pdev_wd, wd, sizeof(*wd))) { -+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); -+ goto err_put_pdev_wd; -+ } -+ if (platform_device_add(pdev_wd)) { -+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); -+ goto err_put_pdev_wd; - } -- platform_device_add_resources(pdev, &w1_res, 1); -- platform_device_add_data(pdev, wd, sizeof(*wd)); -- platform_device_add(pdev); -+ /* platform_device_add_data() duplicates the data */ -+ kfree(wd); - - bd = kzalloc(sizeof(*bd), GFP_KERNEL); -- if (!bd) -- goto no_mem; -- pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); -- if (!pdev) { -- kfree(bd); -- goto no_mem; -+ if (!bd) { -+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); -+ goto err_unregister_pdev_wd; -+ } -+ pdev_bd = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO); -+ if (!pdev_bd) { -+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); -+ goto err_kfree_bd; - } - - -@@ -82,13 +98,31 @@ static void bridge_platform_create(nasid_t nasid, int widget, int masterwid) - bd->io.flags = IORESOURCE_IO; - bd->io_offset = offset; - -- platform_device_add_data(pdev, bd, sizeof(*bd)); -- platform_device_add(pdev); -+ if (platform_device_add_data(pdev_bd, bd, sizeof(*bd))) { -+ pr_warn("xtalk:n%d/%x bridge failed to add platform data.\n", nasid, widget); -+ goto err_put_pdev_bd; -+ } -+ if (platform_device_add(pdev_bd)) { -+ pr_warn("xtalk:n%d/%x bridge failed to add platform device.\n", nasid, widget); -+ goto err_put_pdev_bd; -+ } -+ /* platform_device_add_data() duplicates the data */ -+ kfree(bd); - pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget); - return; - --no_mem: -- pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget); -+err_put_pdev_bd: -+ platform_device_put(pdev_bd); -+err_kfree_bd: -+ kfree(bd); -+err_unregister_pdev_wd: -+ platform_device_unregister(pdev_wd); -+ return; -+err_put_pdev_wd: -+ platform_device_put(pdev_wd); -+err_kfree_wd: -+ kfree(wd); -+ return; - } - - static int probe_one_port(nasid_t nasid, int widget, int masterwid) -diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c -index 240bb68ec2478..ff3ba7e778901 100644 ---- a/arch/mips/sni/time.c -+++ b/arch/mips/sni/time.c -@@ -18,14 +18,14 @@ static int a20r_set_periodic(struct clock_event_device *evt) - { - *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34; - wmb(); -- *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV; -+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV & 0xff; - wmb(); - *(volatile u8 
*)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8; - wmb(); - - *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4; - wmb(); -- *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV; -+ *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV & 0xff; - wmb(); - *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8; - wmb(); -diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c -index 7b7f25b4b057e..9240bcdbe74e4 100644 ---- a/arch/mips/vr41xx/common/icu.c -+++ b/arch/mips/vr41xx/common/icu.c -@@ -640,8 +640,6 @@ static int icu_get_irq(unsigned int irq) - - printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); - -- atomic_inc(&irq_err_count); -- - return -1; - } - -diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h -index d4cbf069dc224..37a40981deb3b 100644 ---- a/arch/nds32/include/asm/uaccess.h -+++ b/arch/nds32/include/asm/uaccess.h -@@ -70,9 +70,7 @@ static inline void set_fs(mm_segment_t fs) - * versions are void (ie, don't return a value as such). - */ - --#define get_user __get_user \ -- --#define __get_user(x, ptr) \ -+#define get_user(x, ptr) \ - ({ \ - long __gu_err = 0; \ - __get_user_check((x), (ptr), __gu_err); \ -@@ -85,6 +83,14 @@ static inline void set_fs(mm_segment_t fs) - (void)0; \ - }) - -+#define __get_user(x, ptr) \ -+({ \ -+ long __gu_err = 0; \ -+ const __typeof__(*(ptr)) __user *__p = (ptr); \ -+ __get_user_err((x), __p, (__gu_err)); \ -+ __gu_err; \ -+}) -+ - #define __get_user_check(x, ptr, err) \ - ({ \ - const __typeof__(*(ptr)) __user *__p = (ptr); \ -@@ -165,12 +171,18 @@ do { \ - : "r"(addr), "i"(-EFAULT) \ - : "cc") - --#define put_user __put_user \ -+#define put_user(x, ptr) \ -+({ \ -+ long __pu_err = 0; \ -+ __put_user_check((x), (ptr), __pu_err); \ -+ __pu_err; \ -+}) - - #define __put_user(x, ptr) \ - ({ \ - long __pu_err = 0; \ -- __put_user_err((x), (ptr), __pu_err); \ -+ __typeof__(*(ptr)) __user *__p = (ptr); \ -+ __put_user_err((x), __p, __pu_err); \ - __pu_err; \ - }) - -diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c -index 9edd7ed7d7bf8..701c09a668de4 100644 ---- a/arch/nds32/kernel/fpu.c -+++ b/arch/nds32/kernel/fpu.c -@@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs) - } - } else if (fpcsr & FPCSR_mskRIT) { - if (!user_mode(regs)) -- do_exit(SIGILL); -+ make_task_dead(SIGILL); - si_signo = SIGILL; - } - -diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c -index 0ce6f9f307e6a..f387919607813 100644 ---- a/arch/nds32/kernel/perf_event_cpu.c -+++ b/arch/nds32/kernel/perf_event_cpu.c -@@ -1363,6 +1363,7 @@ void - perf_callchain_user(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - unsigned long fp = 0; - unsigned long gp = 0; - unsigned long lp = 0; -@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, - - leaf_fp = 0; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ -1479,9 +1480,10 @@ void - perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct stackframe fr; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } -@@ 
-1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - - unsigned long perf_instruction_pointer(struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ - /* However, NDS32 does not support virtualization */ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) -- return perf_guest_cbs->get_guest_ip(); -+ if (guest_cbs && guest_cbs->is_in_guest()) -+ return guest_cbs->get_guest_ip(); - - return instruction_pointer(regs); - } - - unsigned long perf_misc_flags(struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - int misc = 0; - - /* However, NDS32 does not support virtualization */ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -- if (perf_guest_cbs->is_user_mode()) -+ if (guest_cbs && guest_cbs->is_in_guest()) { -+ if (guest_cbs->is_user_mode()) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; -diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c -index f06421c645aff..b90030e8e546f 100644 ---- a/arch/nds32/kernel/traps.c -+++ b/arch/nds32/kernel/traps.c -@@ -141,7 +141,7 @@ void die(const char *str, struct pt_regs *regs, int err) - - bust_spinlocks(0); - spin_unlock_irq(&die_lock); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - EXPORT_SYMBOL(die); -@@ -240,7 +240,7 @@ void unhandled_interruption(struct pt_regs *regs) - pr_emerg("unhandled_interruption\n"); - show_regs(regs); - if (!user_mode(regs)) -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - force_sig(SIGKILL); - } - -@@ -251,7 +251,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr, - addr, type); - show_regs(regs); - if (!user_mode(regs)) -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - force_sig(SIGKILL); - } - -@@ -278,7 +278,7 @@ void do_revinsn(struct pt_regs *regs) - pr_emerg("Reserved Instruction\n"); - show_regs(regs); - if (!user_mode(regs)) -- do_exit(SIGILL); -+ make_task_dead(SIGILL); - force_sig(SIGILL); - } - -diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile -index 37dfc7e584bce..0b704c1f379f5 100644 ---- a/arch/nios2/boot/Makefile -+++ b/arch/nios2/boot/Makefile -@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE - $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE - $(call if_changed,gzip) - --$(obj)/vmImage: $(obj)/vmlinux.gz -+$(obj)/vmImage: $(obj)/vmlinux.gz FORCE - $(call if_changed,uimage) - @$(kecho) 'Kernel: $@ is ready' - -diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts -index 56339bef3247d..0e7e5b0dd685c 100644 ---- a/arch/nios2/boot/dts/10m50_devboard.dts -+++ b/arch/nios2/boot/dts/10m50_devboard.dts -@@ -97,7 +97,7 @@ - rx-fifo-depth = <8192>; - tx-fifo-depth = <8192>; - address-bits = <48>; -- max-frame-size = <1518>; -+ max-frame-size = <1500>; - local-mac-address = [00 00 00 00 00 00]; - altr,has-supplementary-unicast; - altr,enable-sup-addr = <1>; -diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts -index d10fb81686c7e..3ee3169063797 100644 ---- a/arch/nios2/boot/dts/3c120_devboard.dts -+++ b/arch/nios2/boot/dts/3c120_devboard.dts -@@ -106,7 +106,7 @@ - interrupt-names = "rx_irq", "tx_irq"; - rx-fifo-depth = <8192>; - tx-fifo-depth = <8192>; -- max-frame-size = <1518>; -+ max-frame-size = <1500>; - local-mac-address = [ 00 00 00 00 00 00 ]; - phy-mode = "rgmii-id"; - phy-handle = <&phy0>; -diff --git a/arch/nios2/include/asm/entry.h b/arch/nios2/include/asm/entry.h -index 
cf37f55efbc22..bafb7b2ca59fc 100644 ---- a/arch/nios2/include/asm/entry.h -+++ b/arch/nios2/include/asm/entry.h -@@ -50,7 +50,8 @@ - stw r13, PT_R13(sp) - stw r14, PT_R14(sp) - stw r15, PT_R15(sp) -- stw r2, PT_ORIG_R2(sp) -+ movi r24, -1 -+ stw r24, PT_ORIG_R2(sp) - stw r7, PT_ORIG_R7(sp) - - stw ra, PT_RA(sp) -diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h -index 6424621448728..9da34c3022a27 100644 ---- a/arch/nios2/include/asm/ptrace.h -+++ b/arch/nios2/include/asm/ptrace.h -@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *); - ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\ - - 1) - -+#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1) -+ - int do_syscall_trace_enter(void); - void do_syscall_trace_exit(void); - #endif /* __ASSEMBLY__ */ -diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h -index a769f871b28d9..40a1adc9bd03e 100644 ---- a/arch/nios2/include/asm/timex.h -+++ b/arch/nios2/include/asm/timex.h -@@ -8,5 +8,8 @@ - typedef unsigned long cycles_t; - - extern cycles_t get_cycles(void); -+#define get_cycles get_cycles -+ -+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback()) - - #endif -diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h -index ba9340e96fd4c..ca9285a915efa 100644 ---- a/arch/nios2/include/asm/uaccess.h -+++ b/arch/nios2/include/asm/uaccess.h -@@ -88,6 +88,7 @@ extern __must_check long strnlen_user(const char __user *s, long n); - /* Optimized macros */ - #define __get_user_asm(val, insn, addr, err) \ - { \ -+ unsigned long __gu_val; \ - __asm__ __volatile__( \ - " movi %0, %3\n" \ - "1: " insn " %1, 0(%2)\n" \ -@@ -96,14 +97,20 @@ extern __must_check long strnlen_user(const char __user *s, long n); - " .section __ex_table,\"a\"\n" \ - " .word 1b, 2b\n" \ - " .previous" \ -- : "=&r" (err), "=r" (val) \ -+ : "=&r" (err), "=r" (__gu_val) \ - : "r" (addr), "i" (-EFAULT)); \ -+ val = (__force __typeof__(*(addr)))__gu_val; \ - } - --#define __get_user_unknown(val, size, ptr, err) do { \ -+extern void __get_user_unknown(void); -+ -+#define __get_user_8(val, ptr, err) do { \ -+ u64 __val = 0; \ - err = 0; \ -- if (__copy_from_user(&(val), ptr, size)) { \ -+ if (raw_copy_from_user(&(__val), ptr, sizeof(val))) { \ - err = -EFAULT; \ -+ } else { \ -+ val = (typeof(val))(typeof((val) - (val)))__val; \ - } \ - } while (0) - -@@ -119,8 +126,11 @@ do { \ - case 4: \ - __get_user_asm(val, "ldw", ptr, err); \ - break; \ -+ case 8: \ -+ __get_user_8(val, ptr, err); \ -+ break; \ - default: \ -- __get_user_unknown(val, size, ptr, err); \ -+ __get_user_unknown(); \ - break; \ - } \ - } while (0) -@@ -129,9 +139,7 @@ do { \ - ({ \ - long __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ -- unsigned long __gu_val = 0; \ -- __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\ -- (x) = (__force __typeof__(x))__gu_val; \ -+ __get_user_common(x, sizeof(*(ptr)), __gu_ptr, __gu_err); \ - __gu_err; \ - }) - -@@ -139,11 +147,9 @@ do { \ - ({ \ - long __gu_err = -EFAULT; \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ -- unsigned long __gu_val = 0; \ - if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \ -- __get_user_common(__gu_val, sizeof(*__gu_ptr), \ -+ __get_user_common(x, sizeof(*__gu_ptr), \ - __gu_ptr, __gu_err); \ -- (x) = (__force __typeof__(x))__gu_val; \ - __gu_err; \ - }) - -diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S -index 
0794cd7803dfe..99f0a65e62347 100644 ---- a/arch/nios2/kernel/entry.S -+++ b/arch/nios2/kernel/entry.S -@@ -185,6 +185,7 @@ ENTRY(handle_system_call) - ldw r5, PT_R5(sp) - - local_restart: -+ stw r2, PT_ORIG_R2(sp) - /* Check that the requested system call is within limits */ - movui r1, __NR_syscalls - bgeu r2, r1, ret_invsyscall -@@ -192,7 +193,6 @@ local_restart: - movhi r11, %hiadj(sys_call_table) - add r1, r1, r11 - ldw r1, %lo(sys_call_table)(r1) -- beq r1, r0, ret_invsyscall - - /* Check if we are being traced */ - GET_THREAD_INFO r11 -@@ -213,6 +213,9 @@ local_restart: - translate_rc_and_ret: - movi r1, 0 - bge r2, zero, 3f -+ ldw r1, PT_ORIG_R2(sp) -+ addi r1, r1, 1 -+ beq r1, zero, 3f - sub r2, zero, r2 - movi r1, 1 - 3: -@@ -255,9 +258,9 @@ traced_system_call: - ldw r6, PT_R6(sp) - ldw r7, PT_R7(sp) - -- /* Fetch the syscall function, we don't need to check the boundaries -- * since this is already done. -- */ -+ /* Fetch the syscall function. */ -+ movui r1, __NR_syscalls -+ bgeu r2, r1, traced_invsyscall - slli r1, r2, 2 - movhi r11,%hiadj(sys_call_table) - add r1, r1, r11 -@@ -276,6 +279,9 @@ traced_system_call: - translate_rc_and_ret2: - movi r1, 0 - bge r2, zero, 4f -+ ldw r1, PT_ORIG_R2(sp) -+ addi r1, r1, 1 -+ beq r1, zero, 4f - sub r2, zero, r2 - movi r1, 1 - 4: -@@ -287,6 +293,11 @@ end_translate_rc_and_ret2: - RESTORE_SWITCH_STACK - br ret_from_exception - -+ /* If the syscall number was invalid return ENOSYS */ -+traced_invsyscall: -+ movi r2, -ENOSYS -+ br translate_rc_and_ret2 -+ - Luser_return: - GET_THREAD_INFO r11 /* get thread_info pointer */ - ldw r10, TI_FLAGS(r11) /* get thread_info->flags */ -@@ -336,9 +347,6 @@ external_interrupt: - /* skip if no interrupt is pending */ - beq r12, r0, ret_from_interrupt - -- movi r24, -1 -- stw r24, PT_ORIG_R2(sp) -- - /* - * Process an external hardware interrupt. 
- */ -diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c -index 2009ae2d3c3bb..68d626c4f1ba7 100644 ---- a/arch/nios2/kernel/signal.c -+++ b/arch/nios2/kernel/signal.c -@@ -36,10 +36,10 @@ struct rt_sigframe { - - static inline int rt_restore_ucontext(struct pt_regs *regs, - struct switch_stack *sw, -- struct ucontext *uc, int *pr2) -+ struct ucontext __user *uc, int *pr2) - { - int temp; -- unsigned long *gregs = uc->uc_mcontext.gregs; -+ unsigned long __user *gregs = uc->uc_mcontext.gregs; - int err; - - /* Always make any pending restarted system calls return -EINTR */ -@@ -102,10 +102,11 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw) - { - struct pt_regs *regs = (struct pt_regs *)(sw + 1); - /* Verify, can we follow the stack back */ -- struct rt_sigframe *frame = (struct rt_sigframe *) regs->sp; -+ struct rt_sigframe __user *frame; - sigset_t set; - int rval; - -+ frame = (struct rt_sigframe __user *) regs->sp; - if (!access_ok(frame, sizeof(*frame))) - goto badframe; - -@@ -124,10 +125,10 @@ badframe: - return 0; - } - --static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) -+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) - { - struct switch_stack *sw = (struct switch_stack *)regs - 1; -- unsigned long *gregs = uc->uc_mcontext.gregs; -+ unsigned long __user *gregs = uc->uc_mcontext.gregs; - int err = 0; - - err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); -@@ -162,8 +163,9 @@ static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) - return err; - } - --static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, -- size_t frame_size) -+static inline void __user *get_sigframe(struct ksignal *ksig, -+ struct pt_regs *regs, -+ size_t frame_size) - { - unsigned long usp; - -@@ -174,13 +176,13 @@ static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, - usp = sigsp(usp, ksig); - - /* Verify, is it 32 or 64 bit aligned */ -- return (void *)((usp - frame_size) & -8UL); -+ return (void __user *)((usp - frame_size) & -8UL); - } - - static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, - struct pt_regs *regs) - { -- struct rt_sigframe *frame; -+ struct rt_sigframe __user *frame; - int err = 0; - - frame = get_sigframe(ksig, regs, sizeof(*frame)); -@@ -240,7 +242,7 @@ static int do_signal(struct pt_regs *regs) - /* - * If we were from a system call, check for system call restarting... - */ -- if (regs->orig_r2 >= 0) { -+ if (regs->orig_r2 >= 0 && regs->r1) { - continue_addr = regs->ea; - restart_addr = continue_addr - 4; - retval = regs->r2; -@@ -262,6 +264,7 @@ static int do_signal(struct pt_regs *regs) - regs->ea = restart_addr; - break; - } -+ regs->orig_r2 = -1; - } - - if (get_signal(&ksig)) { -diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c -index 6176d63023c1d..c2875a6dd5a4a 100644 ---- a/arch/nios2/kernel/syscall_table.c -+++ b/arch/nios2/kernel/syscall_table.c -@@ -13,5 +13,6 @@ - #define __SYSCALL(nr, call) [nr] = (call), - - void *sys_call_table[__NR_syscalls] = { -+ [0 ... 
__NR_syscalls-1] = sys_ni_syscall, - #include - }; -diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c -index 596986a74a26d..85ac49d64cf73 100644 ---- a/arch/nios2/kernel/traps.c -+++ b/arch/nios2/kernel/traps.c -@@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err) - show_regs(regs); - spin_unlock_irq(&die_lock); - /* -- * do_exit() should take care of panic'ing from an interrupt -+ * make_task_dead() should take care of panic'ing from an interrupt - * context so we don't handle it here - */ -- do_exit(err); -+ make_task_dead(err); - } - - void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr) -diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h -index c298061c70a7e..8aa3e78181e9a 100644 ---- a/arch/openrisc/include/asm/io.h -+++ b/arch/openrisc/include/asm/io.h -@@ -31,7 +31,7 @@ - void __iomem *ioremap(phys_addr_t offset, unsigned long size); - - #define iounmap iounmap --extern void iounmap(void __iomem *addr); -+extern void iounmap(volatile void __iomem *addr); - - #include - -diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h -index 3a7eeae6f56a8..aa1c7e98722e3 100644 ---- a/arch/openrisc/include/asm/syscalls.h -+++ b/arch/openrisc/include/asm/syscalls.h -@@ -22,9 +22,11 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1, - - asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, - void __user *parent_tid, void __user *child_tid, int tls); -+asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size); - asmlinkage long __sys_fork(void); - - #define sys_clone __sys_clone -+#define sys_clone3 __sys_clone3 - #define sys_fork __sys_fork - - #endif /* __ASM_OPENRISC_SYSCALLS_H */ -diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h -index d52b4e536e3f9..5487fa93dd9be 100644 ---- a/arch/openrisc/include/asm/timex.h -+++ b/arch/openrisc/include/asm/timex.h -@@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void) - { - return mfspr(SPR_TTCR); - } -+#define get_cycles get_cycles - - /* This isn't really used any more */ - #define CLOCK_TICK_RATE 1000 -diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c -index 1b16d97e7da7f..a82b2caaa560d 100644 ---- a/arch/openrisc/kernel/dma.c -+++ b/arch/openrisc/kernel/dma.c -@@ -33,7 +33,7 @@ page_set_nocache(pte_t *pte, unsigned long addr, - * Flush the page out of the TLB so that the new page flags get - * picked up next time there's an access - */ -- flush_tlb_page(NULL, addr); -+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE); - - /* Flush page out of dcache */ - for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) -@@ -56,7 +56,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, - * Flush the page out of the TLB so that the new page flags get - * picked up next time there's an access - */ -- flush_tlb_page(NULL, addr); -+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE); - - return 0; - } -diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S -index edaa775a648e6..d32906e89aafd 100644 ---- a/arch/openrisc/kernel/entry.S -+++ b/arch/openrisc/kernel/entry.S -@@ -173,7 +173,6 @@ handler: ;\ - l.sw PT_GPR28(r1),r28 ;\ - l.sw PT_GPR29(r1),r29 ;\ - /* r30 already save */ ;\ --/* l.sw PT_GPR30(r1),r30*/ ;\ - l.sw PT_GPR31(r1),r31 ;\ - TRACE_IRQS_OFF_ENTRY ;\ - /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ -@@ -211,9 +210,8 @@ handler: ;\ - l.sw PT_GPR27(r1),r27 ;\ - l.sw 
PT_GPR28(r1),r28 ;\ - l.sw PT_GPR29(r1),r29 ;\ -- /* r31 already saved */ ;\ -- l.sw PT_GPR30(r1),r30 ;\ --/* l.sw PT_GPR31(r1),r31 */ ;\ -+ /* r30 already saved */ ;\ -+ l.sw PT_GPR31(r1),r31 ;\ - /* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\ - l.addi r30,r0,-1 ;\ - l.sw PT_ORIG_GPR11(r1),r30 ;\ -@@ -1170,6 +1168,11 @@ ENTRY(__sys_clone) - l.j _fork_save_extra_regs_and_call - l.nop - -+ENTRY(__sys_clone3) -+ l.movhi r29,hi(sys_clone3) -+ l.j _fork_save_extra_regs_and_call -+ l.ori r29,r29,lo(sys_clone3) -+ - ENTRY(__sys_fork) - l.movhi r29,hi(sys_fork) - l.ori r29,r29,lo(sys_fork) -diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S -index 15f1b38dfe03b..871f4c8588595 100644 ---- a/arch/openrisc/kernel/head.S -+++ b/arch/openrisc/kernel/head.S -@@ -521,6 +521,15 @@ _start: - l.ori r3,r0,0x1 - l.mtspr r0,r3,SPR_SR - -+ /* -+ * Start the TTCR as early as possible, so that the RNG can make use of -+ * measurements of boot time from the earliest opportunity. Especially -+ * important is that the TTCR does not return zero by the time we reach -+ * rand_initialize(). -+ */ -+ l.movhi r3,hi(SPR_TTMR_CR) -+ l.mtspr r0,r3,SPR_TTMR -+ - CLEAR_GPR(r1) - CLEAR_GPR(r2) - CLEAR_GPR(r3) -diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c -index 415e209732a3d..ba78766cf00b5 100644 ---- a/arch/openrisc/kernel/smp.c -+++ b/arch/openrisc/kernel/smp.c -@@ -272,7 +272,7 @@ static inline void ipi_flush_tlb_range(void *info) - local_flush_tlb_range(NULL, fd->addr1, fd->addr2); - } - --static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start, -+static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start, - unsigned long end) - { - unsigned int cpuid; -@@ -320,7 +320,9 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) - void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) - { -- smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end); -+ const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm) -+ : cpu_online_mask; -+ smp_flush_tlb_range(cmask, start, end); - } - - /* Instruction cache invalidate - performed on each cpu */ -diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c -index aa1e709405acd..9df1d85bfe1d1 100644 ---- a/arch/openrisc/kernel/traps.c -+++ b/arch/openrisc/kernel/traps.c -@@ -212,7 +212,7 @@ void die(const char *str, struct pt_regs *regs, long err) - __asm__ __volatile__("l.nop 1"); - do {} while (1); - #endif -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - /* This is normally the 'Oops' routine */ -diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c -index daae13a76743b..8ec0dafecf257 100644 ---- a/arch/openrisc/mm/ioremap.c -+++ b/arch/openrisc/mm/ioremap.c -@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size) - } - EXPORT_SYMBOL(ioremap); - --void iounmap(void __iomem *addr) -+void iounmap(volatile void __iomem *addr) - { - /* If the page is from the fixmap pool then we just clear out - * the fixmap mapping. 
-diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig -index 27a8b49af11fc..117b0f882750a 100644 ---- a/arch/parisc/Kconfig -+++ b/arch/parisc/Kconfig -@@ -9,6 +9,7 @@ config PARISC - select ARCH_WANT_FRAME_POINTERS - select ARCH_HAS_ELF_RANDOMIZE - select ARCH_HAS_STRICT_KERNEL_RWX -+ select ARCH_HAS_STRICT_MODULE_RWX - select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_NO_SG_CHAIN - select ARCH_SUPPORTS_HUGETLBFS if PA20 -@@ -141,10 +142,10 @@ menu "Processor type and features" - - choice - prompt "Processor type" -- default PA7000 -+ default PA7000 if "$(ARCH)" = "parisc" - - config PA7000 -- bool "PA7000/PA7100" -+ bool "PA7000/PA7100" if "$(ARCH)" = "parisc" - help - This is the processor type of your CPU. This information is - used for optimizing purposes. In order to compile a kernel -@@ -155,21 +156,21 @@ config PA7000 - which is required on some machines. - - config PA7100LC -- bool "PA7100LC" -+ bool "PA7100LC" if "$(ARCH)" = "parisc" - help - Select this option for the PCX-L processor, as used in the - 712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748, - D200, D210, D300, D310 and E-class - - config PA7200 -- bool "PA7200" -+ bool "PA7200" if "$(ARCH)" = "parisc" - help - Select this option for the PCX-T' processor, as used in the - C100, C110, J100, J110, J210XC, D250, D260, D350, D360, - K100, K200, K210, K220, K400, K410 and K420 - - config PA7300LC -- bool "PA7300LC" -+ bool "PA7300LC" if "$(ARCH)" = "parisc" - help - Select this option for the PCX-L2 processor, as used in the - 744, A180, B132L, B160L, B180L, C132L, C160L, C180L, -@@ -219,7 +220,8 @@ config MLONGCALLS - Enabling this option will probably slow down your kernel. - - config 64BIT -- bool "64-bit kernel" -+ def_bool y if "$(ARCH)" = "parisc64" -+ bool "64-bit kernel" if "$(ARCH)" = "parisc" - depends on PA8X00 - help - Enable this if you want to support 64bit kernel on PA-RISC platform. -diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile -index fcde3ffa02213..fadb098de1545 100644 ---- a/arch/parisc/Makefile -+++ b/arch/parisc/Makefile -@@ -17,7 +17,12 @@ - # Mike Shaver, Helge Deller and Martin K. Petersen - # - -+ifdef CONFIG_PARISC_SELF_EXTRACT -+boot := arch/parisc/boot -+KBUILD_IMAGE := $(boot)/bzImage -+else - KBUILD_IMAGE := vmlinuz -+endif - - NM = sh $(srctree)/arch/parisc/nm - CHECKFLAGS += -D__hppa__=1 -diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h -index a39250cb7dfcf..fd8c1ebd27470 100644 ---- a/arch/parisc/include/asm/assembly.h -+++ b/arch/parisc/include/asm/assembly.h -@@ -72,10 +72,6 @@ - - #include - -- sp = 30 -- gp = 27 -- ipsw = 22 -- - /* - * We provide two versions of each macro to convert from physical - * to virtual and vice versa. The "_r1" versions take one argument -diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h -deleted file mode 100644 -index 0a7f9db6bd1c7..0000000000000 ---- a/arch/parisc/include/asm/bugs.h -+++ /dev/null -@@ -1,20 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --/* -- * include/asm-parisc/bugs.h -- * -- * Copyright (C) 1999 Mike Shaver -- */ -- --/* -- * This is included by init/main.c to check for architecture-dependent bugs. 
-- * -- * Needs: -- * void check_bugs(void); -- */ -- --#include -- --static inline void check_bugs(void) --{ --// identify_cpu(&boot_cpu_data); --} -diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h -index eef0096db5f88..2f4c45f60ae1e 100644 ---- a/arch/parisc/include/asm/cacheflush.h -+++ b/arch/parisc/include/asm/cacheflush.h -@@ -53,6 +53,11 @@ extern void flush_dcache_page(struct page *page); - - #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) - #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) -+#define flush_dcache_mmap_lock_irqsave(mapping, flags) \ -+ xa_lock_irqsave(&mapping->i_pages, flags) -+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \ -+ xa_unlock_irqrestore(&mapping->i_pages, flags) -+ - - #define flush_icache_page(vma,page) do { \ - flush_kernel_dcache_page_addr(page_address(page)); \ -diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h -index c4cd6360f9964..55d29c4f716e6 100644 ---- a/arch/parisc/include/asm/fb.h -+++ b/arch/parisc/include/asm/fb.h -@@ -12,9 +12,13 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; - } - -+#if defined(CONFIG_FB_STI) -+int fb_is_primary_device(struct fb_info *info); -+#else - static inline int fb_is_primary_device(struct fb_info *info) - { - return 0; - } -+#endif - - #endif /* _ASM_FB_H_ */ -diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h -index fceb9cf02fb3a..71aa0921d6c72 100644 ---- a/arch/parisc/include/asm/futex.h -+++ b/arch/parisc/include/asm/futex.h -@@ -16,7 +16,7 @@ static inline void - _futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags) - { - extern u32 lws_lock_start[]; -- long index = ((long)uaddr & 0x3f8) >> 1; -+ long index = ((long)uaddr & 0x7f8) >> 1; - arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; - local_irq_save(*flags); - arch_spin_lock(s); -@@ -26,7 +26,7 @@ static inline void - _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags) - { - extern u32 lws_lock_start[]; -- long index = ((long)uaddr & 0x3f8) >> 1; -+ long index = ((long)uaddr & 0x7f8) >> 1; - arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index]; - arch_spin_unlock(s); - local_irq_restore(*flags); -diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h -index 9d3d7737c58b1..a005ebc547793 100644 ---- a/arch/parisc/include/asm/hardware.h -+++ b/arch/parisc/include/asm/hardware.h -@@ -10,12 +10,12 @@ - #define SVERSION_ANY_ID PA_SVERSION_ANY_ID - - struct hp_hardware { -- unsigned short hw_type:5; /* HPHW_xxx */ -- unsigned short hversion; -- unsigned long sversion:28; -- unsigned short opt; -- const char name[80]; /* The hardware description */ --}; -+ unsigned int hw_type:8; /* HPHW_xxx */ -+ unsigned int hversion:12; -+ unsigned int sversion:12; -+ unsigned char opt; -+ unsigned char name[59]; /* The hardware description */ -+} __packed; - - struct parisc_device; - -diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h -index 6de13d08a3886..b70b9094fb7cd 100644 ---- a/arch/parisc/include/asm/led.h -+++ b/arch/parisc/include/asm/led.h -@@ -11,8 +11,8 @@ - #define LED1 0x02 - #define LED0 0x01 /* bottom (or furthest left) LED */ - --#define LED_LAN_TX LED0 /* for LAN transmit activity */ --#define LED_LAN_RCV LED1 /* for LAN receive activity */ -+#define LED_LAN_RCV LED0 /* for LAN receive activity */ -+#define 
LED_LAN_TX LED1 /* for LAN transmit activity */ - #define LED_DISK_IO LED2 /* for disk activity */ - #define LED_HEARTBEAT LED3 /* heartbeat */ - -diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h -index b388d81765883..2f48e0a80d9c6 100644 ---- a/arch/parisc/include/asm/pdc.h -+++ b/arch/parisc/include/asm/pdc.h -@@ -81,6 +81,7 @@ int pdc_do_firm_test_reset(unsigned long ftc_bitmap); - int pdc_do_reset(void); - int pdc_soft_power_info(unsigned long *power_reg); - int pdc_soft_power_button(int sw_control); -+int pdc_soft_power_button_panic(int sw_control); - void pdc_io_reset(void); - void pdc_io_reset_devices(void); - int pdc_iodc_getc(void); -diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h -index 7badd872f05ac..3e7cf882639fb 100644 ---- a/arch/parisc/include/asm/pgtable.h -+++ b/arch/parisc/include/asm/pgtable.h -@@ -76,6 +76,8 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) - purge_tlb_end(flags); - } - -+extern void __update_cache(pte_t pte); -+ - /* Certain architectures need to do special things when PTEs - * within a page table are directly modified. Thus, the following - * hook is made available. -@@ -83,11 +85,14 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) - #define set_pte(pteptr, pteval) \ - do { \ - *(pteptr) = (pteval); \ -- barrier(); \ -+ mb(); \ - } while(0) - - #define set_pte_at(mm, addr, pteptr, pteval) \ - do { \ -+ if (pte_present(pteval) && \ -+ pte_user(pteval)) \ -+ __update_cache(pteval); \ - *(pteptr) = (pteval); \ - purge_tlb_entries(mm, addr); \ - } while (0) -@@ -303,6 +308,7 @@ extern unsigned long *empty_zero_page; - - #define pte_none(x) (pte_val(x) == 0) - #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) -+#define pte_user(x) (pte_val(x) & _PAGE_USER) - #define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) - - #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) -@@ -410,7 +416,7 @@ extern void paging_init (void); - - #define PG_dcache_dirty PG_arch_1 - --extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); -+#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep) - - /* Encode and de-code a swap entry */ - -diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h -index 4b9e3d707571b..2b3010ade00e7 100644 ---- a/arch/parisc/include/asm/rt_sigframe.h -+++ b/arch/parisc/include/asm/rt_sigframe.h -@@ -2,7 +2,7 @@ - #ifndef _ASM_PARISC_RT_SIGFRAME_H - #define _ASM_PARISC_RT_SIGFRAME_H - --#define SIGRETURN_TRAMP 3 -+#define SIGRETURN_TRAMP 4 - #define SIGRESTARTBLOCK_TRAMP 5 - #define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) - -diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h -index a303ae9a77f41..16ee41e77174f 100644 ---- a/arch/parisc/include/asm/special_insns.h -+++ b/arch/parisc/include/asm/special_insns.h -@@ -2,28 +2,32 @@ - #ifndef __PARISC_SPECIAL_INSNS_H - #define __PARISC_SPECIAL_INSNS_H - --#define lpa(va) ({ \ -- unsigned long pa; \ -- __asm__ __volatile__( \ -- "copy %%r0,%0\n\t" \ -- "lpa %%r0(%1),%0" \ -- : "=r" (pa) \ -- : "r" (va) \ -- : "memory" \ -- ); \ -- pa; \ -+#define lpa(va) ({ \ -+ unsigned long pa; \ -+ __asm__ __volatile__( \ -+ "copy %%r0,%0\n" \ -+ "8:\tlpa %%r0(%1),%0\n" \ -+ "9:\n" \ -+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ -+ : "=&r" (pa) \ -+ : "r" (va) \ -+ : "memory" \ -+ ); \ -+ pa; \ - }) - --#define lpa_user(va) ({ \ -- unsigned long pa; \ -- __asm__ 
__volatile__( \ -- "copy %%r0,%0\n\t" \ -- "lpa %%r0(%%sr3,%1),%0" \ -- : "=r" (pa) \ -- : "r" (va) \ -- : "memory" \ -- ); \ -- pa; \ -+#define lpa_user(va) ({ \ -+ unsigned long pa; \ -+ __asm__ __volatile__( \ -+ "copy %%r0,%0\n" \ -+ "8:\tlpa %%r0(%%sr3,%1),%0\n" \ -+ "9:\n" \ -+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ -+ : "=&r" (pa) \ -+ : "r" (va) \ -+ : "memory" \ -+ ); \ -+ pa; \ - }) - - #define mfctl(reg) ({ \ -diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h -index 06b510f8172e3..b4622cb06a75e 100644 ---- a/arch/parisc/include/asm/timex.h -+++ b/arch/parisc/include/asm/timex.h -@@ -13,9 +13,10 @@ - - typedef unsigned long cycles_t; - --static inline cycles_t get_cycles (void) -+static inline cycles_t get_cycles(void) - { - return mfctl(16); - } -+#define get_cycles get_cycles - - #endif -diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h -index 8ecc1f0c0483d..d0e090a2c000d 100644 ---- a/arch/parisc/include/asm/traps.h -+++ b/arch/parisc/include/asm/traps.h -@@ -17,6 +17,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err); - const char *trap_name(unsigned long code); - void do_page_fault(struct pt_regs *regs, unsigned long code, - unsigned long address); -+int handle_nadtlb_fault(struct pt_regs *regs); - #endif - - #endif -diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h -index 9e3c010c0f61e..5f1f3eea5aa5f 100644 ---- a/arch/parisc/include/uapi/asm/mman.h -+++ b/arch/parisc/include/uapi/asm/mman.h -@@ -49,31 +49,30 @@ - #define MADV_DONTFORK 10 /* don't inherit across fork */ - #define MADV_DOFORK 11 /* do inherit across fork */ - --#define MADV_COLD 20 /* deactivate these pages */ --#define MADV_PAGEOUT 21 /* reclaim these pages */ -+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ -+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ - --#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ --#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ -+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ -+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ - --#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ --#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ -+#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, -+ overrides the coredump filter bits */ -+#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ - --#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */ --#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */ -+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ -+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ - --#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump, -- overrides the coredump filter bits */ --#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */ -+#define MADV_COLD 20 /* deactivate these pages */ -+#define MADV_PAGEOUT 21 /* reclaim these pages */ - --#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */ --#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */ -+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ -+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ - - #define MADV_HWPOISON 100 /* poison a page for testing */ - #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */ - - /* compatibility flags */ - #define MAP_FILE 0 --#define MAP_VARIABLE 0 - - #define 
PKEY_DISABLE_ACCESS 0x1 - #define PKEY_DISABLE_WRITE 0x2 -diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh -index 056d588befdd6..70d3cffb02515 100644 ---- a/arch/parisc/install.sh -+++ b/arch/parisc/install.sh -@@ -39,6 +39,7 @@ verify "$3" - if [ -n "${INSTALLKERNEL}" ]; then - if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi - if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi -+ if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi - fi - - # Default install -diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c -index 39e02227e2310..c473c2f395a0a 100644 ---- a/arch/parisc/kernel/cache.c -+++ b/arch/parisc/kernel/cache.c -@@ -46,9 +46,6 @@ void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); - */ - DEFINE_SPINLOCK(pa_tlb_flush_lock); - --/* Swapper page setup lock. */ --DEFINE_SPINLOCK(pa_swapper_pg_lock); -- - #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) - int pa_serialize_tlb_flushes __ro_after_init; - #endif -@@ -83,9 +80,9 @@ EXPORT_SYMBOL(flush_cache_all_local); - #define pfn_va(pfn) __va(PFN_PHYS(pfn)) - - void --update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) -+__update_cache(pte_t pte) - { -- unsigned long pfn = pte_pfn(*ptep); -+ unsigned long pfn = pte_pfn(pte); - struct page *page; - - /* We don't have pte special. As a result, we can be called with -@@ -327,6 +324,7 @@ void flush_dcache_page(struct page *page) - struct vm_area_struct *mpnt; - unsigned long offset; - unsigned long addr, old_addr = 0; -+ unsigned long flags; - pgoff_t pgoff; - - if (mapping && !mapping_mapped(mapping)) { -@@ -346,7 +344,7 @@ void flush_dcache_page(struct page *page) - * declared as MAP_PRIVATE or MAP_SHARED), so we only need - * to flush one address here for them all to become coherent */ - -- flush_dcache_mmap_lock(mapping); -+ flush_dcache_mmap_lock_irqsave(mapping, flags); - vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { - offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - addr = mpnt->vm_start + offset; -@@ -369,7 +367,7 @@ void flush_dcache_page(struct page *page) - old_addr = addr; - } - } -- flush_dcache_mmap_unlock(mapping); -+ flush_dcache_mmap_unlock_irqrestore(mapping, flags); - } - EXPORT_SYMBOL(flush_dcache_page); - -diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c -index 776d624a7207b..e7ee0c0c91d35 100644 ---- a/arch/parisc/kernel/drivers.c -+++ b/arch/parisc/kernel/drivers.c -@@ -520,7 +520,6 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) - dev->id.hversion_rev = iodc_data[1] & 0x0f; - dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) | - (iodc_data[5] << 8) | iodc_data[6]; -- dev->hpa.name = parisc_pathname(dev); - dev->hpa.start = hpa; - /* This is awkward. The STI spec says that gfx devices may occupy - * 32MB or 64MB. Unfortunately, we don't know how to tell whether -@@ -534,10 +533,10 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) - dev->hpa.end = hpa + 0xfff; - } - dev->hpa.flags = IORESOURCE_MEM; -- name = parisc_hardware_description(&dev->id); -- if (name) { -- strlcpy(dev->name, name, sizeof(dev->name)); -- } -+ dev->hpa.name = dev->name; -+ name = parisc_hardware_description(&dev->id) ? 
: "unknown"; -+ snprintf(dev->name, sizeof(dev->name), "%s [%s]", -+ name, parisc_pathname(dev)); - - /* Silently fail things like mouse ports which are subsumed within - * the keyboard controller -@@ -883,15 +882,13 @@ void __init walk_central_bus(void) - &root); - } - --static void print_parisc_device(struct parisc_device *dev) -+static __init void print_parisc_device(struct parisc_device *dev) - { -- char hw_path[64]; -- static int count; -+ static int count __initdata; - -- print_pa_hwpath(dev, hw_path); -- pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", -- ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type, -- dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); -+ pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }", -+ ++count, dev->name, &(dev->hpa.start), dev->id.hw_type, -+ dev->id.hversion, dev->id.sversion, dev->id.hversion_rev); - - if (dev->num_addrs) { - int k; -@@ -1080,7 +1077,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) - - - --static int print_one_device(struct device * dev, void * data) -+static __init int print_one_device(struct device * dev, void * data) - { - struct parisc_device * pdev = to_parisc_device(dev); - -diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S -index 9f939afe6b88c..437c8d31f3907 100644 ---- a/arch/parisc/kernel/entry.S -+++ b/arch/parisc/kernel/entry.S -@@ -1834,8 +1834,8 @@ syscall_restore: - LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 - - /* Are we being ptraced? */ -- ldw TASK_FLAGS(%r1),%r19 -- ldi _TIF_SYSCALL_TRACE_MASK,%r2 -+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 -+ ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2 - and,COND(=) %r19,%r2,%r0 - b,n syscall_restore_rfi - -diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c -index 7034227dbdf32..5385e0fe98426 100644 ---- a/arch/parisc/kernel/firmware.c -+++ b/arch/parisc/kernel/firmware.c -@@ -1158,15 +1158,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg) - } - - /* -- * pdc_soft_power_button - Control the soft power button behaviour -- * @sw_control: 0 for hardware control, 1 for software control -+ * pdc_soft_power_button{_panic} - Control the soft power button behaviour -+ * @sw_control: 0 for hardware control, 1 for software control - * - * - * This PDC function places the soft power button under software or - * hardware control. -- * Under software control the OS may control to when to allow to shut -- * down the system. Under hardware control pressing the power button -+ * Under software control the OS may control to when to allow to shut -+ * down the system. Under hardware control pressing the power button - * powers off the system immediately. -+ * -+ * The _panic version relies on spin_trylock to prevent deadlock -+ * on panic path. - */ - int pdc_soft_power_button(int sw_control) - { -@@ -1180,6 +1183,22 @@ int pdc_soft_power_button(int sw_control) - return retval; - } - -+int pdc_soft_power_button_panic(int sw_control) -+{ -+ int retval; -+ unsigned long flags; -+ -+ if (!spin_trylock_irqsave(&pdc_lock, flags)) { -+ pr_emerg("Couldn't enable soft power button\n"); -+ return -EBUSY; /* ignored by the panic notifier */ -+ } -+ -+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control); -+ spin_unlock_irqrestore(&pdc_lock, flags); -+ -+ return retval; -+} -+ - /* - * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices. 
- * Primarily a problem on T600 (which parisc-linux doesn't support) but -@@ -1230,7 +1249,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096]; - */ - int pdc_iodc_print(const unsigned char *str, unsigned count) - { -- unsigned int i; -+ unsigned int i, found = 0; - unsigned long flags; - - for (i = 0; i < count;) { -@@ -1239,6 +1258,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) - iodc_dbuf[i+0] = '\r'; - iodc_dbuf[i+1] = '\n'; - i += 2; -+ found = 1; - goto print; - default: - iodc_dbuf[i] = str[i]; -@@ -1255,7 +1275,7 @@ print: - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); - spin_unlock_irqrestore(&pdc_lock, flags); - -- return i; -+ return i - found; - } - - #if !defined(BOOTLOADER) -diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S -index aa93d775c34db..598d0938449da 100644 ---- a/arch/parisc/kernel/head.S -+++ b/arch/parisc/kernel/head.S -@@ -22,7 +22,7 @@ - #include - #include - -- .level PA_ASM_LEVEL -+ .level 1.1 - - __INITDATA - ENTRY(boot_args) -@@ -69,6 +69,47 @@ $bss_loop: - stw,ma %arg2,4(%r1) - stw,ma %arg3,4(%r1) - -+#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) -+ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU -+ * and halt kernel if we detect a PA1.x CPU. */ -+ ldi 32,%r10 -+ mtctl %r10,%cr11 -+ .level 2.0 -+ mfctl,w %cr11,%r10 -+ .level 1.1 -+ comib,<>,n 0,%r10,$cpu_ok -+ -+ load32 PA(msg1),%arg0 -+ ldi msg1_end-msg1,%arg1 -+$iodc_panic: -+ copy %arg0, %r10 -+ copy %arg1, %r11 -+ load32 PA(init_stack),%sp -+#define MEM_CONS 0x3A0 -+ ldw MEM_CONS+32(%r0),%arg0 // HPA -+ ldi ENTRY_IO_COUT,%arg1 -+ ldw MEM_CONS+36(%r0),%arg2 // SPA -+ ldw MEM_CONS+8(%r0),%arg3 // layers -+ load32 PA(__bss_start),%r1 -+ stw %r1,-52(%sp) // arg4 -+ stw %r0,-56(%sp) // arg5 -+ stw %r10,-60(%sp) // arg6 = ptr to text -+ stw %r11,-64(%sp) // arg7 = len -+ stw %r0,-68(%sp) // arg8 -+ load32 PA(.iodc_panic_ret), %rp -+ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC -+ bv,n (%r1) -+.iodc_panic_ret: -+ b . /* wait endless with ... */ -+ or %r10,%r10,%r10 /* qemu idle sleep */ -+msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" -+msg1_end: -+ -+$cpu_ok: -+#endif -+ -+ .level PA_ASM_LEVEL -+ - /* Initialize startup VM. 
Just map first 16/32 MB of memory */ - load32 PA(swapper_pg_dir),%r4 - mtctl %r4,%cr24 /* Initialize kernel root pointer */ -diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c -index 0d46b19dc4d3d..e6cc38ef69458 100644 ---- a/arch/parisc/kernel/irq.c -+++ b/arch/parisc/kernel/irq.c -@@ -333,7 +333,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu) - { - #ifdef CONFIG_SMP - struct irq_data *d = irq_get_irq_data(irq); -- cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu)); -+ irq_data_update_affinity(d, cpumask_of(cpu)); - #endif - - return per_cpu(cpu_data, cpu).txn_addr; -diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c -index 80a0ab372802d..e59574f65e641 100644 ---- a/arch/parisc/kernel/patch.c -+++ b/arch/parisc/kernel/patch.c -@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, - - *need_unmap = 1; - set_fixmap(fixmap, page_to_phys(page)); -- if (flags) -- raw_spin_lock_irqsave(&patch_lock, *flags); -- else -- __acquire(&patch_lock); -+ raw_spin_lock_irqsave(&patch_lock, *flags); - - return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); - } -@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) - { - clear_fixmap(fixmap); - -- if (flags) -- raw_spin_unlock_irqrestore(&patch_lock, *flags); -- else -- __release(&patch_lock); -+ raw_spin_unlock_irqrestore(&patch_lock, *flags); - } - - void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) -@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) - int mapped; - - /* Make sure we don't have any aliases in cache */ -- flush_kernel_vmap_range(addr, len); -- flush_icache_range(start, end); -+ flush_kernel_dcache_range_asm(start, end); -+ flush_kernel_icache_range_asm(start, end); -+ flush_tlb_kernel_range(start, end); - - p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); - -@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) - * We're crossing a page boundary, so - * need to remap - */ -- flush_kernel_vmap_range((void *)fixmap, -- (p-fixmap) * sizeof(*p)); -+ flush_kernel_dcache_range_asm((unsigned long)fixmap, -+ (unsigned long)p); -+ flush_tlb_kernel_range((unsigned long)fixmap, -+ (unsigned long)p); - if (mapped) - patch_unmap(FIX_TEXT_POKE0, &flags); - p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, -@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) - } - } - -- flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); -+ flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); -+ flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); - if (mapped) - patch_unmap(FIX_TEXT_POKE0, &flags); -- flush_icache_range(start, end); - } - - void __kprobes __patch_text(void *addr, u32 insn) -diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c -index 36a57aa38e87e..3b0227b17c070 100644 ---- a/arch/parisc/kernel/pci-dma.c -+++ b/arch/parisc/kernel/pci-dma.c -@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, - void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) - { -+ /* -+ * fdc: The data cache line is written back to memory, if and only if -+ * it is dirty, and then invalidated from the data cache. 
-+ */ - flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); - } - - void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) - { -- flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size); -+ unsigned long addr = (unsigned long) phys_to_virt(paddr); -+ -+ switch (dir) { -+ case DMA_TO_DEVICE: -+ case DMA_BIDIRECTIONAL: -+ flush_kernel_dcache_range(addr, size); -+ return; -+ case DMA_FROM_DEVICE: -+ purge_kernel_dcache_range_asm(addr, addr + size); -+ return; -+ default: -+ BUG(); -+ } - } -diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c -index 38ec4ae812396..4f36c16aec860 100644 ---- a/arch/parisc/kernel/process.c -+++ b/arch/parisc/kernel/process.c -@@ -120,13 +120,18 @@ void machine_power_off(void) - /* It seems we have no way to power the system off via - * software. The user has to press the button himself. */ - -- printk(KERN_EMERG "System shut down completed.\n" -- "Please power this system off now."); -+ printk("Power off or press RETURN to reboot.\n"); - - /* prevent soft lockup/stalled CPU messages for endless loop. */ - rcu_sysrq_start(); - lockup_detector_soft_poweroff(); -- for (;;); -+ while (1) { -+ /* reboot if user presses RETURN key */ -+ if (pdc_iodc_getc() == 13) { -+ printk("Rebooting...\n"); -+ machine_restart(NULL); -+ } -+ } - } - - void (*pm_power_off)(void); -diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c -index 1b6129e7d776b..ccdbcfdfe4e21 100644 ---- a/arch/parisc/kernel/processor.c -+++ b/arch/parisc/kernel/processor.c -@@ -372,10 +372,18 @@ int - show_cpuinfo (struct seq_file *m, void *v) - { - unsigned long cpu; -+ char cpu_name[60], *p; -+ -+ /* strip PA path from CPU name to not confuse lscpu */ -+ strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); -+ p = strrchr(cpu_name, '['); -+ if (p) -+ *(--p) = 0; - - for_each_online_cpu(cpu) { -- const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); - #ifdef CONFIG_SMP -+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); -+ - if (0 == cpuinfo->hpa) - continue; - #endif -@@ -418,11 +426,9 @@ show_cpuinfo (struct seq_file *m, void *v) - } - seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); - -- seq_printf(m, "model\t\t: %s\n" -- "model name\t: %s\n", -+ seq_printf(m, "model\t\t: %s - %s\n", - boot_cpu_data.pdc.sys_model_name, -- cpuinfo->dev ? -- cpuinfo->dev->name : "Unknown"); -+ cpu_name); - - seq_printf(m, "hversion\t: 0x%08x\n" - "sversion\t: 0x%08x\n", -diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c -index 65de6c4c9354d..b9398e805978d 100644 ---- a/arch/parisc/kernel/ptrace.c -+++ b/arch/parisc/kernel/ptrace.c -@@ -127,6 +127,12 @@ long arch_ptrace(struct task_struct *child, long request, - unsigned long tmp; - long ret = -EIO; - -+ unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); -+#ifdef CONFIG_64BIT -+ if (is_compat_task()) -+ user_regs_struct_size /= 2; -+#endif -+ - switch (request) { - - /* Read the word at location addr in the USER area. For ptraced -@@ -182,14 +188,14 @@ long arch_ptrace(struct task_struct *child, long request, - return copy_regset_to_user(child, - task_user_regset_view(current), - REGSET_GENERAL, -- 0, sizeof(struct user_regs_struct), -+ 0, user_regs_struct_size, - datap); - - case PTRACE_SETREGS: /* Set all gp regs in the child. 
*/ - return copy_regset_from_user(child, - task_user_regset_view(current), - REGSET_GENERAL, -- 0, sizeof(struct user_regs_struct), -+ 0, user_regs_struct_size, - datap); - - case PTRACE_GETFPREGS: /* Get the child FPU state. */ -@@ -303,6 +309,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - } - } - break; -+ case PTRACE_GETREGS: -+ case PTRACE_SETREGS: -+ case PTRACE_GETFPREGS: -+ case PTRACE_SETFPREGS: -+ return arch_ptrace(child, request, addr, data); - - default: - ret = compat_ptrace_request(child, request, addr, data); -diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S -index 2b16d8d6598f1..c37010a135865 100644 ---- a/arch/parisc/kernel/real2.S -+++ b/arch/parisc/kernel/real2.S -@@ -248,9 +248,6 @@ ENTRY_CFI(real64_call_asm) - /* save fn */ - copy %arg2, %r31 - -- /* set up the new ap */ -- ldo 64(%arg1), %r29 -- - /* load up the arg registers from the saved arg area */ - /* 32-bit calling convention passes first 4 args in registers */ - ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */ -@@ -262,7 +259,9 @@ ENTRY_CFI(real64_call_asm) - ldd 7*REG_SZ(%arg1), %r19 - ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */ - -+ /* set up real-mode stack and real-mode ap */ - tophys_r1 %sp -+ ldo -16(%sp), %r29 /* Reference param save area */ - - b,l rfi_virt2real,%r2 - nop -diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c -index cceb09855e03f..3fb86ee507dd5 100644 ---- a/arch/parisc/kernel/setup.c -+++ b/arch/parisc/kernel/setup.c -@@ -150,6 +150,8 @@ void __init setup_arch(char **cmdline_p) - #ifdef CONFIG_PA11 - dma_ops_init(); - #endif -+ -+ clear_sched_clock_stable(); - } - - /* -diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c -index bbfe23c40c016..0fb06d87b3a5c 100644 ---- a/arch/parisc/kernel/signal.c -+++ b/arch/parisc/kernel/signal.c -@@ -288,21 +288,22 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, - already in userspace. The first words of tramp are used to - save the previous sigrestartblock trampoline that might be - on the stack. We start the sigreturn trampoline at -- SIGRESTARTBLOCK_TRAMP. */ -+ SIGRESTARTBLOCK_TRAMP+X. */ - err |= __put_user(in_syscall ? 
INSN_LDI_R25_1 : INSN_LDI_R25_0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]); -- err |= __put_user(INSN_BLE_SR2_R0, -- &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); - err |= __put_user(INSN_LDI_R20, -+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); -+ err |= __put_user(INSN_BLE_SR2_R0, - &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]); -+ err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]); - -- start = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]; -- end = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]; -+ start = (unsigned long) &frame->tramp[0]; -+ end = (unsigned long) &frame->tramp[TRAMP_SIZE]; - flush_user_dcache_range_asm(start, end); - flush_user_icache_range_asm(start, end); - - /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP -- * TRAMP Words 5-7, Length 3 = SIGRETURN_TRAMP -+ * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP - * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP - */ - rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; -diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h -index a5bdbb5678b72..f166250f2d064 100644 ---- a/arch/parisc/kernel/signal32.h -+++ b/arch/parisc/kernel/signal32.h -@@ -36,7 +36,7 @@ struct compat_regfile { - compat_int_t rf_sar; - }; - --#define COMPAT_SIGRETURN_TRAMP 3 -+#define COMPAT_SIGRETURN_TRAMP 4 - #define COMPAT_SIGRESTARTBLOCK_TRAMP 5 - #define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \ - COMPAT_SIGRESTARTBLOCK_TRAMP) -diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c -index 1405b603b91b6..cf92ece20b757 100644 ---- a/arch/parisc/kernel/smp.c -+++ b/arch/parisc/kernel/smp.c -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -69,7 +70,10 @@ enum ipi_message_type { - IPI_CALL_FUNC, - IPI_CPU_START, - IPI_CPU_STOP, -- IPI_CPU_TEST -+ IPI_CPU_TEST, -+#ifdef CONFIG_KGDB -+ IPI_ENTER_KGDB, -+#endif - }; - - -@@ -167,7 +171,12 @@ ipi_interrupt(int irq, void *dev_id) - case IPI_CPU_TEST: - smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); - break; -- -+#ifdef CONFIG_KGDB -+ case IPI_ENTER_KGDB: -+ smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu); -+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); -+ break; -+#endif - default: - printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", - this_cpu, which); -@@ -226,6 +235,12 @@ send_IPI_allbutself(enum ipi_message_type op) - } - } - -+#ifdef CONFIG_KGDB -+void kgdb_roundup_cpus(void) -+{ -+ send_IPI_allbutself(IPI_ENTER_KGDB); -+} -+#endif - - inline void - smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } -diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index 5f12537318ab2..31950882e272f 100644 ---- a/arch/parisc/kernel/sys_parisc.c -+++ b/arch/parisc/kernel/sys_parisc.c -@@ -463,3 +463,30 @@ asmlinkage long parisc_inotify_init1(int flags) - flags = FIX_O_NONBLOCK(flags); - return sys_inotify_init1(flags); - } -+ -+/* -+ * madvise() wrapper -+ * -+ * Up to kernel v6.1 parisc has different values than all other -+ * platforms for the MADV_xxx flags listed below. -+ * To keep binary compatibility with existing userspace programs -+ * translate the former values to the new values. 
-+ * -+ * XXX: Remove this wrapper in year 2025 (or later) -+ */ -+ -+asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior) -+{ -+ switch (behavior) { -+ case 65: behavior = MADV_MERGEABLE; break; -+ case 66: behavior = MADV_UNMERGEABLE; break; -+ case 67: behavior = MADV_HUGEPAGE; break; -+ case 68: behavior = MADV_NOHUGEPAGE; break; -+ case 69: behavior = MADV_DONTDUMP; break; -+ case 70: behavior = MADV_DODUMP; break; -+ case 71: behavior = MADV_WIPEONFORK; break; -+ case 72: behavior = MADV_KEEPONFORK; break; -+ } -+ -+ return sys_madvise(start, len_in, behavior); -+} -diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S -index 3f24a0af1e047..9842dcb2041e5 100644 ---- a/arch/parisc/kernel/syscall.S -+++ b/arch/parisc/kernel/syscall.S -@@ -478,7 +478,7 @@ lws_start: - extrd,u %r1,PSW_W_BIT,1,%r1 - /* sp must be aligned on 4, so deposit the W bit setting into - * the bottom of sp temporarily */ -- or,ev %r1,%r30,%r30 -+ or,od %r1,%r30,%r30 - - /* Clip LWS number to a 32-bit value for 32-bit processes */ - depdi 0, 31, 32, %r20 -diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl -index bf751e0732b70..50c759f11c25d 100644 ---- a/arch/parisc/kernel/syscalls/syscall.tbl -+++ b/arch/parisc/kernel/syscalls/syscall.tbl -@@ -131,7 +131,7 @@ - 116 common sysinfo sys_sysinfo compat_sys_sysinfo - 117 common shutdown sys_shutdown - 118 common fsync sys_fsync --119 common madvise sys_madvise -+119 common madvise parisc_madvise - 120 common clone sys_clone_wrapper - 121 common setdomainname sys_setdomainname - 122 common sendfile sys_sendfile compat_sys_sendfile -@@ -413,7 +413,7 @@ - 412 32 utimensat_time64 sys_utimensat sys_utimensat - 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 - 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 --416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents -+416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 - 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 - 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend - 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive -diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c -index 9fb1e794831b0..d8e59a1000ab7 100644 ---- a/arch/parisc/kernel/time.c -+++ b/arch/parisc/kernel/time.c -@@ -249,30 +249,12 @@ void __init time_init(void) - static int __init init_cr16_clocksource(void) - { - /* -- * The cr16 interval timers are not syncronized across CPUs on -- * different sockets, so mark them unstable and lower rating on -- * multi-socket SMP systems. -+ * The cr16 interval timers are not synchronized across CPUs. 
- */ - if (num_online_cpus() > 1 && !running_on_qemu) { -- int cpu; -- unsigned long cpu0_loc; -- cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; -- -- for_each_online_cpu(cpu) { -- if (cpu == 0) -- continue; -- if ((cpu0_loc != 0) && -- (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) -- continue; -- -- /* mark sched_clock unstable */ -- clear_sched_clock_stable(); -- -- clocksource_cr16.name = "cr16_unstable"; -- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; -- clocksource_cr16.rating = 0; -- break; -- } -+ clocksource_cr16.name = "cr16_unstable"; -+ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; -+ clocksource_cr16.rating = 0; - } - - /* register at clocksource framework */ -diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c -index 747c328fb8862..dc1bc77b9fa69 100644 ---- a/arch/parisc/kernel/traps.c -+++ b/arch/parisc/kernel/traps.c -@@ -268,7 +268,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) - panic("Fatal exception"); - - oops_exit(); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - /* gdb uses break 4,8 */ -@@ -305,8 +305,8 @@ static void handle_break(struct pt_regs *regs) - #endif - - #ifdef CONFIG_KGDB -- if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN || -- iir == PARISC_KGDB_BREAK_INSN)) { -+ if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN || -+ iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) { - kgdb_handle_exception(9, SIGTRAP, 0, regs); - return; - } -@@ -661,6 +661,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) - by hand. Technically we need to emulate: - fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw - */ -+ if (code == 17 && handle_nadtlb_fault(regs)) -+ return; - fault_address = regs->ior; - fault_space = regs->isr; - break; -@@ -729,6 +731,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) - } - mmap_read_unlock(current->mm); - } -+ /* CPU could not fetch instruction, so clear stale IIR value. */ -+ regs->iir = 0xbaadf00d; - fallthrough; - case 27: - /* Data memory protection ID trap */ -@@ -782,7 +786,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) - * unless pagefault_disable() was called before. - */ - -- if (fault_space == 0 && !faulthandler_disabled()) -+ if (faulthandler_disabled() || fault_space == 0) - { - /* Clean up and return if in exception table. 
*/ - if (fixup_exception(regs)) -diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c -index 237d20dd5622d..cc6ed74960501 100644 ---- a/arch/parisc/kernel/unaligned.c -+++ b/arch/parisc/kernel/unaligned.c -@@ -107,7 +107,7 @@ - #define R1(i) (((i)>>21)&0x1f) - #define R2(i) (((i)>>16)&0x1f) - #define R3(i) ((i)&0x1f) --#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1)) -+#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1)) - #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) - #define IM5_2(i) IM((i)>>16,5) - #define IM5_3(i) IM((i),5) -@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) - : "r" (val), "r" (regs->ior), "r" (regs->isr) - : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); - -- return 0; -+ return ret; - } - static int emulate_std(struct pt_regs *regs, int frreg, int flop) - { -@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) - __asm__ __volatile__ ( - " mtsp %4, %%sr1\n" - " zdep %2, 29, 2, %%r19\n" --" dep %%r0, 31, 2, %2\n" -+" dep %%r0, 31, 2, %3\n" - " mtsar %%r19\n" - " zvdepi -2, 32, %%r19\n" - "1: ldw 0(%%sr1,%3),%%r20\n" -@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) - " andcm %%r21, %%r19, %%r21\n" - " or %1, %%r20, %1\n" - " or %2, %%r21, %2\n" --"3: stw %1,0(%%sr1,%1)\n" -+"3: stw %1,0(%%sr1,%3)\n" - "4: stw %%r1,4(%%sr1,%3)\n" - "5: stw %2,8(%%sr1,%3)\n" - " copy %%r0, %0\n" -@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs) - ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ - break; - } --#ifdef CONFIG_PA20 - switch (regs->iir & OPCODE2_MASK) - { - case OPCODE_FLDD_L: -@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs) - flop=1; - ret = emulate_std(regs, R2(regs->iir),1); - break; -+#ifdef CONFIG_PA20 - case OPCODE_LDD_L: - ret = emulate_ldd(regs, R2(regs->iir),0); - break; - case OPCODE_STD_L: - ret = emulate_std(regs, R2(regs->iir),0); - break; -- } - #endif -+ } - switch (regs->iir & OPCODE3_MASK) - { - case OPCODE_FLDW_L: - flop=1; -- ret = emulate_ldw(regs, R2(regs->iir),0); -+ ret = emulate_ldw(regs, R2(regs->iir), 1); - break; - case OPCODE_LDW_M: -- ret = emulate_ldw(regs, R2(regs->iir),1); -+ ret = emulate_ldw(regs, R2(regs->iir), 0); - break; - - case OPCODE_FSTW_L: -diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c -index 87ae476d1c4f5..86a57fb0e6fae 100644 ---- a/arch/parisc/kernel/unwind.c -+++ b/arch/parisc/kernel/unwind.c -@@ -21,6 +21,8 @@ - #include - - #include -+#include -+#include - - /* #define DEBUG 1 */ - #ifdef DEBUG -@@ -203,6 +205,11 @@ int __init unwind_init(void) - return 0; - } - -+static bool pc_is_kernel_fn(unsigned long pc, void *fn) -+{ -+ return (unsigned long)dereference_kernel_function_descriptor(fn) == pc; -+} -+ - static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) - { - /* -@@ -221,7 +228,7 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int - extern void * const _call_on_stack; - #endif /* CONFIG_IRQSTACKS */ - -- if (pc == (unsigned long) &handle_interruption) { -+ if (pc_is_kernel_fn(pc, handle_interruption)) { - struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); - dbg("Unwinding through handle_interruption()\n"); - info->prev_sp = regs->gr[30]; -@@ -229,13 +236,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int - return 1; - } - -- if (pc == (unsigned long) &ret_from_kernel_thread 
|| -- pc == (unsigned long) &syscall_exit) { -+ if (pc_is_kernel_fn(pc, ret_from_kernel_thread) || -+ pc_is_kernel_fn(pc, syscall_exit)) { - info->prev_sp = info->prev_ip = 0; - return 1; - } - -- if (pc == (unsigned long) &intr_return) { -+ if (pc_is_kernel_fn(pc, intr_return)) { - struct pt_regs *regs; - - dbg("Found intr_return()\n"); -@@ -246,20 +253,20 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int - return 1; - } - -- if (pc == (unsigned long) &_switch_to_ret) { -+ if (pc_is_kernel_fn(pc, _switch_to) || -+ pc_is_kernel_fn(pc, _switch_to_ret)) { - info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; - info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); - return 1; - } - - #ifdef CONFIG_IRQSTACKS -- if (pc == (unsigned long) &_call_on_stack) { -+ if (pc_is_kernel_fn(pc, _call_on_stack)) { - info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); - info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); - return 1; - } - #endif -- - return 0; - } - -diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c -index 367f6397bda7a..8603850580857 100644 ---- a/arch/parisc/lib/iomap.c -+++ b/arch/parisc/lib/iomap.c -@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr) - return *((u64 *)addr); - } - -+u64 ioread64_lo_hi(const void __iomem *addr) -+{ -+ u32 low, high; -+ -+ low = ioread32(addr); -+ high = ioread32(addr + sizeof(u32)); -+ -+ return low + ((u64)high << 32); -+} -+ - u64 ioread64_hi_lo(const void __iomem *addr) - { - u32 low, high; -@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr) - } - } - -+void iowrite64_lo_hi(u64 val, void __iomem *addr) -+{ -+ iowrite32(val, addr); -+ iowrite32(val >> 32, addr + sizeof(u32)); -+} -+ - void iowrite64_hi_lo(u64 val, void __iomem *addr) - { - iowrite32(val >> 32, addr + sizeof(u32)); -@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32); - EXPORT_SYMBOL(ioread32be); - EXPORT_SYMBOL(ioread64); - EXPORT_SYMBOL(ioread64be); -+EXPORT_SYMBOL(ioread64_lo_hi); - EXPORT_SYMBOL(ioread64_hi_lo); - EXPORT_SYMBOL(iowrite8); - EXPORT_SYMBOL(iowrite16); -@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32); - EXPORT_SYMBOL(iowrite32be); - EXPORT_SYMBOL(iowrite64); - EXPORT_SYMBOL(iowrite64be); -+EXPORT_SYMBOL(iowrite64_lo_hi); - EXPORT_SYMBOL(iowrite64_hi_lo); - EXPORT_SYMBOL(ioread8_rep); - EXPORT_SYMBOL(ioread16_rep); -diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c -index 716960f5d92ea..5faa3cff47387 100644 ---- a/arch/parisc/mm/fault.c -+++ b/arch/parisc/mm/fault.c -@@ -424,3 +424,92 @@ no_context: - goto no_context; - pagefault_out_of_memory(); - } -+ -+/* Handle non-access data TLB miss faults. -+ * -+ * For probe instructions, accesses to userspace are considered allowed -+ * if they lie in a valid VMA and the access type matches. We are not -+ * allowed to handle MM faults here so there may be situations where an -+ * actual access would fail even though a probe was successful. 
-+ */ -+int -+handle_nadtlb_fault(struct pt_regs *regs) -+{ -+ unsigned long insn = regs->iir; -+ int breg, treg, xreg, val = 0; -+ struct vm_area_struct *vma, *prev_vma; -+ struct task_struct *tsk; -+ struct mm_struct *mm; -+ unsigned long address; -+ unsigned long acc_type; -+ -+ switch (insn & 0x380) { -+ case 0x280: -+ /* FDC instruction */ -+ fallthrough; -+ case 0x380: -+ /* PDC and FIC instructions */ -+ if (printk_ratelimit()) { -+ pr_warn("BUG: nullifying cache flush/purge instruction\n"); -+ show_regs(regs); -+ } -+ if (insn & 0x20) { -+ /* Base modification */ -+ breg = (insn >> 21) & 0x1f; -+ xreg = (insn >> 16) & 0x1f; -+ if (breg && xreg) -+ regs->gr[breg] += regs->gr[xreg]; -+ } -+ regs->gr[0] |= PSW_N; -+ return 1; -+ -+ case 0x180: -+ /* PROBE instruction */ -+ treg = insn & 0x1f; -+ if (regs->isr) { -+ tsk = current; -+ mm = tsk->mm; -+ if (mm) { -+ /* Search for VMA */ -+ address = regs->ior; -+ mmap_read_lock(mm); -+ vma = find_vma_prev(mm, address, &prev_vma); -+ mmap_read_unlock(mm); -+ -+ /* -+ * Check if access to the VMA is okay. -+ * We don't allow for stack expansion. -+ */ -+ acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; -+ if (vma -+ && address >= vma->vm_start -+ && (vma->vm_flags & acc_type) == acc_type) -+ val = 1; -+ } -+ } -+ if (treg) -+ regs->gr[treg] = val; -+ regs->gr[0] |= PSW_N; -+ return 1; -+ -+ case 0x300: -+ /* LPA instruction */ -+ if (insn & 0x20) { -+ /* Base modification */ -+ breg = (insn >> 21) & 0x1f; -+ xreg = (insn >> 16) & 0x1f; -+ if (breg && xreg) -+ regs->gr[breg] += regs->gr[xreg]; -+ } -+ treg = insn & 0x1f; -+ if (treg) -+ regs->gr[treg] = 0; -+ regs->gr[0] |= PSW_N; -+ return 1; -+ -+ default: -+ break; -+ } -+ -+ return 0; -+} -diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c -index 24426a7e1a5e5..cc15d737fda64 100644 ---- a/arch/parisc/mm/fixmap.c -+++ b/arch/parisc/mm/fixmap.c -@@ -20,12 +20,9 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) - pte_t *pte; - - if (pmd_none(*pmd)) -- pmd = pmd_alloc(NULL, pud, vaddr); -- -- pte = pte_offset_kernel(pmd, vaddr); -- if (pte_none(*pte)) - pte = pte_alloc_kernel(pmd, vaddr); - -+ pte = pte_offset_kernel(pmd, vaddr); - set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); - flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); - } -diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c -index 3f7d6d5b56ac8..e5c18313b5d4f 100644 ---- a/arch/parisc/mm/init.c -+++ b/arch/parisc/mm/init.c -@@ -341,9 +341,9 @@ static void __init setup_bootmem(void) - - static bool kernel_set_to_readonly; - --static void __init map_pages(unsigned long start_vaddr, -- unsigned long start_paddr, unsigned long size, -- pgprot_t pgprot, int force) -+static void __ref map_pages(unsigned long start_vaddr, -+ unsigned long start_paddr, unsigned long size, -+ pgprot_t pgprot, int force) - { - pmd_t *pmd; - pte_t *pg_table; -@@ -453,7 +453,7 @@ void __init set_kernel_text_rw(int enable_read_write) - flush_tlb_all(); - } - --void __ref free_initmem(void) -+void free_initmem(void) - { - unsigned long init_begin = (unsigned long)__init_begin; - unsigned long init_end = (unsigned long)__init_end; -@@ -467,7 +467,6 @@ void __ref free_initmem(void) - /* The init text pages are marked R-X. We have to - * flush the icache and mark them RW- - * -- * This is tricky, because map_pages is in the init section. 
- * Do a dummy remap of the data section first (the data - * section is already PAGE_KERNEL) to pull in the TLB entries - * for map_kernel */ -@@ -842,9 +841,9 @@ void flush_tlb_all(void) - { - int do_recycle; - -- __inc_irq_stat(irq_tlb_count); - do_recycle = 0; - spin_lock(&sid_lock); -+ __inc_irq_stat(irq_tlb_count); - if (dirty_space_ids > RECYCLE_THRESHOLD) { - BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */ - get_dirty_sids(&recycle_ndirty,recycle_dirty_array); -@@ -863,8 +862,8 @@ void flush_tlb_all(void) - #else - void flush_tlb_all(void) - { -- __inc_irq_stat(irq_tlb_count); - spin_lock(&sid_lock); -+ __inc_irq_stat(irq_tlb_count); - flush_tlb_all_local(NULL); - recycle_sids(); - spin_unlock(&sid_lock); -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index ba5b661893588..27222b75d2a4b 100644 ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -138,7 +138,7 @@ config PPC - select ARCH_HAS_PTE_SPECIAL - select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 - select ARCH_HAS_SET_MEMORY -- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION) -+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION - select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32 - select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_UACCESS_FLUSHCACHE -@@ -150,7 +150,7 @@ config PPC - select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX - select ARCH_STACKWALK - select ARCH_SUPPORTS_ATOMIC_RMW -- select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64 -+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x - select ARCH_USE_BUILTIN_BSWAP - select ARCH_USE_CMPXCHG_LOCKREF if PPC64 - select ARCH_USE_MEMTEST -@@ -190,7 +190,7 @@ config PPC - select HAVE_ARCH_JUMP_LABEL_RELATIVE - select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 - select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14 -- select HAVE_ARCH_KFENCE if PPC32 -+ select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x - select HAVE_ARCH_KGDB - select HAVE_ARCH_MMAP_RND_BITS - select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT -@@ -217,7 +217,6 @@ config PPC - select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH - select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) - select HAVE_IOREMAP_PROT -- select HAVE_IRQ_EXIT_ON_IRQ_STACK - select HAVE_IRQ_TIME_ACCOUNTING - select HAVE_KERNEL_GZIP - select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE -@@ -354,6 +353,10 @@ config ARCH_SUSPEND_NONZERO_CPU - def_bool y - depends on PPC_POWERNV || PPC_PSERIES - -+config ARCH_HAS_ADD_PAGES -+ def_bool y -+ depends on ARCH_ENABLE_MEMORY_HOTPLUG -+ - config PPC_DCR_NATIVE - bool - -@@ -768,7 +771,6 @@ config THREAD_SHIFT - range 13 15 - default "15" if PPC_256K_PAGES - default "14" if PPC64 -- default "14" if KASAN - default "13" - help - Used to define the stack size. The default is almost always what you -diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug -index 192f0ed0097ff..80ce54f59fae8 100644 ---- a/arch/powerpc/Kconfig.debug -+++ b/arch/powerpc/Kconfig.debug -@@ -240,7 +240,7 @@ config PPC_EARLY_DEBUG_40x - - config PPC_EARLY_DEBUG_CPM - bool "Early serial debugging for Freescale CPM-based serial ports" -- depends on SERIAL_CPM -+ depends on SERIAL_CPM=y - help - Select this to enable early debugging for Freescale chips - using a CPM-based serial port. 
This assumes that the bootwrapper -diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile -index aa6808e706470..3dc75040a7563 100644 ---- a/arch/powerpc/Makefile -+++ b/arch/powerpc/Makefile -@@ -17,23 +17,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32) - # Set default 32 bits cross compilers for vdso and boot wrapper - CROSS32_COMPILE ?= - --ifeq ($(HAS_BIARCH),y) --ifeq ($(CROSS32_COMPILE),) --ifdef CONFIG_PPC32 --# These options will be overridden by any -mcpu option that the CPU --# or platform code sets later on the command line, but they are needed --# to set a sane 32-bit cpu target for the 64-bit cross compiler which --# may default to the wrong ISA. --KBUILD_CFLAGS += -mcpu=powerpc --KBUILD_AFLAGS += -mcpu=powerpc --endif --endif --endif -- --ifdef CONFIG_PPC_BOOK3S_32 --KBUILD_CFLAGS += -mcpu=powerpc --endif -- - # If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use - # ppc64_defconfig because we have nothing better to go on. - uname := $(shell uname -m) -@@ -109,7 +92,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian - - ifeq ($(HAS_BIARCH),y) - KBUILD_CFLAGS += -m$(BITS) --KBUILD_AFLAGS += -m$(BITS) -Wl,-a$(BITS) -+KBUILD_AFLAGS += -m$(BITS) - KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) - endif - -@@ -171,9 +154,9 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 - CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) - else - CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) --CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) -+CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 - endif --else -+else ifdef CONFIG_PPC_BOOK3E_64 - CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 - endif - -@@ -185,6 +168,7 @@ endif - endif - - CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) -+AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) - - # Altivec option not allowed with e500mc64 in GCC. - ifdef CONFIG_ALTIVEC -@@ -195,14 +179,6 @@ endif - CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU) - CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU)) - --ifdef CONFIG_PPC32 --ifdef CONFIG_PPC_E500MC --CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc) --else --CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc) --endif --endif -- - asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) - - KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr) -@@ -468,3 +444,11 @@ checkbin: - echo -n '*** Please use a different binutils version.' ; \ - false ; \ - fi -+ @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \ -+ "x${CONFIG_LD_IS_BFD}" = "xy" -a \ -+ "${CONFIG_LD_VERSION}" = "23700" ; then \ -+ echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \ -+ echo 'is unable to handle.' ; \ -+ echo '*** Please use a different binutils version.' 
; \ -+ false ; \ -+ fi -diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile -index 089ee3ea55c8a..1d51b9e21172c 100644 ---- a/arch/powerpc/boot/Makefile -+++ b/arch/powerpc/boot/Makefile -@@ -34,6 +34,7 @@ endif - - BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ - -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ -+ $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ - -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ - $(LINUXINCLUDE) - -diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S -index 1d83966f5ef64..e8f10a5996593 100644 ---- a/arch/powerpc/boot/crt0.S -+++ b/arch/powerpc/boot/crt0.S -@@ -226,16 +226,19 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */ - #ifdef __powerpc64__ - - #define PROM_FRAME_SIZE 512 --#define SAVE_GPR(n, base) std n,8*(n)(base) --#define REST_GPR(n, base) ld n,8*(n)(base) --#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) --#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) --#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) --#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) --#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) --#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) --#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) --#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) -+ -+.macro OP_REGS op, width, start, end, base, offset -+ .Lreg=\start -+ .rept (\end - \start + 1) -+ \op .Lreg,\offset+\width*.Lreg(\base) -+ .Lreg=.Lreg+1 -+ .endr -+.endm -+ -+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, 0 -+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, 0 -+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) -+#define REST_GPR(n, base) REST_GPRS(n, n, base) - - /* prom handles the jump into and return from firmware. The prom args pointer - is loaded in r3. */ -@@ -246,9 +249,7 @@ prom: - stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ - - SAVE_GPR(2, r1) -- SAVE_GPR(13, r1) -- SAVE_8GPRS(14, r1) -- SAVE_10GPRS(22, r1) -+ SAVE_GPRS(13, 31, r1) - mfcr r10 - std r10,8*32(r1) - mfmsr r10 -@@ -283,9 +284,7 @@ prom: - - /* Restore other registers */ - REST_GPR(2, r1) -- REST_GPR(13, r1) -- REST_8GPRS(14, r1) -- REST_10GPRS(22, r1) -+ REST_GPRS(13, 31, r1) - ld r10,8*32(r1) - mtcr r10 - -diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts -index 408b486b13dff..cd589539f313f 100644 ---- a/arch/powerpc/boot/dts/charon.dts -+++ b/arch/powerpc/boot/dts/charon.dts -@@ -35,7 +35,7 @@ - }; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x08000000>; // 128MB - }; -diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts -index 0e5e9d3acf79f..19a14e62e65f4 100644 ---- a/arch/powerpc/boot/dts/digsy_mtc.dts -+++ b/arch/powerpc/boot/dts/digsy_mtc.dts -@@ -16,7 +16,7 @@ - model = "intercontrol,digsy-mtc"; - compatible = "intercontrol,digsy-mtc"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x02000000>; // 32MB - }; - -diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi -new file mode 100644 -index 0000000000000..7e2a90cde72e5 ---- /dev/null -+++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi -@@ -0,0 +1,51 @@ -+/* -+ * e500v1 Power ISA Device Tree Source (include) -+ * -+ * Copyright 2012 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+/ { -+ cpus { -+ power-isa-version = "2.03"; -+ power-isa-b; // Base -+ power-isa-e; // Embedded -+ power-isa-atb; // Alternate Time Base -+ power-isa-cs; // Cache Specification -+ power-isa-e.le; // Embedded.Little-Endian -+ power-isa-e.pm; // Embedded.Performance Monitor -+ power-isa-ecl; // Embedded Cache Locking -+ power-isa-mmc; // Memory Coherence -+ power-isa-sp; // Signal Processing Engine -+ power-isa-sp.fs; // SPE.Embedded Float Scalar Single -+ power-isa-sp.fv; // SPE.Embedded Float Vector -+ mmu-type = "power-embedded"; -+ }; -+}; -diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts -index 18a885130538a..e03ae130162ba 100644 ---- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts -+++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts -@@ -7,7 +7,7 @@ - - /dts-v1/; - --/include/ "e500v2_power_isa.dtsi" -+/include/ "e500v1_power_isa.dtsi" - - / { - model = "MPC8540ADS"; -diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts -index ac381e7b1c60e..a2a6c5cf852e9 100644 ---- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts -+++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts -@@ -7,7 +7,7 @@ - - /dts-v1/; - --/include/ "e500v2_power_isa.dtsi" -+/include/ "e500v1_power_isa.dtsi" - - / { - model = "MPC8541CDS"; -diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts -index 9f58db2a7e661..901b6ff06dfbb 100644 ---- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts -+++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts -@@ -7,7 +7,7 @@ - - /dts-v1/; - --/include/ "e500v2_power_isa.dtsi" -+/include/ "e500v1_power_isa.dtsi" - - / { - model = "MPC8555CDS"; -diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts -index 
a24722ccaebf1..c2f9aea78b29f 100644 ---- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts -+++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts -@@ -7,7 +7,7 @@ - - /dts-v1/; - --/include/ "e500v2_power_isa.dtsi" -+/include/ "e500v1_power_isa.dtsi" - - / { - model = "MPC8560ADS"; -diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi -new file mode 100644 -index 0000000000000..437dab3fc0176 ---- /dev/null -+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-2.dtsi -@@ -0,0 +1,44 @@ -+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later -+/* -+ * QorIQ FMan v3 10g port #2 device tree stub [ controller @ offset 0x400000 ] -+ * -+ * Copyright 2022 Sean Anderson -+ * Copyright 2012 - 2015 Freescale Semiconductor Inc. -+ */ -+ -+fman@400000 { -+ fman0_rx_0x08: port@88000 { -+ cell-index = <0x8>; -+ compatible = "fsl,fman-v3-port-rx"; -+ reg = <0x88000 0x1000>; -+ fsl,fman-10g-port; -+ }; -+ -+ fman0_tx_0x28: port@a8000 { -+ cell-index = <0x28>; -+ compatible = "fsl,fman-v3-port-tx"; -+ reg = <0xa8000 0x1000>; -+ fsl,fman-10g-port; -+ }; -+ -+ ethernet@e0000 { -+ cell-index = <0>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe0000 0x1000>; -+ fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>; -+ ptp-timer = <&ptp_timer0>; -+ pcsphy-handle = <&pcsphy0>; -+ }; -+ -+ mdio@e1000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; -+ reg = <0xe1000 0x1000>; -+ fsl,erratum-a011043; /* must ignore read errors */ -+ -+ pcsphy0: ethernet-phy@0 { -+ reg = <0x0>; -+ }; -+ }; -+}; -diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi -new file mode 100644 -index 0000000000000..ad116b17850a8 ---- /dev/null -+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-3.dtsi -@@ -0,0 +1,44 @@ -+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later -+/* -+ * QorIQ FMan v3 10g port #3 device tree stub [ controller @ offset 0x400000 ] -+ * -+ * Copyright 2022 Sean Anderson -+ * Copyright 2012 - 2015 Freescale Semiconductor Inc. 
-+ */ -+ -+fman@400000 { -+ fman0_rx_0x09: port@89000 { -+ cell-index = <0x9>; -+ compatible = "fsl,fman-v3-port-rx"; -+ reg = <0x89000 0x1000>; -+ fsl,fman-10g-port; -+ }; -+ -+ fman0_tx_0x29: port@a9000 { -+ cell-index = <0x29>; -+ compatible = "fsl,fman-v3-port-tx"; -+ reg = <0xa9000 0x1000>; -+ fsl,fman-10g-port; -+ }; -+ -+ ethernet@e2000 { -+ cell-index = <1>; -+ compatible = "fsl,fman-memac"; -+ reg = <0xe2000 0x1000>; -+ fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; -+ ptp-timer = <&ptp_timer0>; -+ pcsphy-handle = <&pcsphy1>; -+ }; -+ -+ mdio@e3000 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; -+ reg = <0xe3000 0x1000>; -+ fsl,erratum-a011043; /* must ignore read errors */ -+ -+ pcsphy1: ethernet-phy@0 { -+ reg = <0x0>; -+ }; -+ }; -+}; -diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi -index c90702b04a530..48e5cd61599c6 100644 ---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi -+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi -@@ -79,6 +79,7 @@ fman0: fman@400000 { - #size-cells = <0>; - compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; - reg = <0xfc000 0x1000>; -+ fsl,erratum-a009885; - }; - - xmdio0: mdio@fd000 { -@@ -86,6 +87,7 @@ fman0: fman@400000 { - #size-cells = <0>; - compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; - reg = <0xfd000 0x1000>; -+ fsl,erratum-a009885; - }; - }; - -diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts -new file mode 100644 -index 0000000000000..d4f5f159d6f23 ---- /dev/null -+++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts -@@ -0,0 +1,29 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * T1040RDB-REV-A Device Tree Source -+ * -+ * Copyright 2014 - 2015 Freescale Semiconductor Inc. 
-+ * -+ */ -+ -+#include "t1040rdb.dts" -+ -+/ { -+ model = "fsl,T1040RDB-REV-A"; -+}; -+ -+&seville_port0 { -+ label = "ETH5"; -+}; -+ -+&seville_port2 { -+ label = "ETH7"; -+}; -+ -+&seville_port4 { -+ label = "ETH9"; -+}; -+ -+&seville_port6 { -+ label = "ETH11"; -+}; -diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts -index af0c8a6f56138..b6733e7e65805 100644 ---- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts -+++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts -@@ -119,7 +119,7 @@ - managed = "in-band-status"; - phy-handle = <&phy_qsgmii_0>; - phy-mode = "qsgmii"; -- label = "ETH5"; -+ label = "ETH3"; - status = "okay"; - }; - -@@ -135,7 +135,7 @@ - managed = "in-band-status"; - phy-handle = <&phy_qsgmii_2>; - phy-mode = "qsgmii"; -- label = "ETH7"; -+ label = "ETH5"; - status = "okay"; - }; - -@@ -151,7 +151,7 @@ - managed = "in-band-status"; - phy-handle = <&phy_qsgmii_4>; - phy-mode = "qsgmii"; -- label = "ETH9"; -+ label = "ETH7"; - status = "okay"; - }; - -@@ -167,7 +167,7 @@ - managed = "in-band-status"; - phy-handle = <&phy_qsgmii_6>; - phy-mode = "qsgmii"; -- label = "ETH11"; -+ label = "ETH9"; - status = "okay"; - }; - -diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi -index 099a598c74c00..bfe1ed5be3374 100644 ---- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi -+++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi -@@ -139,12 +139,12 @@ - fman@400000 { - ethernet@e6000 { - phy-handle = <&phy_rgmii_0>; -- phy-connection-type = "rgmii"; -+ phy-connection-type = "rgmii-id"; - }; - - ethernet@e8000 { - phy-handle = <&phy_rgmii_1>; -- phy-connection-type = "rgmii"; -+ phy-connection-type = "rgmii-id"; - }; - - mdio0: mdio@fc000 { -diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi -index ecbb447920bc6..27714dc2f04a5 100644 ---- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi -+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi -@@ -609,8 +609,8 @@ - /include/ "qoriq-bman1.dtsi" - - /include/ "qoriq-fman3-0.dtsi" --/include/ "qoriq-fman3-0-1g-0.dtsi" --/include/ "qoriq-fman3-0-1g-1.dtsi" -+/include/ "qoriq-fman3-0-10g-2.dtsi" -+/include/ "qoriq-fman3-0-10g-3.dtsi" - /include/ "qoriq-fman3-0-1g-2.dtsi" - /include/ "qoriq-fman3-0-1g-3.dtsi" - /include/ "qoriq-fman3-0-1g-4.dtsi" -@@ -659,3 +659,19 @@ - interrupts = <16 2 1 9>; - }; - }; -+ -+&fman0_rx_0x08 { -+ /delete-property/ fsl,fman-10g-port; -+}; -+ -+&fman0_tx_0x28 { -+ /delete-property/ fsl,fman-10g-port; -+}; -+ -+&fman0_rx_0x09 { -+ /delete-property/ fsl,fman-10g-port; -+}; -+ -+&fman0_tx_0x29 { -+ /delete-property/ fsl,fman-10g-port; -+}; -diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts -index cb2782dd6132c..e7b194775d783 100644 ---- a/arch/powerpc/boot/dts/lite5200.dts -+++ b/arch/powerpc/boot/dts/lite5200.dts -@@ -32,7 +32,7 @@ - }; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x04000000>; // 64MB - }; -diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts -index 2b86c81f90485..547cbe726ff23 100644 ---- a/arch/powerpc/boot/dts/lite5200b.dts -+++ b/arch/powerpc/boot/dts/lite5200b.dts -@@ -31,7 +31,7 @@ - led4 { gpios = <&gpio_simple 2 1>; }; - }; - -- memory { -+ memory@0 { - reg = <0x00000000 0x10000000>; // 256MB - }; - -diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts -index 61cae9dcddef4..f3188018faceb 100644 ---- a/arch/powerpc/boot/dts/media5200.dts -+++ 
b/arch/powerpc/boot/dts/media5200.dts -@@ -32,7 +32,7 @@ - }; - }; - -- memory { -+ memory@0 { - reg = <0x00000000 0x08000000>; // 128MB RAM - }; - -diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi -index 648fe31795f49..8b796f3b11da7 100644 ---- a/arch/powerpc/boot/dts/mpc5200b.dtsi -+++ b/arch/powerpc/boot/dts/mpc5200b.dtsi -@@ -33,7 +33,7 @@ - }; - }; - -- memory: memory { -+ memory: memory@0 { - device_type = "memory"; - reg = <0x00000000 0x04000000>; // 64MB - }; -diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts -index 24a46f65e5299..e0a8d3034417f 100644 ---- a/arch/powerpc/boot/dts/o2d.dts -+++ b/arch/powerpc/boot/dts/o2d.dts -@@ -12,7 +12,7 @@ - model = "ifm,o2d"; - compatible = "ifm,o2d"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x08000000>; // 128MB - }; - -diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi -index 6661955a2be47..b55a9e5bd828c 100644 ---- a/arch/powerpc/boot/dts/o2d.dtsi -+++ b/arch/powerpc/boot/dts/o2d.dtsi -@@ -19,7 +19,7 @@ - model = "ifm,o2d"; - compatible = "ifm,o2d"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x04000000>; // 64MB - }; - -diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts -index eeba7f5507d5d..c2eedbd1f5fcb 100644 ---- a/arch/powerpc/boot/dts/o2dnt2.dts -+++ b/arch/powerpc/boot/dts/o2dnt2.dts -@@ -12,7 +12,7 @@ - model = "ifm,o2dnt2"; - compatible = "ifm,o2d"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x08000000>; // 128MB - }; - -diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts -index fd00396b0593e..e4c1bdd412716 100644 ---- a/arch/powerpc/boot/dts/o3dnt.dts -+++ b/arch/powerpc/boot/dts/o3dnt.dts -@@ -12,7 +12,7 @@ - model = "ifm,o3dnt"; - compatible = "ifm,o2d"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x04000000>; // 64MB - }; - -diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts -index 780e13d99e7b8..1895bc95900cc 100644 ---- a/arch/powerpc/boot/dts/pcm032.dts -+++ b/arch/powerpc/boot/dts/pcm032.dts -@@ -20,7 +20,7 @@ - model = "phytec,pcm032"; - compatible = "phytec,pcm032"; - -- memory { -+ memory@0 { - reg = <0x00000000 0x08000000>; // 128MB - }; - -diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts -index 9ed0bc78967e1..5bb25a9e40a01 100644 ---- a/arch/powerpc/boot/dts/tqm5200.dts -+++ b/arch/powerpc/boot/dts/tqm5200.dts -@@ -32,7 +32,7 @@ - }; - }; - -- memory { -+ memory@0 { - device_type = "memory"; - reg = <0x00000000 0x04000000>; // 64MB - }; -diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig -index 6697c5e6682f1..7f7e7add44e7b 100644 ---- a/arch/powerpc/configs/ppc6xx_defconfig -+++ b/arch/powerpc/configs/ppc6xx_defconfig -@@ -243,8 +243,6 @@ CONFIG_ATM_LANE=m - CONFIG_ATM_BR2684=m - CONFIG_BRIDGE=m - CONFIG_VLAN_8021Q=m --CONFIG_DECNET=m --CONFIG_DECNET_ROUTER=y - CONFIG_ATALK=m - CONFIG_DEV_APPLETALK=m - CONFIG_IPDDP=m -@@ -1022,7 +1020,6 @@ CONFIG_NFSD=m - CONFIG_NFSD_V3_ACL=y - CONFIG_NFSD_V4=y - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_CIFS_UPCALL=y - CONFIG_CIFS_XATTR=y - CONFIG_CIFS_POSIX=y -diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig -index b183629f1bcfb..6011977d43c98 100644 ---- a/arch/powerpc/configs/pseries_defconfig -+++ b/arch/powerpc/configs/pseries_defconfig -@@ -41,6 +41,7 @@ CONFIG_DTL=y - CONFIG_SCANLOG=m - CONFIG_PPC_SMLPAR=y - CONFIG_IBMEBUS=y -+CONFIG_LIBNVDIMM=m - 
CONFIG_PAPR_SCM=m - CONFIG_PPC_SVM=y - # CONFIG_PPC_PMAC is not set -@@ -190,7 +191,6 @@ CONFIG_HVCS=m - CONFIG_VIRTIO_CONSOLE=m - CONFIG_IBM_BSR=m - CONFIG_RAW_DRIVER=y --CONFIG_MAX_RAW_DEVS=1024 - CONFIG_I2C_CHARDEV=y - CONFIG_FB=y - CONFIG_FIRMWARE_EDID=y -diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S -index 948d100a29343..fa6bc440cf4ac 100644 ---- a/arch/powerpc/crypto/md5-asm.S -+++ b/arch/powerpc/crypto/md5-asm.S -@@ -38,15 +38,11 @@ - - #define INITIALIZE \ - PPC_STLU r1,-INT_FRAME_SIZE(r1); \ -- SAVE_8GPRS(14, r1); /* push registers onto stack */ \ -- SAVE_4GPRS(22, r1); \ -- SAVE_GPR(26, r1) -+ SAVE_GPRS(14, 26, r1) /* push registers onto stack */ - - #define FINALIZE \ -- REST_8GPRS(14, r1); /* pop registers from stack */ \ -- REST_4GPRS(22, r1); \ -- REST_GPR(26, r1); \ -- addi r1,r1,INT_FRAME_SIZE; -+ REST_GPRS(14, 26, r1); /* pop registers from stack */ \ -+ addi r1,r1,INT_FRAME_SIZE - - #ifdef __BIG_ENDIAN__ - #define LOAD_DATA(reg, off) \ -diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S -index 23e248beff716..f0d5ed557ab14 100644 ---- a/arch/powerpc/crypto/sha1-powerpc-asm.S -+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S -@@ -125,8 +125,7 @@ - - _GLOBAL(powerpc_sha_transform) - PPC_STLU r1,-INT_FRAME_SIZE(r1) -- SAVE_8GPRS(14, r1) -- SAVE_10GPRS(22, r1) -+ SAVE_GPRS(14, 31, r1) - - /* Load up A - E */ - lwz RA(0),0(r3) /* A */ -@@ -184,7 +183,6 @@ _GLOBAL(powerpc_sha_transform) - stw RD(0),12(r3) - stw RE(0),16(r3) - -- REST_8GPRS(14, r1) -- REST_10GPRS(22, r1) -+ REST_GPRS(14, 31, r1) - addi r1,r1,INT_FRAME_SIZE - blr -diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h -index 9a53e29680f41..258174304904b 100644 ---- a/arch/powerpc/include/asm/archrandom.h -+++ b/arch/powerpc/include/asm/archrandom.h -@@ -38,12 +38,7 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v) - #endif /* CONFIG_ARCH_RANDOM */ - - #ifdef CONFIG_PPC_POWERNV --int powernv_hwrng_present(void); - int powernv_get_random_long(unsigned long *v); --int powernv_get_random_real_mode(unsigned long *v); --#else --static inline int powernv_hwrng_present(void) { return 0; } --static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; } - #endif - - #endif /* _ASM_POWERPC_ARCHRANDOM_H */ -diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h -index f5be185cbdf8d..94ad7acfd0565 100644 ---- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h -+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h -@@ -143,6 +143,8 @@ static __always_inline void update_user_segments(u32 val) - update_user_segment(15, val); - } - -+int __init find_free_bat(void); -+unsigned int bat_block_size(unsigned long base, unsigned long top); - #endif /* !__ASSEMBLY__ */ - - /* We happily ignore the smaller BATs on 601, we don't actually use -diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h -index 609c80f671943..f8b94f78403f1 100644 ---- a/arch/powerpc/include/asm/book3s/32/pgtable.h -+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h -@@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte) - #ifndef __ASSEMBLY__ - - int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); -+void unmap_kernel_page(unsigned long va); - - #endif /* !__ASSEMBLY__ */ - -diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h -index 
5d34a8646f081..6866d860d4f30 100644 ---- a/arch/powerpc/include/asm/book3s/64/pgtable.h -+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h -@@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p - return hash__map_kernel_page(ea, pa, prot); - } - -+void unmap_kernel_page(unsigned long va); -+ - static inline int __meminit vmemmap_create_mapping(unsigned long start, - unsigned long page_size, - unsigned long phys) -diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h -new file mode 100644 -index 0000000000000..e8a7b4ffb58c2 ---- /dev/null -+++ b/arch/powerpc/include/asm/bpf_perf_event.h -@@ -0,0 +1,9 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H -+#define _ASM_POWERPC_BPF_PERF_EVENT_H -+ -+#include -+ -+typedef struct user_pt_regs bpf_user_pt_regs_t; -+ -+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */ -diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h -deleted file mode 100644 -index 01b8f6ca4dbbc..0000000000000 ---- a/arch/powerpc/include/asm/bugs.h -+++ /dev/null -@@ -1,15 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-or-later */ --#ifndef _ASM_POWERPC_BUGS_H --#define _ASM_POWERPC_BUGS_H -- --/* -- */ -- --/* -- * This file is included by 'init/main.c' to check for -- * architecture-dependent bugs. -- */ -- --static inline void check_bugs(void) { } -- --#endif /* _ASM_POWERPC_BUGS_H */ -diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h -index 947b5b9c44241..a832aeafe5601 100644 ---- a/arch/powerpc/include/asm/fixmap.h -+++ b/arch/powerpc/include/asm/fixmap.h -@@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx, - BUILD_BUG_ON(idx >= __end_of_fixed_addresses); - else if (WARN_ON(idx >= __end_of_fixed_addresses)) - return; -- -- map_kernel_page(__fix_to_virt(idx), phys, flags); -+ if (pgprot_val(flags)) -+ map_kernel_page(__fix_to_virt(idx), phys, flags); -+ else -+ unmap_kernel_page(__fix_to_virt(idx)); - } - - #define __early_set_fixmap __set_fixmap -diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h -index debe8c4f70626..02d32d6422cd8 100644 ---- a/arch/powerpc/include/asm/ftrace.h -+++ b/arch/powerpc/include/asm/ftrace.h -@@ -96,7 +96,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name - #endif /* PPC64_ELF_ABI_v1 */ - #endif /* CONFIG_FTRACE_SYSCALLS */ - --#ifdef CONFIG_PPC64 -+#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) - #include - - static inline void this_cpu_disable_ftrace(void) -@@ -120,11 +120,13 @@ static inline u8 this_cpu_get_ftrace_enabled(void) - return get_paca()->ftrace_enabled; - } - -+void ftrace_free_init_tramp(void); - #else /* CONFIG_PPC64 */ - static inline void this_cpu_disable_ftrace(void) { } - static inline void this_cpu_enable_ftrace(void) { } - static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { } - static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; } -+static inline void ftrace_free_init_tramp(void) { } - #endif /* CONFIG_PPC64 */ - #endif /* !__ASSEMBLY__ */ - -diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h -index 21cc571ea9c2d..5c98a950eca0d 100644 ---- a/arch/powerpc/include/asm/hw_irq.h -+++ b/arch/powerpc/include/asm/hw_irq.h -@@ -224,6 +224,42 @@ static inline bool arch_irqs_disabled(void) - return arch_irqs_disabled_flags(arch_local_save_flags()); - } - -+static inline void 
set_pmi_irq_pending(void) -+{ -+ /* -+ * Invoked from PMU callback functions to set PMI bit in the paca. -+ * This has to be called with irq's disabled (via hard_irq_disable()). -+ */ -+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) -+ WARN_ON_ONCE(mfmsr() & MSR_EE); -+ -+ get_paca()->irq_happened |= PACA_IRQ_PMI; -+} -+ -+static inline void clear_pmi_irq_pending(void) -+{ -+ /* -+ * Invoked from PMU callback functions to clear the pending PMI bit -+ * in the paca. -+ */ -+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) -+ WARN_ON_ONCE(mfmsr() & MSR_EE); -+ -+ get_paca()->irq_happened &= ~PACA_IRQ_PMI; -+} -+ -+static inline bool pmi_irq_pending(void) -+{ -+ /* -+ * Invoked from PMU callback functions to check if there is a pending -+ * PMI bit in the paca. -+ */ -+ if (get_paca()->irq_happened & PACA_IRQ_PMI) -+ return true; -+ -+ return false; -+} -+ - #ifdef CONFIG_PPC_BOOK3S - /* - * To support disabling and enabling of irq with PMI, set of -@@ -408,6 +444,10 @@ static inline void do_hard_irq_enable(void) - BUILD_BUG(); - } - -+static inline void clear_pmi_irq_pending(void) { } -+static inline void set_pmi_irq_pending(void) { } -+static inline bool pmi_irq_pending(void) { return false; } -+ - static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) - { - } -diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h -index 4f897993b7107..699a88584ae16 100644 ---- a/arch/powerpc/include/asm/imc-pmu.h -+++ b/arch/powerpc/include/asm/imc-pmu.h -@@ -137,7 +137,7 @@ struct imc_pmu { - * are inited. - */ - struct imc_pmu_ref { -- struct mutex lock; -+ spinlock_t lock; - unsigned int id; - int refc; - }; -diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h -index a1d238255f077..a07960066b5fa 100644 ---- a/arch/powerpc/include/asm/interrupt.h -+++ b/arch/powerpc/include/asm/interrupt.h -@@ -567,7 +567,7 @@ DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault); - DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault); - - /* hash_utils.c */ --DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault); -+DECLARE_INTERRUPT_HANDLER(do_hash_fault); - - /* fault.c */ - DECLARE_INTERRUPT_HANDLER(do_page_fault); -diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h -index f130783c8301d..a4fe1292909e6 100644 ---- a/arch/powerpc/include/asm/io.h -+++ b/arch/powerpc/include/asm/io.h -@@ -359,25 +359,37 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) - */ - static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr) - { -- __asm__ __volatile__("stbcix %0,0,%1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ stbcix %0,0,%1; \ -+ .machine pop;" - : : "r" (val), "r" (paddr) : "memory"); - } - - static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr) - { -- __asm__ __volatile__("sthcix %0,0,%1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ sthcix %0,0,%1; \ -+ .machine pop;" - : : "r" (val), "r" (paddr) : "memory"); - } - - static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr) - { -- __asm__ __volatile__("stwcix %0,0,%1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ stwcix %0,0,%1; \ -+ .machine pop;" - : : "r" (val), "r" (paddr) : "memory"); - } - - static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) - { -- __asm__ __volatile__("stdcix %0,0,%1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ stdcix %0,0,%1; \ -+ .machine pop;" - : : "r" 
(val), "r" (paddr) : "memory"); - } - -@@ -389,7 +401,10 @@ static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr) - static inline u8 __raw_rm_readb(volatile void __iomem *paddr) - { - u8 ret; -- __asm__ __volatile__("lbzcix %0,0, %1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ lbzcix %0,0, %1; \ -+ .machine pop;" - : "=r" (ret) : "r" (paddr) : "memory"); - return ret; - } -@@ -397,7 +412,10 @@ static inline u8 __raw_rm_readb(volatile void __iomem *paddr) - static inline u16 __raw_rm_readw(volatile void __iomem *paddr) - { - u16 ret; -- __asm__ __volatile__("lhzcix %0,0, %1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ lhzcix %0,0, %1; \ -+ .machine pop;" - : "=r" (ret) : "r" (paddr) : "memory"); - return ret; - } -@@ -405,7 +423,10 @@ static inline u16 __raw_rm_readw(volatile void __iomem *paddr) - static inline u32 __raw_rm_readl(volatile void __iomem *paddr) - { - u32 ret; -- __asm__ __volatile__("lwzcix %0,0, %1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ lwzcix %0,0, %1; \ -+ .machine pop;" - : "=r" (ret) : "r" (paddr) : "memory"); - return ret; - } -@@ -413,7 +434,10 @@ static inline u32 __raw_rm_readl(volatile void __iomem *paddr) - static inline u64 __raw_rm_readq(volatile void __iomem *paddr) - { - u64 ret; -- __asm__ __volatile__("ldcix %0,0, %1" -+ __asm__ __volatile__(".machine push; \ -+ .machine power6; \ -+ ldcix %0,0, %1; \ -+ .machine pop;" - : "=r" (ret) : "r" (paddr) : "memory"); - return ret; - } -diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h -index 19b6942c6969a..eaf3a562bf1ed 100644 ---- a/arch/powerpc/include/asm/kvm_book3s_64.h -+++ b/arch/powerpc/include/asm/kvm_book3s_64.h -@@ -39,7 +39,6 @@ struct kvm_nested_guest { - pgd_t *shadow_pgtable; /* our page table for this guest */ - u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */ - u64 process_table; /* process table entry for this guest */ -- u64 hfscr; /* HFSCR that the L1 requested for this nested guest */ - long refcnt; /* number of pointers to this struct */ - struct mutex tlb_lock; /* serialize page faults and tlbies */ - struct kvm_nested_guest *next; -diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h -index 080a7feb77318..0d81a9bf37650 100644 ---- a/arch/powerpc/include/asm/kvm_host.h -+++ b/arch/powerpc/include/asm/kvm_host.h -@@ -814,6 +814,7 @@ struct kvm_vcpu_arch { - - /* For support of nested guests */ - struct kvm_nested_guest *nested; -+ u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */ - u32 nested_vcpu_id; - gpa_t nested_io_gpr; - #endif -diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h -index c390ec377baed..1412e643122e4 100644 ---- a/arch/powerpc/include/asm/lppaca.h -+++ b/arch/powerpc/include/asm/lppaca.h -@@ -45,6 +45,7 @@ - #include - #include - #include -+#include - - /* - * The lppaca is the "virtual processor area" registered with the hypervisor, -@@ -123,13 +124,23 @@ struct lppaca { - */ - #define LPPACA_OLD_SHARED_PROC 2 - --static inline bool lppaca_shared_proc(struct lppaca *l) -+#ifdef CONFIG_PPC_PSERIES -+/* -+ * All CPUs should have the same shared proc value, so directly access the PACA -+ * to avoid false positives from DEBUG_PREEMPT. 
-+ */ -+static inline bool lppaca_shared_proc(void) - { -+ struct lppaca *l = local_paca->lppaca_ptr; -+ - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) - return false; - return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); - } - -+#define get_lppaca() (get_paca()->lppaca_ptr) -+#endif -+ - /* - * SLB shadow buffer structure as defined in the PAPR. The save_area - * contains adjacent ESID and VSID pairs for each shadowed SLB. The -diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h -index f06ae00f2a65e..63ea4693ccea6 100644 ---- a/arch/powerpc/include/asm/nohash/32/pgtable.h -+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h -@@ -64,6 +64,7 @@ extern int icache_44x_need_flush; - #ifndef __ASSEMBLY__ - - int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); -+void unmap_kernel_page(unsigned long va); - - #endif /* !__ASSEMBLY__ */ - -@@ -193,10 +194,12 @@ static inline pte_t pte_wrprotect(pte_t pte) - } - #endif - -+#ifndef pte_mkexec - static inline pte_t pte_mkexec(pte_t pte) - { - return __pte(pte_val(pte) | _PAGE_EXEC); - } -+#endif - - #define pmd_none(pmd) (!pmd_val(pmd)) - #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) -@@ -306,30 +309,29 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - } - - #define __HAVE_ARCH_PTEP_SET_WRPROTECT -+#ifndef ptep_set_wrprotect - static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) - { -- unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0))); -- unsigned long set = pte_val(pte_wrprotect(__pte(0))); -- -- pte_update(mm, addr, ptep, clr, set, 0); -+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); - } -+#endif - -+#ifndef __ptep_set_access_flags - static inline void __ptep_set_access_flags(struct vm_area_struct *vma, - pte_t *ptep, pte_t entry, - unsigned long address, - int psize) - { -- pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0))))); -- pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0))))); -- unsigned long set = pte_val(entry) & pte_val(pte_set); -- unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr); -+ unsigned long set = pte_val(entry) & -+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - int huge = psize > mmu_virtual_psize ? 1 : 0; - -- pte_update(vma->vm_mm, address, ptep, clr, set, huge); -+ pte_update(vma->vm_mm, address, ptep, 0, set, huge); - - flush_tlb_page(vma, address); - } -+#endif - - static inline int pte_young(pte_t pte) - { -diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h -index fcc48d590d888..1a89ebdc3acc9 100644 ---- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h -+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h -@@ -136,6 +136,28 @@ static inline pte_t pte_mkhuge(pte_t pte) - - #define pte_mkhuge pte_mkhuge - -+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, -+ unsigned long clr, unsigned long set, int huge); -+ -+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -+{ -+ pte_update(mm, addr, ptep, 0, _PAGE_RO, 0); -+} -+#define ptep_set_wrprotect ptep_set_wrprotect -+ -+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, -+ pte_t entry, unsigned long address, int psize) -+{ -+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC); -+ unsigned long clr = ~pte_val(entry) & _PAGE_RO; -+ int huge = psize > mmu_virtual_psize ? 
1 : 0; -+ -+ pte_update(vma->vm_mm, address, ptep, clr, set, huge); -+ -+ flush_tlb_page(vma, address); -+} -+#define __ptep_set_access_flags __ptep_set_access_flags -+ - static inline unsigned long pgd_leaf_size(pgd_t pgd) - { - if (pgd_val(pgd) & _PMD_PAGE_8M) -diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h -index d081704b13fb9..2225991c69b55 100644 ---- a/arch/powerpc/include/asm/nohash/64/pgtable.h -+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h -@@ -118,11 +118,6 @@ static inline pte_t pte_wrprotect(pte_t pte) - return __pte(pte_val(pte) & ~_PAGE_RW); - } - --static inline pte_t pte_mkexec(pte_t pte) --{ -- return __pte(pte_val(pte) | _PAGE_EXEC); --} -- - #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) - #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) - -@@ -313,6 +308,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, - #define __swp_entry_to_pte(x) __pte((x).val) - - int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); -+void unmap_kernel_page(unsigned long va); - extern int __meminit vmemmap_create_mapping(unsigned long start, - unsigned long page_size, - unsigned long phys); -diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h -index 813918f407653..f798640422c2d 100644 ---- a/arch/powerpc/include/asm/nohash/pte-book3e.h -+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h -@@ -48,7 +48,7 @@ - #define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ - - /* "Higher level" linux bit combinations */ --#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */ -+#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */ - #define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ - #define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) - #define _PAGE_KERNEL_RO (_PAGE_BAP_SR) -@@ -93,11 +93,11 @@ - /* Permission masks used to generate the __P and __S table */ - #define PAGE_NONE __pgprot(_PAGE_BASE) - #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) --#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) -+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) - #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) --#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) - #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) --#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) - - #ifndef __ASSEMBLY__ - static inline pte_t pte_mkprivileged(pte_t pte) -@@ -113,6 +113,16 @@ static inline pte_t pte_mkuser(pte_t pte) - } - - #define pte_mkuser pte_mkuser -+ -+static inline pte_t pte_mkexec(pte_t pte) -+{ -+ if (pte_val(pte) & _PAGE_BAP_UR) -+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); -+ else -+ return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); -+} -+#define pte_mkexec pte_mkexec -+ - #endif /* __ASSEMBLY__ */ - - #endif /* __KERNEL__ */ -diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h -index dc05a862e72a1..02c089c5493ad 100644 ---- a/arch/powerpc/include/asm/paca.h -+++ b/arch/powerpc/include/asm/paca.h -@@ -14,7 +14,6 @@ - - #include - #include --#include - #include - #include - #ifdef CONFIG_PPC_BOOK3E -@@ -46,14 +45,11 @@ extern unsigned int debug_smp_processor_id(void); /* from 
linux/smp.h */ - #define get_paca() local_paca - #endif - --#ifdef CONFIG_PPC_PSERIES --#define get_lppaca() (get_paca()->lppaca_ptr) --#endif -- - #define get_slb_shadow() (get_paca()->slb_shadow_ptr) - - struct task_struct; - struct rtas_args; -+struct lppaca; - - /* - * Defines the layout of the paca. -@@ -263,7 +259,6 @@ struct paca_struct { - u64 l1d_flush_size; - #endif - #ifdef CONFIG_PPC_PSERIES -- struct rtas_args *rtas_args_reentrant; - u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ - #endif /* CONFIG_PPC_PSERIES */ - -diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h -index 254687258f42b..03ae544eb6cc4 100644 ---- a/arch/powerpc/include/asm/page.h -+++ b/arch/powerpc/include/asm/page.h -@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn) - #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) - #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) - --#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) -+#define virt_addr_valid(vaddr) ({ \ -+ unsigned long _addr = (unsigned long)vaddr; \ -+ _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \ -+ pfn_valid(virt_to_pfn(_addr)); \ -+}) - - /* - * On Book-E parts we need __va to parse the device tree and we can't -@@ -212,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn) - #define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET) - #else - #ifdef CONFIG_PPC64 -+ -+#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x)) -+ - /* - * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET - * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. -@@ -219,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn) - */ - #define __va(x) \ - ({ \ -- VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \ -+ VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \ - (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \ - }) - - #define __pa(x) \ - ({ \ -- VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \ -+ VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \ - (unsigned long)(x) & 0x0fffffffffffffffUL; \ - }) - -diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h -index bcb7b5f917be6..0a333ac716e0b 100644 ---- a/arch/powerpc/include/asm/paravirt.h -+++ b/arch/powerpc/include/asm/paravirt.h -@@ -6,6 +6,7 @@ - #include - #ifdef CONFIG_PPC64 - #include -+#include - #include - #endif - -@@ -97,7 +98,23 @@ static inline bool vcpu_is_preempted(int cpu) - - #ifdef CONFIG_PPC_SPLPAR - if (!is_kvm_guest()) { -- int first_cpu = cpu_first_thread_sibling(smp_processor_id()); -+ int first_cpu; -+ -+ /* -+ * The result of vcpu_is_preempted() is used in a -+ * speculative way, and is always subject to invalidation -+ * by events internal and external to Linux. While we can -+ * be called in preemptable context (in the Linux sense), -+ * we're not accessing per-cpu resources in a way that can -+ * race destructively with Linux scheduler preemption and -+ * migration, and callers can tolerate the potential for -+ * error introduced by sampling the CPU index without -+ * pinning the task to it. So it is permissible to use -+ * raw_smp_processor_id() here to defeat the preempt debug -+ * warnings that can arise from using smp_processor_id() -+ * in arbitrary contexts. -+ */ -+ first_cpu = cpu_first_thread_sibling(raw_smp_processor_id()); - - /* - * Preemption can only happen at core granularity. 
This CPU -diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h -index 83e0f701ebc67..217d8fb246354 100644 ---- a/arch/powerpc/include/asm/plpar_wrappers.h -+++ b/arch/powerpc/include/asm/plpar_wrappers.h -@@ -9,6 +9,7 @@ - - #include - #include -+#include - #include - - static inline long poll_pending(void) -diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h -index baea657bc8687..536d997539bb7 100644 ---- a/arch/powerpc/include/asm/ppc-opcode.h -+++ b/arch/powerpc/include/asm/ppc-opcode.h -@@ -249,6 +249,7 @@ - #define PPC_INST_COPY 0x7c20060c - #define PPC_INST_DCBA 0x7c0005ec - #define PPC_INST_DCBA_MASK 0xfc0007fe -+#define PPC_INST_DSSALL 0x7e00066c - #define PPC_INST_ISEL 0x7c00001e - #define PPC_INST_ISEL_MASK 0xfc00003e - #define PPC_INST_LSWI 0x7c0004aa -@@ -498,6 +499,7 @@ - #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) - #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) - #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) -+#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) - #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) - #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) - #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i)) -@@ -575,6 +577,7 @@ - #define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b)) - #define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b)) - #define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b)) -+#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL) - #define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh)) - #define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b)) - #define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c)) -diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h -index 1c538a9a11e09..f21e6bde17a1e 100644 ---- a/arch/powerpc/include/asm/ppc_asm.h -+++ b/arch/powerpc/include/asm/ppc_asm.h -@@ -16,30 +16,41 @@ - - #define SZL (BITS_PER_LONG/8) - -+/* -+ * This expands to a sequence of operations with reg incrementing from -+ * start to end inclusive, of this form: -+ * -+ * op reg, (offset + (width * reg))(base) -+ * -+ * Note that offset is not the offset of the first operation unless start -+ * is zero (or width is zero). -+ */ -+.macro OP_REGS op, width, start, end, base, offset -+ .Lreg=\start -+ .rept (\end - \start + 1) -+ \op .Lreg, \offset + \width * .Lreg(\base) -+ .Lreg=.Lreg+1 -+ .endr -+.endm -+ - /* - * Macros for storing registers into and loading registers from - * exception frames. 
- */ - #ifdef __powerpc64__ --#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base) --#define REST_GPR(n, base) ld n,GPR0+8*(n)(base) --#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base) --#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base) -+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0 -+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0 -+#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base) -+#define REST_NVGPRS(base) REST_GPRS(14, 31, base) - #else --#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) --#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base) --#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base) --#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base) -+#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0 -+#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0 -+#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base) -+#define REST_NVGPRS(base) REST_GPRS(13, 31, base) - #endif - --#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) --#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) --#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) --#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) --#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) --#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) --#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) --#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) -+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) -+#define REST_GPR(n, base) REST_GPRS(n, n, base) - - #define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) - #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) -diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h -index 9dc97d2f9d27e..a05b34cf5f408 100644 ---- a/arch/powerpc/include/asm/rtas.h -+++ b/arch/powerpc/include/asm/rtas.h -@@ -240,7 +240,6 @@ extern struct rtas_t rtas; - extern int rtas_token(const char *service); - extern int rtas_service_present(const char *service); - extern int rtas_call(int token, int, int, int *, ...); --int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...); - void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, - int nret, ...); - extern void __noreturn rtas_restart(char *cmd); -diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h -index 6e4af4492a144..e92d39c0cd1d9 100644 ---- a/arch/powerpc/include/asm/sections.h -+++ b/arch/powerpc/include/asm/sections.h -@@ -6,22 +6,10 @@ - #include - #include - --#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed -- - #include - --extern bool init_mem_is_free; -- --static inline int arch_is_kernel_initmem_freed(unsigned long addr) --{ -- if (!init_mem_is_free) -- return 0; -- -- return addr >= (unsigned long)__init_begin && -- addr < (unsigned long)__init_end; --} -- - extern char __head_end[]; -+extern char __srwx_boundary[]; - - #ifdef __powerpc64__ - -diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h -index b040094f79202..7ebc807aa8cc8 100644 ---- a/arch/powerpc/include/asm/set_memory.h -+++ b/arch/powerpc/include/asm/set_memory.h -@@ -6,6 +6,8 @@ - #define SET_MEMORY_RW 1 - #define SET_MEMORY_NX 2 - #define SET_MEMORY_X 3 -+#define SET_MEMORY_NP 4 /* Set memory non present */ -+#define SET_MEMORY_P 5 /* Set memory present */ - - int 
change_memory_attr(unsigned long addr, int numpages, long action); - -@@ -29,6 +31,14 @@ static inline int set_memory_x(unsigned long addr, int numpages) - return change_memory_attr(addr, numpages, SET_MEMORY_X); - } - --int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot); -+static inline int set_memory_np(unsigned long addr, int numpages) -+{ -+ return change_memory_attr(addr, numpages, SET_MEMORY_NP); -+} -+ -+static inline int set_memory_p(unsigned long addr, int numpages) -+{ -+ return change_memory_attr(addr, numpages, SET_MEMORY_P); -+} - - #endif -diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h -index 8985791a2ba57..3c037a12c84db 100644 ---- a/arch/powerpc/include/asm/simple_spinlock.h -+++ b/arch/powerpc/include/asm/simple_spinlock.h -@@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock) - static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) - { - unsigned long tmp, token; -+ unsigned int eh = IS_ENABLED(CONFIG_PPC64); - - token = LOCK_TOKEN; - __asm__ __volatile__( --"1: lwarx %0,0,%2,1\n\ -+"1: lwarx %0,0,%2,%[eh]\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n\ - stwcx. %1,0,%2\n\ -@@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) - PPC_ACQUIRE_BARRIER - "2:" - : "=&r" (tmp) -- : "r" (token), "r" (&lock->slock) -+ : "r" (token), "r" (&lock->slock), [eh] "n" (eh) - : "cr0", "memory"); - - return tmp; -@@ -177,9 +178,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) - static inline long __arch_read_trylock(arch_rwlock_t *rw) - { - long tmp; -+ unsigned int eh = IS_ENABLED(CONFIG_PPC64); - - __asm__ __volatile__( --"1: lwarx %0,0,%1,1\n" -+"1: lwarx %0,0,%1,%[eh]\n" - __DO_SIGN_EXTEND - " addic. %0,%0,1\n\ - ble- 2f\n" -@@ -187,7 +189,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) - bne- 1b\n" - PPC_ACQUIRE_BARRIER - "2:" : "=&r" (tmp) -- : "r" (&rw->lock) -+ : "r" (&rw->lock), [eh] "n" (eh) - : "cr0", "xer", "memory"); - - return tmp; -@@ -200,17 +202,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw) - static inline long __arch_write_trylock(arch_rwlock_t *rw) - { - long tmp, token; -+ unsigned int eh = IS_ENABLED(CONFIG_PPC64); - - token = WRLOCK_TOKEN; - __asm__ __volatile__( --"1: lwarx %0,0,%2,1\n\ -+"1: lwarx %0,0,%2,%[eh]\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n" - " stwcx. 
%1,0,%2\n\ - bne- 1b\n" - PPC_ACQUIRE_BARRIER - "2:" : "=&r" (tmp) -- : "r" (token), "r" (&rw->lock) -+ : "r" (token), "r" (&rw->lock), [eh] "n" (eh) - : "cr0", "memory"); - - return tmp; -diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h -index c60ebd04b2ed9..61b968d9fba7c 100644 ---- a/arch/powerpc/include/asm/syscall.h -+++ b/arch/powerpc/include/asm/syscall.h -@@ -90,7 +90,7 @@ static inline void syscall_get_arguments(struct task_struct *task, - unsigned long val, mask = -1UL; - unsigned int n = 6; - -- if (is_32bit_task()) -+ if (is_tsk_32bit_task(task)) - mask = 0xffffffff; - - while (n--) { -@@ -115,7 +115,7 @@ static inline void syscall_set_arguments(struct task_struct *task, - - static inline int syscall_get_arch(struct task_struct *task) - { -- if (is_32bit_task()) -+ if (is_tsk_32bit_task(task)) - return AUDIT_ARCH_PPC; - else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) - return AUDIT_ARCH_PPC64LE; -diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h -index 7ee66ae5444d1..0e85d7aa395d0 100644 ---- a/arch/powerpc/include/asm/syscalls.h -+++ b/arch/powerpc/include/asm/syscalls.h -@@ -8,6 +8,18 @@ - #include - #include - -+/* -+ * long long munging: -+ * The 32 bit ABI passes long longs in an odd even register pair. -+ * High and low parts are swapped depending on endian mode, -+ * so define a macro (similar to mips linux32) to handle that. -+ */ -+#ifdef __LITTLE_ENDIAN__ -+#define merge_64(low, high) (((u64)high << 32) | low) -+#else -+#define merge_64(high, low) (((u64)high << 32) | low) -+#endif -+ - struct rtas_args; - - asmlinkage long sys_mmap(unsigned long addr, size_t len, -diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index b4ec6c7dd72ee..87013ac2a6401 100644 ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -14,10 +14,16 @@ - - #ifdef __KERNEL__ - --#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT -+#ifdef CONFIG_KASAN -+#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1) -+#else -+#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT -+#endif -+ -+#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT - #define THREAD_SHIFT PAGE_SHIFT - #else --#define THREAD_SHIFT CONFIG_THREAD_SHIFT -+#define THREAD_SHIFT MIN_THREAD_SHIFT - #endif - - #define THREAD_SIZE (1 << THREAD_SHIFT) -@@ -165,8 +171,10 @@ static inline bool test_thread_local_flags(unsigned int flags) - - #ifdef CONFIG_COMPAT - #define is_32bit_task() (test_thread_flag(TIF_32BIT)) -+#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT)) - #else - #define is_32bit_task() (IS_ENABLED(CONFIG_PPC32)) -+#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32)) - #endif - - #if defined(CONFIG_PPC64) -diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h -index fa2e76e4093a3..14b4489de52c5 100644 ---- a/arch/powerpc/include/asm/timex.h -+++ b/arch/powerpc/include/asm/timex.h -@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void) - { - return mftb(); - } -+#define get_cycles get_cycles - - #endif /* __KERNEL__ */ - #endif /* _ASM_POWERPC_TIMEX_H */ -diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h -index 22c79ab400060..b2680070d65d6 100644 ---- a/arch/powerpc/include/asm/uaccess.h -+++ b/arch/powerpc/include/asm/uaccess.h -@@ -125,8 +125,11 @@ do { \ - */ - #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \ - __asm__ __volatile__( \ -+ ".machine push\n" \ 
-+ ".machine altivec\n" \ - "1: lvx 0,0,%1 # get user\n" \ - " stvx 0,0,%2 # put kernel\n" \ -+ ".machine pop\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: li %0,%3\n" \ -diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h -index 57573d9c1e091..56834a8a14654 100644 ---- a/arch/powerpc/include/asm/vas.h -+++ b/arch/powerpc/include/asm/vas.h -@@ -112,7 +112,7 @@ static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref) - * Receive window attributes specified by the (in-kernel) owner of window. - */ - struct vas_rx_win_attr { -- void *rx_fifo; -+ u64 rx_fifo; - int rx_fifo_size; - int wcreds_max; - -diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h -index f3f4710d4ff52..99129b0cd8b8a 100644 ---- a/arch/powerpc/include/asm/word-at-a-time.h -+++ b/arch/powerpc/include/asm/word-at-a-time.h -@@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask) - return leading_zero_bits >> 3; - } - --static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) -+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) - { - unsigned long rhs = val | c->low_bits; - *data = rhs; -diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h -deleted file mode 100644 -index 5e1e648aeec4c..0000000000000 ---- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h -+++ /dev/null -@@ -1,9 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ --#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ --#define _UAPI__ASM_BPF_PERF_EVENT_H__ -- --#include -- --typedef struct user_pt_regs bpf_user_pt_regs_t; -- --#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ -diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile -index 7be36c1e1db6d..ed91d5b9ffc63 100644 ---- a/arch/powerpc/kernel/Makefile -+++ b/arch/powerpc/kernel/Makefile -@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC - CFLAGS_btext.o += -fPIC - endif - -+CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -@@ -19,6 +20,7 @@ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_prom_init.o += -fno-stack-protector - CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_prom_init.o += -ffreestanding -+CFLAGS_prom_init.o += $(call cc-option, -ftrivial-auto-var-init=uninitialized) - - ifdef CONFIG_FUNCTION_TRACER - # Do not trace early boot code -@@ -196,3 +198,6 @@ clean-files := vmlinux.lds - # Force dependency (incbin is bad) - $(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg - $(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg -+ -+# for cleaning -+subdir- += vdso32 vdso64 -diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c -index 803c2a45b22ac..1cffb5e7c38d6 100644 ---- a/arch/powerpc/kernel/btext.c -+++ b/arch/powerpc/kernel/btext.c -@@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout) - rc = btext_initialize(np); - printk("result: %d\n", rc); - } -- if (rc == 0) -+ if (rc == 0) { -+ of_node_put(np); - break; -+ } - } - return rc; - } -diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c -index 038ce8d9061d1..8920862ffd791 100644 ---- a/arch/powerpc/kernel/dma-iommu.c -+++ b/arch/powerpc/kernel/dma-iommu.c -@@ -144,7 +144,7 @@ static bool 
dma_iommu_bypass_supported(struct device *dev, u64 mask) - /* We support DMA to/from any memory page via the iommu */ - int dma_iommu_dma_supported(struct device *dev, u64 mask) - { -- struct iommu_table *tbl = get_iommu_table_base(dev); -+ struct iommu_table *tbl; - - if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) { - /* -@@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask) - return 1; - } - -+ tbl = get_iommu_table_base(dev); -+ - if (!tbl) { - dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask); - return 0; -diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S -index 61fdd53cdd9af..c62dd98159653 100644 ---- a/arch/powerpc/kernel/entry_32.S -+++ b/arch/powerpc/kernel/entry_32.S -@@ -90,8 +90,7 @@ transfer_to_syscall: - stw r12,8(r1) - stw r2,_TRAP(r1) - SAVE_GPR(0, r1) -- SAVE_4GPRS(3, r1) -- SAVE_2GPRS(7, r1) -+ SAVE_GPRS(3, 8, r1) - addi r2,r10,-THREAD - SAVE_NVGPRS(r1) - -@@ -139,7 +138,7 @@ syscall_exit_finish: - mtxer r5 - lwz r0,GPR0(r1) - lwz r3,GPR3(r1) -- REST_8GPRS(4,r1) -+ REST_GPRS(4, 11, r1) - lwz r12,GPR12(r1) - b 1b - -@@ -232,9 +231,9 @@ fast_exception_return: - beq 3f /* if not, we've got problems */ - #endif - --2: REST_4GPRS(3, r11) -+2: REST_GPRS(3, 6, r11) - lwz r10,_CCR(r11) -- REST_2GPRS(1, r11) -+ REST_GPRS(1, 2, r11) - mtcr r10 - lwz r10,_LINK(r11) - mtlr r10 -@@ -298,16 +297,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - * the reliable stack unwinder later on. Clear it. - */ - stw r0,8(r1) -- REST_4GPRS(7, r1) -- REST_2GPRS(11, r1) -+ REST_GPRS(7, 12, r1) - - mtcr r3 - mtlr r4 - mtctr r5 - mtspr SPRN_XER,r6 - -- REST_4GPRS(2, r1) -- REST_GPR(6, r1) -+ REST_GPRS(2, 6, r1) - REST_GPR(0, r1) - REST_GPR(1, r1) - rfi -@@ -341,8 +338,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - lwz r6,_CCR(r1) - li r0,0 - -- REST_4GPRS(7, r1) -- REST_2GPRS(11, r1) -+ REST_GPRS(7, 12, r1) - - mtlr r3 - mtctr r4 -@@ -354,7 +350,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - */ - stw r0,8(r1) - -- REST_4GPRS(2, r1) -+ REST_GPRS(2, 5, r1) - - bne- cr1,1f /* emulate stack store */ - mtcr r6 -@@ -430,8 +426,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) - bne interrupt_return; \ - lwz r0,GPR0(r1); \ - lwz r2,GPR2(r1); \ -- REST_4GPRS(3, r1); \ -- REST_2GPRS(7, r1); \ -+ REST_GPRS(3, 8, r1); \ - lwz r10,_XER(r1); \ - lwz r11,_CTR(r1); \ - mtspr SPRN_XER,r10; \ -diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S -index 70cff7b49e172..07a1448146e27 100644 ---- a/arch/powerpc/kernel/entry_64.S -+++ b/arch/powerpc/kernel/entry_64.S -@@ -330,22 +330,22 @@ _GLOBAL(enter_rtas) - clrldi r4,r4,2 /* convert to realmode address */ - mtlr r4 - -- li r0,0 -- ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI -- andc r0,r6,r0 -- -- li r9,1 -- rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) -- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE -- andc r6,r0,r9 -- - __enter_rtas: -- sync /* disable interrupts so SRR0/1 */ -- mtmsrd r0 /* don't get trashed */ -- - LOAD_REG_ADDR(r4, rtas) - ld r5,RTASENTRY(r4) /* get the rtas->entry value */ - ld r4,RTASBASE(r4) /* get the rtas->base value */ -+ -+ /* -+ * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we -+ * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in -+ * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S] -+ * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if -+ * MSR[S] is set, it will remain when entering RTAS. 
-+ */ -+ LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI) -+ -+ li r0,0 -+ mtmsrd r0,1 /* disable RI before using SRR0/1 */ - - mtspr SPRN_SRR0,r5 - mtspr SPRN_SRR1,r6 -diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S -index 711c66b76df1a..67dc4e3179a02 100644 ---- a/arch/powerpc/kernel/exceptions-64e.S -+++ b/arch/powerpc/kernel/exceptions-64e.S -@@ -198,8 +198,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) - - stdcx. r0,0,r1 /* to clear the reservation */ - -- REST_4GPRS(2, r1) -- REST_4GPRS(6, r1) -+ REST_GPRS(2, 9, r1) - - ld r10,_CTR(r1) - ld r11,_XER(r1) -@@ -375,9 +374,7 @@ ret_from_mc_except: - exc_##n##_common: \ - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ -- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ -- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ -- std r9,GPR9(r1); /* save r9 in stackframe */ \ -+ SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ - std r10,_NIP(r1); /* save SRR0 to stackframe */ \ - std r11,_MSR(r1); /* save SRR1 to stackframe */ \ - beq 2f; /* if from kernel mode */ \ -@@ -1061,9 +1058,7 @@ bad_stack_book3e: - std r11,_ESR(r1) - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ -- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ -- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ -- std r9,GPR9(r1); /* save r9 in stackframe */ \ -+ SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ - ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ - ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ - mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ -@@ -1077,8 +1072,7 @@ bad_stack_book3e: - std r10,_LINK(r1) - std r11,_CTR(r1) - std r12,_XER(r1) -- SAVE_10GPRS(14,r1) -- SAVE_8GPRS(24,r1) -+ SAVE_GPRS(14, 31, r1) - lhz r12,PACA_TRAP_SAVE(r13) - std r12,_TRAP(r1) - addi r11,r1,INT_FRAME_SIZE -diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S -index eaf1f72131a18..277eccf0f0868 100644 ---- a/arch/powerpc/kernel/exceptions-64s.S -+++ b/arch/powerpc/kernel/exceptions-64s.S -@@ -574,8 +574,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) - ld r10,IAREA+EX_CTR(r13) - std r10,_CTR(r1) - std r2,GPR2(r1) /* save r2 in stackframe */ -- SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */ -- SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */ -+ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ - mflr r9 /* Get LR, later save to stack */ - ld r2,PACATOC(r13) /* get kernel TOC into r2 */ - std r9,_LINK(r1) -@@ -693,8 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) - mtlr r9 - ld r9,_CCR(r1) - mtcr r9 -- REST_8GPRS(2, r1) -- REST_4GPRS(10, r1) -+ REST_GPRS(2, 13, r1) - REST_GPR(0, r1) - /* restore original r1. 
*/ - ld r1,GPR1(r1) -diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c -index b7ceb041743c9..d496dc5151aa1 100644 ---- a/arch/powerpc/kernel/fadump.c -+++ b/arch/powerpc/kernel/fadump.c -@@ -642,6 +642,7 @@ int __init fadump_reserve_mem(void) - return ret; - error_out: - fw_dump.fadump_enabled = 0; -+ fw_dump.reserve_dump_area_size = 0; - return 0; - } - -@@ -861,7 +862,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info) - sizeof(struct fadump_memory_range)); - return 0; - } -- - static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, - u64 base, u64 end) - { -@@ -880,7 +880,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, - start = mem_ranges[mrange_info->mem_range_cnt - 1].base; - size = mem_ranges[mrange_info->mem_range_cnt - 1].size; - -- if ((start + size) == base) -+ /* -+ * Boot memory area needs separate PT_LOAD segment(s) as it -+ * is moved to a different location at the time of crash. -+ * So, fold only if the region is not boot memory area. -+ */ -+ if ((start + size) == base && start >= fw_dump.boot_mem_top) - is_adjacent = true; - } - if (!is_adjacent) { -@@ -1641,6 +1646,14 @@ int __init setup_fadump(void) - else if (fw_dump.reserve_dump_area_size) - fw_dump.ops->fadump_init_mem_struct(&fw_dump); - -+ /* -+ * In case of panic, fadump is triggered via ppc_panic_event() -+ * panic notifier. Setting crash_kexec_post_notifiers to 'true' -+ * lets panic() function take crash friendly path before panic -+ * notifiers are invoked. -+ */ -+ crash_kexec_post_notifiers = true; -+ - return 1; - } - subsys_initcall(setup_fadump); -diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c -index c7022c41cc314..20328f72f9f2b 100644 ---- a/arch/powerpc/kernel/firmware.c -+++ b/arch/powerpc/kernel/firmware.c -@@ -31,11 +31,10 @@ int __init check_kvm_guest(void) - if (!hyper_node) - return 0; - -- if (!of_device_is_compatible(hyper_node, "linux,kvm")) -- return 0; -- -- static_branch_enable(&kvm_guest); -+ if (of_device_is_compatible(hyper_node, "linux,kvm")) -+ static_branch_enable(&kvm_guest); - -+ of_node_put(hyper_node); - return 0; - } - core_initcall(check_kvm_guest); // before kvm_guest_init() -diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h -index 6b1ec9e3541b9..261c79bdbe53f 100644 ---- a/arch/powerpc/kernel/head_32.h -+++ b/arch/powerpc/kernel/head_32.h -@@ -115,8 +115,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt) - stw r10,8(r1) - li r10, \trapno - stw r10,_TRAP(r1) -- SAVE_4GPRS(3, r1) -- SAVE_2GPRS(7, r1) -+ SAVE_GPRS(3, 8, r1) - SAVE_NVGPRS(r1) - stw r2,GPR2(r1) - stw r12,_NIP(r1) -@@ -202,11 +201,11 @@ vmap_stack_overflow: - mfspr r1, SPRN_SPRG_THREAD - lwz r1, TASK_CPU - THREAD(r1) - slwi r1, r1, 3 -- addis r1, r1, emergency_ctx@ha -+ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha - #else -- lis r1, emergency_ctx@ha -+ lis r1, emergency_ctx-PAGE_OFFSET@ha - #endif -- lwz r1, emergency_ctx@l(r1) -+ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1) - addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE - EXCEPTION_PROLOG_2 0 vmap_stack_overflow - prepare_transfer_to_handler -diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S -index 7d72ee5ab387c..e783860bea838 100644 ---- a/arch/powerpc/kernel/head_40x.S -+++ b/arch/powerpc/kernel/head_40x.S -@@ -27,6 +27,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -650,7 +651,7 @@ start_here: - b . 
/* prevent prefetch past rfi */ - - /* Set up the initial MMU state so we can do the first level of -- * kernel initialization. This maps the first 16 MBytes of memory 1:1 -+ * kernel initialization. This maps the first 32 MBytes of memory 1:1 - * virtual to physical and more importantly sets the cache mode. - */ - initial_mmu: -@@ -687,6 +688,12 @@ initial_mmu: - tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ - tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ - -+ li r0,62 /* TLB slot 62 */ -+ addis r4,r4,SZ_16M@h -+ addis r3,r3,SZ_16M@h -+ tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ -+ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ -+ - isync - - /* Establish the exception vector base -diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S -index 9bdb95f5694f7..0d073b9fd52c5 100644 ---- a/arch/powerpc/kernel/head_8xx.S -+++ b/arch/powerpc/kernel/head_8xx.S -@@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb) - #ifdef CONFIG_PIN_TLB_DATA - LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) - LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) -+ li r8, 0 - #ifdef CONFIG_PIN_TLB_IMMR - li r0, 3 - #else -@@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb) - mtctr r0 - cmpwi r4, 0 - beq 4f -- LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) - LOAD_REG_ADDR(r9, _sinittext) - - 2: ori r0, r6, MD_EVALID -+ ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT - mtspr SPRN_MD_CTR, r5 - mtspr SPRN_MD_EPN, r0 - mtspr SPRN_MD_TWC, r7 -- mtspr SPRN_MD_RPN, r8 -+ mtspr SPRN_MD_RPN, r12 - addi r5, r5, 0x100 - addis r6, r6, SZ_8M@h - addis r8, r8, SZ_8M@h - cmplw r6, r9 - bdnzt lt, 2b -- --4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) -+4: - 2: ori r0, r6, MD_EVALID -+ ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT - mtspr SPRN_MD_CTR, r5 - mtspr SPRN_MD_EPN, r0 - mtspr SPRN_MD_TWC, r7 -- mtspr SPRN_MD_RPN, r8 -+ mtspr SPRN_MD_RPN, r12 - addi r5, r5, 0x100 - addis r6, r6, SZ_8M@h - addis r8, r8, SZ_8M@h -@@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb) - #endif - #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA) - lis r0, (MD_RSV4I | MD_TWAM)@h -- mtspr SPRN_MI_CTR, r0 -+ mtspr SPRN_MD_CTR, r0 - #endif - mtspr SPRN_SRR1, r10 - mtspr SPRN_SRR0, r11 -diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h -index e5503420b6c6d..bb6d5d0fc4ac8 100644 ---- a/arch/powerpc/kernel/head_booke.h -+++ b/arch/powerpc/kernel/head_booke.h -@@ -87,8 +87,7 @@ END_BTB_FLUSH_SECTION - stw r10, 8(r1) - li r10, \trapno - stw r10,_TRAP(r1) -- SAVE_4GPRS(3, r1) -- SAVE_2GPRS(7, r1) -+ SAVE_GPRS(3, 8, r1) - SAVE_NVGPRS(r1) - stw r2,GPR2(r1) - stw r12,_NIP(r1) -@@ -465,12 +464,21 @@ label: - bl do_page_fault; \ - b interrupt_return - -+/* -+ * Instruction TLB Error interrupt handlers may call InstructionStorage -+ * directly without clearing ESR, so the ESR at this point may be left over -+ * from a prior interrupt. -+ * -+ * In any case, do_page_fault for BOOK3E does not use ESR and always expects -+ * dsisr to be 0. ESR_DST from a prior store in particular would confuse fault -+ * handling. 
-+ */ - #define INSTRUCTION_STORAGE_EXCEPTION \ - START_EXCEPTION(InstructionStorage) \ -- NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \ -- mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ -+ NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \ -+ li r5,0; /* Store 0 in regs->esr (dsisr) */ \ - stw r5,_ESR(r11); \ -- stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \ -+ stw r12, _DEAR(r11); /* Set regs->dear (dar) to SRR0 */ \ - prepare_transfer_to_handler; \ - bl do_page_fault; \ - b interrupt_return -diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c -index 1f835539fda42..77cd4c5a2d631 100644 ---- a/arch/powerpc/kernel/idle.c -+++ b/arch/powerpc/kernel/idle.c -@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg) - { - ppc_md.power_save = NULL; - cpuidle_disable = IDLE_POWERSAVE_OFF; -- return 0; -+ return 1; - } - __setup("powersave=off", powersave_off); - -@@ -82,7 +82,7 @@ void power4_idle(void) - return; - - if (cpu_has_feature(CPU_FTR_ALTIVEC)) -- asm volatile("DSSALL ; sync" ::: "memory"); -+ asm volatile(PPC_DSSALL " ; sync" ::: "memory"); - - power4_idle_nap(); - -diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S -index 13cad9297d822..3c097356366b8 100644 ---- a/arch/powerpc/kernel/idle_6xx.S -+++ b/arch/powerpc/kernel/idle_6xx.S -@@ -129,7 +129,7 @@ BEGIN_FTR_SECTION - END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) - mtspr SPRN_HID0,r4 - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - sync - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */ -diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c -index de10a26972581..e93f67c3af76b 100644 ---- a/arch/powerpc/kernel/interrupt.c -+++ b/arch/powerpc/kernel/interrupt.c -@@ -53,16 +53,18 @@ static inline bool exit_must_hard_disable(void) - */ - static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable) - { -+ bool must_hard_disable = (exit_must_hard_disable() || !restartable); -+ - /* This must be done with RI=1 because tracing may touch vmaps */ - trace_hardirqs_on(); - -- if (exit_must_hard_disable() || !restartable) -+ if (must_hard_disable) - __hard_EE_RI_disable(); - - #ifdef CONFIG_PPC64 - /* This pattern matches prep_irq_for_idle */ - if (unlikely(lazy_irq_pending_nocheck())) { -- if (exit_must_hard_disable() || !restartable) { -+ if (must_hard_disable) { - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; - __hard_RI_enable(); - } -@@ -148,7 +150,7 @@ notrace long system_call_exception(long r3, long r4, long r5, - */ - if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && - unlikely(MSR_TM_TRANSACTIONAL(regs->msr))) -- current_thread_info()->flags |= _TIF_RESTOREALL; -+ set_bits(_TIF_RESTOREALL, ¤t_thread_info()->flags); - - /* - * If the system call was made with a transaction active, doom it and -@@ -266,7 +268,7 @@ static void check_return_regs_valid(struct pt_regs *regs) - if (trap_is_scv(regs)) - return; - -- trap = regs->trap; -+ trap = TRAP(regs); - // EE in HV mode sets HSRRs like 0xea0 - if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL) - trap = 0xea0; -@@ -529,7 +531,6 @@ void preempt_schedule_irq(void); - - notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) - { -- unsigned long flags; - unsigned long ret = 0; - unsigned long kuap; - bool stack_store = current_thread_info()->flags & -@@ -546,7 +547,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) - - kuap = kuap_get_and_assert_locked(); - -- local_irq_save(flags); -+ local_irq_disable(); - 
- if (!arch_irq_disabled_regs(regs)) { - /* Returning to a kernel context with local irqs enabled. */ -diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S -index ec950b08a8dcc..ff8c8c03f41ac 100644 ---- a/arch/powerpc/kernel/interrupt_64.S -+++ b/arch/powerpc/kernel/interrupt_64.S -@@ -30,21 +30,25 @@ COMPAT_SYS_CALL_TABLE: - .ifc \srr,srr - mfspr r11,SPRN_SRR0 - ld r12,_NIP(r1) -+ clrrdi r11,r11,2 -+ clrrdi r12,r12,2 - 100: tdne r11,r12 -- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) -+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) - mfspr r11,SPRN_SRR1 - ld r12,_MSR(r1) - 100: tdne r11,r12 -- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) -+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) - .else - mfspr r11,SPRN_HSRR0 - ld r12,_NIP(r1) -+ clrrdi r11,r11,2 -+ clrrdi r12,r12,2 - 100: tdne r11,r12 -- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) -+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) - mfspr r11,SPRN_HSRR1 - ld r12,_MSR(r1) - 100: tdne r11,r12 -- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) -+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) - .endif - #endif - .endm -@@ -162,10 +166,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) - * The value of AMR only matters while we're in the kernel. - */ - mtcr r2 -- ld r2,GPR2(r1) -- ld r3,GPR3(r1) -- ld r13,GPR13(r1) -- ld r1,GPR1(r1) -+ REST_GPRS(2, 3, r1) -+ REST_GPR(13, r1) -+ REST_GPR(1, r1) - RFSCV_TO_USER - b . /* prevent speculative execution */ - -@@ -183,9 +186,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) - mtctr r3 - mtlr r4 - mtspr SPRN_XER,r5 -- REST_10GPRS(2, r1) -- REST_2GPRS(12, r1) -- ld r1,GPR1(r1) -+ REST_GPRS(2, 13, r1) -+ REST_GPR(1, r1) - RFI_TO_USER - .Lsyscall_vectored_\name\()_rst_end: - -@@ -374,10 +376,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) - * The value of AMR only matters while we're in the kernel. - */ - mtcr r2 -- ld r2,GPR2(r1) -- ld r3,GPR3(r1) -- ld r13,GPR13(r1) -- ld r1,GPR1(r1) -+ REST_GPRS(2, 3, r1) -+ REST_GPR(13, r1) -+ REST_GPR(1, r1) - RFI_TO_USER - b . 
/* prevent speculative execution */ - -@@ -388,8 +389,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) - mtctr r3 - mtspr SPRN_XER,r4 - ld r0,GPR0(r1) -- REST_8GPRS(4, r1) -- ld r12,GPR12(r1) -+ REST_GPRS(4, 12, r1) - b .Lsyscall_restore_regs_cont - .Lsyscall_rst_end: - -@@ -518,17 +518,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - ld r6,_XER(r1) - li r0,0 - -- REST_4GPRS(7, r1) -- REST_2GPRS(11, r1) -- REST_GPR(13, r1) -+ REST_GPRS(7, 13, r1) - - mtcr r3 - mtlr r4 - mtctr r5 - mtspr SPRN_XER,r6 - -- REST_4GPRS(2, r1) -- REST_GPR(6, r1) -+ REST_GPRS(2, 6, r1) - REST_GPR(0, r1) - REST_GPR(1, r1) - .ifc \srr,srr -@@ -625,8 +622,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - ld r6,_CCR(r1) - li r0,0 - -- REST_4GPRS(7, r1) -- REST_2GPRS(11, r1) -+ REST_GPRS(7, 12, r1) - - mtlr r3 - mtctr r4 -@@ -638,7 +634,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) - */ - std r0,STACK_FRAME_OVERHEAD-16(r1) - -- REST_4GPRS(2, r1) -+ REST_GPRS(2, 5, r1) - - bne- cr1,1f /* emulate stack store */ - mtcr r6 -diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c -index 07093b7cdcb9a..b858f186e9a70 100644 ---- a/arch/powerpc/kernel/iommu.c -+++ b/arch/powerpc/kernel/iommu.c -@@ -68,11 +68,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl) - static void iommu_debugfs_del(struct iommu_table *tbl) - { - char name[10]; -- struct dentry *liobn_entry; - - sprintf(name, "%08lx", tbl->it_index); -- liobn_entry = debugfs_lookup(name, iommu_debugfs_dir); -- debugfs_remove(liobn_entry); -+ debugfs_lookup_and_remove(name, iommu_debugfs_dir); - } - #else - static void iommu_debugfs_add(struct iommu_table *tbl){} -@@ -174,17 +172,28 @@ static int fail_iommu_bus_notify(struct notifier_block *nb, - return 0; - } - --static struct notifier_block fail_iommu_bus_notifier = { -+/* -+ * PCI and VIO buses need separate notifier_block structs, since they're linked -+ * list nodes. Sharing a notifier_block would mean that any notifiers later -+ * registered for PCI buses would also get called by VIO buses and vice versa. 
-+ */ -+static struct notifier_block fail_iommu_pci_bus_notifier = { -+ .notifier_call = fail_iommu_bus_notify -+}; -+ -+#ifdef CONFIG_IBMVIO -+static struct notifier_block fail_iommu_vio_bus_notifier = { - .notifier_call = fail_iommu_bus_notify - }; -+#endif - - static int __init fail_iommu_setup(void) - { - #ifdef CONFIG_PCI -- bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier); -+ bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier); - #endif - #ifdef CONFIG_IBMVIO -- bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier); -+ bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier); - #endif - - return 0; -@@ -776,6 +785,11 @@ bool iommu_table_in_use(struct iommu_table *tbl) - /* ignore reserved bit0 */ - if (tbl->it_offset == 0) - start = 1; -+ -+ /* Simple case with no reserved MMIO32 region */ -+ if (!tbl->it_reserved_start && !tbl->it_reserved_end) -+ return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size; -+ - end = tbl->it_reserved_start - tbl->it_offset; - if (find_next_bit(tbl->it_map, end, start) != end) - return true; -diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c -index 7a7cd6bda53ea..61552f57db0ba 100644 ---- a/arch/powerpc/kernel/kprobes.c -+++ b/arch/powerpc/kernel/kprobes.c -@@ -140,7 +140,13 @@ int arch_prepare_kprobe(struct kprobe *p) - preempt_disable(); - prev = get_kprobe(p->addr - 1); - preempt_enable_no_resched(); -- if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) { -+ -+ /* -+ * When prev is a ftrace-based kprobe, we don't have an insn, and it -+ * doesn't probe for prefixed instruction. -+ */ -+ if (prev && !kprobe_ftrace(prev) && -+ ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) { - printk("Cannot register a kprobe on the second word of prefixed instruction\n"); - ret = -EINVAL; - } -diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c -index 617eba82531cb..6568823cf3063 100644 ---- a/arch/powerpc/kernel/kvm.c -+++ b/arch/powerpc/kernel/kvm.c -@@ -669,7 +669,8 @@ static void __init kvm_use_magic_page(void) - on_each_cpu(kvm_map_magic_page, &features, 1); - - /* Quick self-test to see if the mapping works */ -- if (!fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { -+ if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE, -+ sizeof(u32))) { - kvm_patching_worked = false; - return; - } -diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S -index 225511d73bef5..f2e03ed423d0f 100644 ---- a/arch/powerpc/kernel/l2cr_6xx.S -+++ b/arch/powerpc/kernel/l2cr_6xx.S -@@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR) - - /* Stop DST streams */ - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - sync - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - -@@ -292,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR) - isync - - /* Stop DST streams */ -- DSSALL -+ PPC_DSSALL - sync - - /* Get the current enable bit of the L3CR into r4 */ -@@ -401,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - _GLOBAL(__flush_disable_L1) - /* Stop pending alitvec streams and memory accesses */ - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - sync - -diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c -index ed04a3ba66fe8..40a583e9d3c70 100644 ---- a/arch/powerpc/kernel/module.c -+++ b/arch/powerpc/kernel/module.c -@@ -90,16 +90,17 @@ int module_finalize(const Elf_Ehdr *hdr, - } - - static __always_inline void * --__module_alloc(unsigned long size, unsigned long start, unsigned long 
end) -+__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn) - { - pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC; -+ gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0); - - /* - * Don't do huge page allocations for modules yet until more testing - * is done. STRICT_MODULE_RWX may require extra work to support this - * too. - */ -- return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot, -+ return __vmalloc_node_range(size, 1, start, end, gfp, prot, - VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP, - NUMA_NO_NODE, __builtin_return_address(0)); - } -@@ -114,13 +115,13 @@ void *module_alloc(unsigned long size) - - /* First try within 32M limit from _etext to avoid branch trampolines */ - if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) -- ptr = __module_alloc(size, limit, MODULES_END); -+ ptr = __module_alloc(size, limit, MODULES_END, true); - - if (!ptr) -- ptr = __module_alloc(size, MODULES_VADDR, MODULES_END); -+ ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false); - - return ptr; - #else -- return __module_alloc(size, VMALLOC_START, VMALLOC_END); -+ return __module_alloc(size, VMALLOC_START, VMALLOC_END, false); - #endif - } -diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c -index 6baa676e7cb60..5d77d3f5fbb56 100644 ---- a/arch/powerpc/kernel/module_64.c -+++ b/arch/powerpc/kernel/module_64.c -@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, - const char *name) - { - long reladdr; -+ func_desc_t desc; -+ int i; - - if (is_mprofile_ftrace_call(name)) - return create_ftrace_stub(entry, addr, me); - -- memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); -+ for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) { -+ if (patch_instruction(&entry->jump[i], -+ ppc_inst(ppc64_stub_insns[i]))) -+ return 0; -+ } - - /* Stub uses address relative to r2. 
*/ - reladdr = (unsigned long)entry - my_r2(sechdrs, me); -@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, - } - pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); - -- entry->jump[0] |= PPC_HA(reladdr); -- entry->jump[1] |= PPC_LO(reladdr); -- entry->funcdata = func_desc(addr); -- entry->magic = STUB_MAGIC; -+ if (patch_instruction(&entry->jump[0], -+ ppc_inst(entry->jump[0] | PPC_HA(reladdr)))) -+ return 0; -+ -+ if (patch_instruction(&entry->jump[1], -+ ppc_inst(entry->jump[1] | PPC_LO(reladdr)))) -+ return 0; -+ -+ // func_desc_t is 8 bytes if ABIv2, else 16 bytes -+ desc = func_desc(addr); -+ for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) { -+ if (patch_instruction(((u32 *)&entry->funcdata) + i, -+ ppc_inst(((u32 *)(&desc))[i]))) -+ return 0; -+ } -+ -+ if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC))) -+ return 0; - - return 1; - } -@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me) - me->name, *instruction, instruction); - return 0; - } -+ - /* ld r2,R2_STACK_OFFSET(r1) */ -- *instruction = PPC_INST_LD_TOC; -+ if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC))) -+ return 0; -+ - return 1; - } - -@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, - } - - /* Only replace bits 2 through 26 */ -- *(uint32_t *)location -- = (*(uint32_t *)location & ~0x03fffffc) -+ value = (*(uint32_t *)location & ~0x03fffffc) - | (value & 0x03fffffc); -+ -+ if (patch_instruction((u32 *)location, ppc_inst(value))) -+ return -EFAULT; -+ - break; - - case R_PPC64_REL64: -diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S -index 19ea3312403ca..5c7f0b4b784b2 100644 ---- a/arch/powerpc/kernel/optprobes_head.S -+++ b/arch/powerpc/kernel/optprobes_head.S -@@ -10,8 +10,8 @@ - #include - - #ifdef CONFIG_PPC64 --#define SAVE_30GPRS(base) SAVE_10GPRS(2,base); SAVE_10GPRS(12,base); SAVE_10GPRS(22,base) --#define REST_30GPRS(base) REST_10GPRS(2,base); REST_10GPRS(12,base); REST_10GPRS(22,base) -+#define SAVE_30GPRS(base) SAVE_GPRS(2, 31, base) -+#define REST_30GPRS(base) REST_GPRS(2, 31, base) - #define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop - #else - #define SAVE_30GPRS(base) stmw r2, GPR2(base) -diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c -index 9bd30cac852bf..2de557663a96c 100644 ---- a/arch/powerpc/kernel/paca.c -+++ b/arch/powerpc/kernel/paca.c -@@ -16,7 +16,6 @@ - #include - #include - #include --#include - - #include "setup.h" - -@@ -172,30 +171,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) - - #endif /* CONFIG_PPC_BOOK3S_64 */ - --#ifdef CONFIG_PPC_PSERIES --/** -- * new_rtas_args() - Allocates rtas args -- * @cpu: CPU number -- * @limit: Memory limit for this allocation -- * -- * Allocates a struct rtas_args and return it's pointer, -- * if not in Hypervisor mode -- * -- * Return: Pointer to allocated rtas_args -- * NULL if CPU in Hypervisor Mode -- */ --static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit) --{ -- limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX); -- -- if (early_cpu_has_feature(CPU_FTR_HVMODE)) -- return NULL; -- -- return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES, -- limit, cpu); --} --#endif /* CONFIG_PPC_PSERIES */ -- - /* The Paca is an array with one entry per processor. Each contains an - * lppaca, which contains the information shared between the - * hypervisor and Linux. 
-@@ -234,10 +209,6 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) - /* For now -- if we have threads this will be adjusted later */ - new_paca->tcd_ptr = &new_paca->tcd; - #endif -- --#ifdef CONFIG_PPC_PSERIES -- new_paca->rtas_args_reentrant = NULL; --#endif - } - - /* Put the paca pointer into r13 and SPRG_PACA */ -@@ -309,9 +280,6 @@ void __init allocate_paca(int cpu) - #endif - #ifdef CONFIG_PPC_BOOK3S_64 - paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); --#endif --#ifdef CONFIG_PPC_PSERIES -- paca->rtas_args_reentrant = new_rtas_args(cpu, limit); - #endif - paca_struct_size += sizeof(struct paca_struct); - } -diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c -index c3573430919d2..1aabb82b5f375 100644 ---- a/arch/powerpc/kernel/pci-common.c -+++ b/arch/powerpc/kernel/pci-common.c -@@ -67,23 +67,35 @@ void set_pci_dma_ops(const struct dma_map_ops *dma_ops) - pci_dma_ops = dma_ops; - } - --/* -- * This function should run under locking protection, specifically -- * hose_spinlock. -- */ - static int get_phb_number(struct device_node *dn) - { - int ret, phb_id = -1; -- u32 prop_32; - u64 prop; - - /* - * Try fixed PHB numbering first, by checking archs and reading -- * the respective device-tree properties. Firstly, try powernv by -- * reading "ibm,opal-phbid", only present in OPAL environment. -+ * the respective device-tree properties. Firstly, try reading -+ * standard "linux,pci-domain", then try reading "ibm,opal-phbid" -+ * (only present in powernv OPAL environment), then try device-tree -+ * alias and as the last try to use lower bits of "reg" property. - */ -- ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); -+ ret = of_get_pci_domain_nr(dn); -+ if (ret >= 0) { -+ prop = ret; -+ ret = 0; -+ } -+ if (ret) -+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); -+ - if (ret) { -+ ret = of_alias_get_id(dn, "pci"); -+ if (ret >= 0) { -+ prop = ret; -+ ret = 0; -+ } -+ } -+ if (ret) { -+ u32 prop_32; - ret = of_property_read_u32_index(dn, "reg", 1, &prop_32); - prop = prop_32; - } -@@ -91,18 +103,20 @@ static int get_phb_number(struct device_node *dn) - if (!ret) - phb_id = (int)(prop & (MAX_PHBS - 1)); - -+ spin_lock(&hose_spinlock); -+ - /* We need to be sure to not use the same PHB number twice. */ - if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap)) -- return phb_id; -+ goto out_unlock; - -- /* -- * If not pseries nor powernv, or if fixed PHB numbering tried to add -- * the same PHB number twice, then fallback to dynamic PHB numbering. -- */ -+ /* If everything fails then fallback to dynamic PHB numbering. 
*/ - phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS); - BUG_ON(phb_id >= MAX_PHBS); - set_bit(phb_id, phb_bitmap); - -+out_unlock: -+ spin_unlock(&hose_spinlock); -+ - return phb_id; - } - -@@ -113,10 +127,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev) - phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); - if (phb == NULL) - return NULL; -- spin_lock(&hose_spinlock); -+ - phb->global_number = get_phb_number(dev); -+ -+ spin_lock(&hose_spinlock); - list_add_tail(&phb->list_node, &hose_list); - spin_unlock(&hose_spinlock); -+ - phb->dn = dev; - phb->is_dynamic = slab_is_available(); - #ifdef CONFIG_PPC64 -diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c -index 61571ae239530..335767cea1373 100644 ---- a/arch/powerpc/kernel/pci_dn.c -+++ b/arch/powerpc/kernel/pci_dn.c -@@ -330,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose, - INIT_LIST_HEAD(&pdn->list); - parent = of_get_parent(dn); - pdn->parent = parent ? PCI_DN(parent) : NULL; -+ of_node_put(parent); - if (pdn->parent) - list_add_tail(&pdn->list, &pdn->parent->child_list); - -diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S -index 2d4d21bb46a97..235ae24284519 100644 ---- a/arch/powerpc/kernel/ppc_save_regs.S -+++ b/arch/powerpc/kernel/ppc_save_regs.S -@@ -21,60 +21,33 @@ - * different ABIs, though). - */ - _GLOBAL(ppc_save_regs) -- PPC_STL r0,0*SZL(r3) -+ /* This allows stack frame accessor macros and offsets to be used */ -+ subi r3,r3,STACK_FRAME_OVERHEAD -+ PPC_STL r0,GPR0(r3) - #ifdef CONFIG_PPC32 -- stmw r2, 2*SZL(r3) -+ stmw r2,GPR2(r3) - #else -- PPC_STL r2,2*SZL(r3) -- PPC_STL r3,3*SZL(r3) -- PPC_STL r4,4*SZL(r3) -- PPC_STL r5,5*SZL(r3) -- PPC_STL r6,6*SZL(r3) -- PPC_STL r7,7*SZL(r3) -- PPC_STL r8,8*SZL(r3) -- PPC_STL r9,9*SZL(r3) -- PPC_STL r10,10*SZL(r3) -- PPC_STL r11,11*SZL(r3) -- PPC_STL r12,12*SZL(r3) -- PPC_STL r13,13*SZL(r3) -- PPC_STL r14,14*SZL(r3) -- PPC_STL r15,15*SZL(r3) -- PPC_STL r16,16*SZL(r3) -- PPC_STL r17,17*SZL(r3) -- PPC_STL r18,18*SZL(r3) -- PPC_STL r19,19*SZL(r3) -- PPC_STL r20,20*SZL(r3) -- PPC_STL r21,21*SZL(r3) -- PPC_STL r22,22*SZL(r3) -- PPC_STL r23,23*SZL(r3) -- PPC_STL r24,24*SZL(r3) -- PPC_STL r25,25*SZL(r3) -- PPC_STL r26,26*SZL(r3) -- PPC_STL r27,27*SZL(r3) -- PPC_STL r28,28*SZL(r3) -- PPC_STL r29,29*SZL(r3) -- PPC_STL r30,30*SZL(r3) -- PPC_STL r31,31*SZL(r3) -+ SAVE_GPRS(2, 31, r3) - lbz r0,PACAIRQSOFTMASK(r13) -- PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,SOFTE(r3) - #endif -- /* go up one stack frame for SP */ -- PPC_LL r4,0(r1) -- PPC_STL r4,1*SZL(r3) -+ /* store current SP */ -+ PPC_STL r1,GPR1(r3) - /* get caller's LR */ -+ PPC_LL r4,0(r1) - PPC_LL r0,LRSAVE(r4) -- PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_LINK(r3) - mflr r0 -- PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_NIP(r3) - mfmsr r0 -- PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_MSR(r3) - mfctr r0 -- PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_CTR(r3) - mfxer r0 -- PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_XER(r3) - mfcr r0 -- PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_CCR(r3) - li r0,0 -- PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3) -- PPC_STL r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3) -+ PPC_STL r0,_TRAP(r3) -+ PPC_STL r0,ORIG_GPR3(r3) - blr -diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c -index 50436b52c2133..c590e12199132 100644 ---- a/arch/powerpc/kernel/process.c -+++ 
b/arch/powerpc/kernel/process.c -@@ -1818,7 +1818,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) - tm_reclaim_current(0); - #endif - -- memset(regs->gpr, 0, sizeof(regs->gpr)); -+ memset(®s->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0])); - regs->ctr = 0; - regs->link = 0; - regs->xer = 0; -@@ -2124,12 +2124,12 @@ static unsigned long __get_wchan(struct task_struct *p) - return 0; - - do { -- sp = *(unsigned long *)sp; -+ sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); - if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || - task_is_running(p)) - return 0; - if (count > 0) { -- ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; -+ ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); - if (!in_sched_functions(ip)) - return ip; - } -diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c -index 2e67588f6f6e6..86ffbabd26c6e 100644 ---- a/arch/powerpc/kernel/prom.c -+++ b/arch/powerpc/kernel/prom.c -@@ -751,6 +751,13 @@ void __init early_init_devtree(void *params) - of_scan_flat_dt(early_init_dt_scan_root, NULL); - of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); - -+ /* -+ * As generic code authors expect to be able to use static keys -+ * in early_param() handlers, we initialize the static keys just -+ * before parsing early params (it's fine to call jump_label_init() -+ * more than once). -+ */ -+ jump_label_init(); - parse_early_param(); - - /* make sure we've parsed cmdline for mem= before this */ -diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c -index 18b04b08b9833..f845065c860e3 100644 ---- a/arch/powerpc/kernel/prom_init.c -+++ b/arch/powerpc/kernel/prom_init.c -@@ -2991,7 +2991,7 @@ static void __init fixup_device_tree_efika_add_phy(void) - - /* Check if the phy-handle property exists - bail if it does */ - rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); -- if (!rv) -+ if (rv <= 0) - return; - - /* -diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh -index b183ab9c5107c..dfa5f729f774d 100644 ---- a/arch/powerpc/kernel/prom_init_check.sh -+++ b/arch/powerpc/kernel/prom_init_check.sh -@@ -13,7 +13,7 @@ - # If you really need to reference something from prom_init.o add - # it to the list below: - --grep "^CONFIG_KASAN=y$" .config >/dev/null -+grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null - if [ $? 
-eq 0 ] - then - MEM_FUNCS="__memcpy __memset" -diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c -index 5dca19361316e..09c49632bfe59 100644 ---- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c -+++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c -@@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) - - #ifdef CONFIG_PPC_FPU_REGS - flush_fp_to_thread(child); -- if (fpidx < (PT_FPSCR - PT_FPR0)) -- memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); -- else -+ if (fpidx < (PT_FPSCR - PT_FPR0)) { -+ if (IS_ENABLED(CONFIG_PPC32)) -+ // On 32-bit the index we are passed refers to 32-bit words -+ *data = ((u32 *)child->thread.fp_state.fpr)[fpidx]; -+ else -+ memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); -+ } else - *data = child->thread.fp_state.fpscr; - #else - *data = 0; -@@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) - - #ifdef CONFIG_PPC_FPU_REGS - flush_fp_to_thread(child); -- if (fpidx < (PT_FPSCR - PT_FPR0)) -- memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); -- else -+ if (fpidx < (PT_FPSCR - PT_FPR0)) { -+ if (IS_ENABLED(CONFIG_PPC32)) -+ // On 32-bit the index we are passed refers to 32-bit words -+ ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; -+ else -+ memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); -+ } else - child->thread.fp_state.fpscr = data; - #endif - -diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c -index b8be1d6668b59..54dfa6a2aec8f 100644 ---- a/arch/powerpc/kernel/ptrace/ptrace-view.c -+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c -@@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, - static int ppr_get(struct task_struct *target, const struct user_regset *regset, - struct membuf to) - { -+ if (!target->thread.regs) -+ return -EINVAL; -+ - return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64)); - } - -@@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, const void *kbuf, - const void __user *ubuf) - { -+ if (!target->thread.regs) -+ return -EINVAL; -+ - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.regs->ppr, 0, sizeof(u64)); - } -diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c -index 7c7093c17c45e..ff5e46dbf7c50 100644 ---- a/arch/powerpc/kernel/ptrace/ptrace.c -+++ b/arch/powerpc/kernel/ptrace/ptrace.c -@@ -446,4 +446,7 @@ void __init pt_regs_check(void) - * real registers. 
- */ - BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); -+ -+ // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible -+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); - } -diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S -index 02d4719bf43a8..232e4549defe1 100644 ---- a/arch/powerpc/kernel/reloc_64.S -+++ b/arch/powerpc/kernel/reloc_64.S -@@ -8,8 +8,10 @@ - #include - - RELA = 7 --RELACOUNT = 0x6ffffff9 -+RELASZ = 8 -+RELAENT = 9 - R_PPC64_RELATIVE = 22 -+R_PPC64_UADDR64 = 43 - - /* - * r3 = desired final address of kernel -@@ -25,29 +27,38 @@ _GLOBAL(relocate) - add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */ - ld r10,(p_st - 0b)(r12) - add r10,r10,r12 /* r10 has runtime addr of _stext */ -+ ld r13,(p_sym - 0b)(r12) -+ add r13,r13,r12 /* r13 has runtime addr of .dynsym */ - - /* -- * Scan the dynamic section for the RELA and RELACOUNT entries. -+ * Scan the dynamic section for the RELA, RELASZ and RELAENT entries. - */ - li r7,0 - li r8,0 --1: ld r6,0(r11) /* get tag */ -+.Ltags: -+ ld r6,0(r11) /* get tag */ - cmpdi r6,0 -- beq 4f /* end of list */ -+ beq .Lend_of_list /* end of list */ - cmpdi r6,RELA - bne 2f - ld r7,8(r11) /* get RELA pointer in r7 */ -- b 3f --2: addis r6,r6,(-RELACOUNT)@ha -- cmpdi r6,RELACOUNT@l -+ b 4f -+2: cmpdi r6,RELASZ - bne 3f -- ld r8,8(r11) /* get RELACOUNT value in r8 */ --3: addi r11,r11,16 -- b 1b --4: cmpdi r7,0 /* check we have both RELA and RELACOUNT */ -+ ld r8,8(r11) /* get RELASZ value in r8 */ -+ b 4f -+3: cmpdi r6,RELAENT -+ bne 4f -+ ld r12,8(r11) /* get RELAENT value in r12 */ -+4: addi r11,r11,16 -+ b .Ltags -+.Lend_of_list: -+ cmpdi r7,0 /* check we have RELA, RELASZ, RELAENT */ - cmpdi cr1,r8,0 -- beq 6f -- beq cr1,6f -+ beq .Lout -+ beq cr1,.Lout -+ cmpdi r12,0 -+ beq .Lout - - /* - * Work out linktime address of _stext and hence the -@@ -62,23 +73,39 @@ _GLOBAL(relocate) - - /* - * Run through the list of relocations and process the -- * R_PPC64_RELATIVE ones. -+ * R_PPC64_RELATIVE and R_PPC64_UADDR64 ones. - */ -+ divd r8,r8,r12 /* RELASZ / RELAENT */ - mtctr r8 --5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ -+.Lrels: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */ - cmpdi r0,R_PPC64_RELATIVE -- bne 6f -+ bne .Luaddr64 - ld r6,0(r9) /* reloc->r_offset */ - ld r0,16(r9) /* reloc->r_addend */ -+ b .Lstore -+.Luaddr64: -+ srdi r14,r0,32 /* ELF64_R_SYM(reloc->r_info) */ -+ clrldi r0,r0,32 -+ cmpdi r0,R_PPC64_UADDR64 -+ bne .Lnext -+ ld r6,0(r9) -+ ld r0,16(r9) -+ mulli r14,r14,24 /* 24 == sizeof(elf64_sym) */ -+ add r14,r14,r13 /* elf64_sym[ELF64_R_SYM] */ -+ ld r14,8(r14) -+ add r0,r0,r14 -+.Lstore: - add r0,r0,r3 - stdx r0,r7,r6 -- addi r9,r9,24 -- bdnz 5b -- --6: blr -+.Lnext: -+ add r9,r9,r12 -+ bdnz .Lrels -+.Lout: -+ blr - - .balign 8 - p_dyn: .8byte __dynamic_start - 0b - p_rela: .8byte __rela_dyn_start - 0b -+p_sym: .8byte __dynamic_symtab - 0b - p_st: .8byte _stext - 0b - -diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c -index ff80bbad22a58..a4cd2484dbca2 100644 ---- a/arch/powerpc/kernel/rtas.c -+++ b/arch/powerpc/kernel/rtas.c -@@ -42,13 +42,21 @@ - #include - #include - #include --#include - - /* This is here deliberately so it's only used in this file */ - void enter_rtas(unsigned long); - - static inline void do_enter_rtas(unsigned long args) - { -+ unsigned long msr; -+ -+ /* -+ * Make sure MSR[RI] is currently enabled as it will be forced later -+ * in enter_rtas. 
-+ */ -+ msr = mfmsr(); -+ BUG_ON(!(msr & MSR_RI)); -+ - enter_rtas(args); - - srr_regs_clobbered(); /* rtas uses SRRs, invalidate */ -@@ -407,7 +415,7 @@ static char *__fetch_rtas_last_error(char *altbuf) - buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); - } - if (buf) -- memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX); -+ memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX); - } - - return buf; -@@ -780,6 +788,7 @@ void __noreturn rtas_halt(void) - - /* Must be in the RMO region, so we place it here */ - static char rtas_os_term_buf[2048]; -+static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE; - - void rtas_os_term(char *str) - { -@@ -791,16 +800,20 @@ void rtas_os_term(char *str) - * this property may terminate the partition which we want to avoid - * since it interferes with panic_timeout. - */ -- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || -- RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) -+ if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE) - return; - - snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); - -+ /* -+ * Keep calling as long as RTAS returns a "try again" status, -+ * but don't use rtas_busy_delay(), which potentially -+ * schedules. -+ */ - do { -- status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, -+ status = rtas_call(ibm_os_term_token, 1, 1, NULL, - __pa(rtas_os_term_buf)); -- } while (rtas_busy_delay(status)); -+ } while (rtas_busy_delay_time(status)); - - if (status != 0) - printk(KERN_EMERG "ibm,os-term call failed %d\n", status); -@@ -836,59 +849,6 @@ void rtas_activate_firmware(void) - pr_err("ibm,activate-firmware failed (%i)\n", fwrc); - } - --#ifdef CONFIG_PPC_PSERIES --/** -- * rtas_call_reentrant() - Used for reentrant rtas calls -- * @token: Token for desired reentrant RTAS call -- * @nargs: Number of Input Parameters -- * @nret: Number of Output Parameters -- * @outputs: Array of outputs -- * @...: Inputs for desired RTAS call -- * -- * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off", -- * "ibm,get-xive" and "ibm,set-xive" are currently reentrant. -- * Reentrant calls need their own rtas_args buffer, so not using rtas.args, but -- * PACA one instead. -- * -- * Return: -1 on error, -- * First output value of RTAS call if (nret > 0), -- * 0 otherwise, -- */ --int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) --{ -- va_list list; -- struct rtas_args *args; -- unsigned long flags; -- int i, ret = 0; -- -- if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) -- return -1; -- -- local_irq_save(flags); -- preempt_disable(); -- -- /* We use the per-cpu (PACA) rtas args buffer */ -- args = local_paca->rtas_args_reentrant; -- -- va_start(list, outputs); -- va_rtas_call_unlocked(args, token, nargs, nret, list); -- va_end(list); -- -- if (nret > 1 && outputs) -- for (i = 0; i < nret - 1; ++i) -- outputs[i] = be32_to_cpu(args->rets[i + 1]); -- -- if (nret > 0) -- ret = be32_to_cpu(args->rets[0]); -- -- local_irq_restore(flags); -- preempt_enable(); -- -- return ret; --} -- --#endif /* CONFIG_PPC_PSERIES */ -- - /** - * Find a specific pseries error log in an RTAS extended event log. 
- * @log: RTAS error/event log -@@ -974,7 +934,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = { - { "get-time-of-day", -1, -1, -1, -1, -1 }, - { "ibm,get-vpd", -1, 0, -1, 1, 2 }, - { "ibm,lpar-perftools", -1, 2, 3, -1, -1 }, -- { "ibm,platform-dump", -1, 4, 5, -1, -1 }, -+ { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */ - { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 }, - { "ibm,scan-log-dump", -1, 0, 1, -1, -1 }, - { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 }, -@@ -1023,6 +983,15 @@ static bool block_rtas_call(int token, int nargs, - size = 1; - - end = base + size - 1; -+ -+ /* -+ * Special case for ibm,platform-dump - NULL buffer -+ * address is used to indicate end of dump processing -+ */ -+ if (!strcmp(f->name, "ibm,platform-dump") && -+ base == 0) -+ return false; -+ - if (!in_rmo_buf(base, end)) - goto err; - } -@@ -1203,6 +1172,13 @@ void __init rtas_initialize(void) - no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); - rtas.entry = no_entry ? rtas.base : entry; - -+ /* -+ * Discover these now to avoid device tree lookups in the -+ * panic path. -+ */ -+ if (of_property_read_bool(rtas.dev, "ibm,extended-os-term")) -+ ibm_os_term_token = rtas_token("ibm,os-term"); -+ - /* If RTAS was found, allocate the RMO buffer for it and look for - * the stop-self token if any - */ -@@ -1235,6 +1211,12 @@ int __init early_init_dt_scan_rtas(unsigned long node, - entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); - sizep = of_get_flat_dt_prop(node, "rtas-size", NULL); - -+#ifdef CONFIG_PPC64 -+ /* need this feature to decide the crashkernel offset */ -+ if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL)) -+ powerpc_firmware_features |= FW_FEATURE_LPAR; -+#endif -+ - if (basep && entryp && sizep) { - rtas.base = *basep; - rtas.entry = *entryp; -diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c -index a99179d835382..56bd0aa30f930 100644 ---- a/arch/powerpc/kernel/rtas_flash.c -+++ b/arch/powerpc/kernel/rtas_flash.c -@@ -710,9 +710,9 @@ static int __init rtas_flash_init(void) - if (!rtas_validate_flash_data.buf) - return -ENOMEM; - -- flash_block_cache = kmem_cache_create("rtas_flash_cache", -- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, -- NULL); -+ flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", -+ RTAS_BLK_SIZE, RTAS_BLK_SIZE, -+ 0, 0, RTAS_BLK_SIZE, NULL); - if (!flash_block_cache) { - printk(KERN_ERR "%s: failed to create block cache\n", - __func__); -diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c -index 15fb5ea1b9eaf..cd6fc64ad9ca6 100644 ---- a/arch/powerpc/kernel/security.c -+++ b/arch/powerpc/kernel/security.c -@@ -363,26 +363,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute * - - static int ssb_prctl_get(struct task_struct *task) - { -+ /* -+ * The STF_BARRIER feature is on by default, so if it's off that means -+ * firmware has explicitly said the CPU is not vulnerable via either -+ * the hypercall or device tree. -+ */ -+ if (!security_ftr_enabled(SEC_FTR_STF_BARRIER)) -+ return PR_SPEC_NOT_AFFECTED; -+ -+ /* -+ * If the system's CPU has no known barrier (see setup_stf_barrier()) -+ * then assume that the CPU is not vulnerable. -+ */ - if (stf_enabled_flush_types == STF_BARRIER_NONE) -- /* -- * We don't have an explicit signal from firmware that we're -- * vulnerable or not, we only have certain CPU revisions that -- * are known to be vulnerable. 
-- * -- * We assume that if we're on another CPU, where the barrier is -- * NONE, then we are not vulnerable. -- */ - return PR_SPEC_NOT_AFFECTED; -- else -- /* -- * If we do have a barrier type then we are vulnerable. The -- * barrier is not a global or per-process mitigation, so the -- * only value we can report here is PR_SPEC_ENABLE, which -- * appears as "vulnerable" in /proc. -- */ -- return PR_SPEC_ENABLE; -- -- return -EINVAL; -+ -+ /* -+ * Otherwise the CPU is vulnerable. The barrier is not a global or -+ * per-process mitigation, so the only value that can be reported here -+ * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc. -+ */ -+ return PR_SPEC_ENABLE; - } - - int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) -diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c -index a0a78aba2083e..1ee4640a26413 100644 ---- a/arch/powerpc/kernel/secvar-sysfs.c -+++ b/arch/powerpc/kernel/secvar-sysfs.c -@@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr, - const char *format; - - node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); -- if (!of_device_is_available(node)) -- return -ENODEV; -+ if (!of_device_is_available(node)) { -+ rc = -ENODEV; -+ goto out; -+ } - - rc = of_property_read_string(node, "format", &format); - if (rc) -- return rc; -+ goto out; - - rc = sprintf(buf, "%s\n", format); - -+out: - of_node_put(node); - - return rc; -diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h -index 1f07317964e49..618aeccdf6918 100644 ---- a/arch/powerpc/kernel/signal.h -+++ b/arch/powerpc/kernel/signal.h -@@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src) - - return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]); - } --#define unsafe_get_user_sigset(dst, src, label) \ -- unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label) -+#define unsafe_get_user_sigset(dst, src, label) do { \ -+ sigset_t *__dst = dst; \ -+ const sigset_t __user *__src = src; \ -+ int i; \ -+ \ -+ for (i = 0; i < _NSIG_WORDS; i++) \ -+ unsafe_get_user(__dst->sig[i], &__src->sig[i], label); \ -+} while (0) - - #ifdef CONFIG_VSX - extern unsigned long copy_vsx_to_user(void __user *to, -diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c -index 0608581967f09..68ed8ecf64fcc 100644 ---- a/arch/powerpc/kernel/signal_32.c -+++ b/arch/powerpc/kernel/signal_32.c -@@ -258,8 +258,9 @@ static void prepare_save_user_regs(int ctx_has_vsx_region) - #endif - } - --static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, -- struct mcontext __user *tm_frame, int ctx_has_vsx_region) -+static __always_inline int -+__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, -+ struct mcontext __user *tm_frame, int ctx_has_vsx_region) - { - unsigned long msr = regs->msr; - -@@ -358,8 +359,9 @@ static void prepare_save_tm_user_regs(void) - current->thread.ckvrsave = mfspr(SPRN_VRSAVE); - } - --static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, -- struct mcontext __user *tm_frame, unsigned long msr) -+static __always_inline int -+save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, -+ struct mcontext __user *tm_frame, unsigned long msr) - { - /* Save both sets of general registers */ - unsafe_save_general_regs(¤t->thread.ckpt_regs, frame, failed); -@@ -438,8 +440,9 @@ failed: - #else - static void 
prepare_save_tm_user_regs(void) { } - --static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, -- struct mcontext __user *tm_frame, unsigned long msr) -+static __always_inline int -+save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame, -+ struct mcontext __user *tm_frame, unsigned long msr) - { - return 0; - } -@@ -1048,7 +1051,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, - if (new_ctx == NULL) - return 0; - if (!access_ok(new_ctx, ctx_size) || -- fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) -+ fault_in_readable((char __user *)new_ctx, ctx_size)) - return -EFAULT; - - /* -@@ -1062,8 +1065,10 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, - * or if another thread unmaps the region containing the context. - * We kill the task with a SIGSEGV in this situation. - */ -- if (do_setcontext(new_ctx, regs, 0)) -- do_exit(SIGSEGV); -+ if (do_setcontext(new_ctx, regs, 0)) { -+ force_exit_sig(SIGSEGV); -+ return -EFAULT; -+ } - - set_thread_flag(TIF_RESTOREALL); - return 0; -@@ -1237,7 +1242,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx, - #endif - - if (!access_ok(ctx, sizeof(*ctx)) || -- fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx))) -+ fault_in_readable((char __user *)ctx, sizeof(*ctx))) - return -EFAULT; - - /* -diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c -index 1831bba0582e1..d1e1fc0acbea3 100644 ---- a/arch/powerpc/kernel/signal_64.c -+++ b/arch/powerpc/kernel/signal_64.c -@@ -688,7 +688,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, - if (new_ctx == NULL) - return 0; - if (!access_ok(new_ctx, ctx_size) || -- fault_in_pages_readable((u8 __user *)new_ctx, ctx_size)) -+ fault_in_readable((char __user *)new_ctx, ctx_size)) - return -EFAULT; - - /* -@@ -703,15 +703,18 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, - * We kill the task with a SIGSEGV in this situation. - */ - -- if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) -- do_exit(SIGSEGV); -+ if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) { -+ force_exit_sig(SIGSEGV); -+ return -EFAULT; -+ } - set_current_blocked(&set); - - if (!user_read_access_begin(new_ctx, ctx_size)) - return -EFAULT; - if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) { - user_read_access_end(); -- do_exit(SIGSEGV); -+ force_exit_sig(SIGSEGV); -+ return -EFAULT; - } - user_read_access_end(); - -diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c -index 605bab448f847..fb95f92dcfac6 100644 ---- a/arch/powerpc/kernel/smp.c -+++ b/arch/powerpc/kernel/smp.c -@@ -61,6 +61,7 @@ - #include - #include - #include -+#include - - #ifdef DEBUG - #include -@@ -620,6 +621,45 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) - } - #endif - -+#ifdef CONFIG_NMI_IPI -+static void crash_stop_this_cpu(struct pt_regs *regs) -+#else -+static void crash_stop_this_cpu(void *dummy) -+#endif -+{ -+ /* -+ * Just busy wait here and avoid marking CPU as offline to ensure -+ * register data is captured appropriately. -+ */ -+ while (1) -+ cpu_relax(); -+} -+ -+void crash_smp_send_stop(void) -+{ -+ static bool stopped = false; -+ -+ /* -+ * In case of fadump, register data for all CPUs is captured by f/w -+ * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before -+ * this rtas call to avoid tricky post processing of those CPUs' -+ * backtraces. 
-+ */ -+ if (should_fadump_crash()) -+ return; -+ -+ if (stopped) -+ return; -+ -+ stopped = true; -+ -+#ifdef CONFIG_NMI_IPI -+ smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000); -+#else -+ smp_call_function(crash_stop_this_cpu, NULL, 0); -+#endif /* CONFIG_NMI_IPI */ -+} -+ - #ifdef CONFIG_NMI_IPI - static void nmi_stop_this_cpu(struct pt_regs *regs) - { -@@ -1640,10 +1680,12 @@ void start_secondary(void *unused) - BUG(); - } - -+#ifdef CONFIG_PROFILING - int setup_profiling_timer(unsigned int multiplier) - { - return 0; - } -+#endif - - static void fixup_topology(void) - { -diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S -index f73f4d72fea43..e0cbd63007f21 100644 ---- a/arch/powerpc/kernel/swsusp_32.S -+++ b/arch/powerpc/kernel/swsusp_32.S -@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume) - #ifdef CONFIG_ALTIVEC - /* Stop pending alitvec streams and memory accesses */ - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - #endif - sync -diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S -index 6d3189830dd32..068a268a8013e 100644 ---- a/arch/powerpc/kernel/swsusp_asm64.S -+++ b/arch/powerpc/kernel/swsusp_asm64.S -@@ -142,7 +142,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) - _GLOBAL(swsusp_arch_resume) - /* Stop pending alitvec streams and memory accesses */ - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - sync - -diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c -index 16ff0399a2574..719bfc6d1e3f5 100644 ---- a/arch/powerpc/kernel/sys_ppc32.c -+++ b/arch/powerpc/kernel/sys_ppc32.c -@@ -56,18 +56,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, - return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); - } - --/* -- * long long munging: -- * The 32 bit ABI passes long longs in an odd even register pair. -- * High and low parts are swapped depending on endian mode, -- * so define a macro (similar to mips linux32) to handle that. 
-- */ --#ifdef __LITTLE_ENDIAN__ --#define merge_64(low, high) ((u64)high << 32) | low --#else --#define merge_64(high, low) ((u64)high << 32) | low --#endif -- - compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2) - { -@@ -94,7 +82,7 @@ asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4, - asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, - u32 len1, u32 len2) - { -- return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, -+ return ksys_fallocate(fd, mode, merge_64(offset1, offset2), - merge_64(len1, len2)); - } - -diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c -index 825931e400df7..e3edcf8f7cae5 100644 ---- a/arch/powerpc/kernel/syscalls.c -+++ b/arch/powerpc/kernel/syscalls.c -@@ -99,8 +99,8 @@ long ppc64_personality(unsigned long personality) - long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low) - { -- return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, -- (u64)len_high << 32 | len_low, advice); -+ return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low), -+ merge_64(len_high, len_low), advice); - } - - SYSCALL_DEFINE0(switch_endian) -diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S -index cb3358886203e..6c1db3b6de2dc 100644 ---- a/arch/powerpc/kernel/systbl.S -+++ b/arch/powerpc/kernel/systbl.S -@@ -18,6 +18,7 @@ - .p2align 3 - #define __SYSCALL(nr, entry) .8byte entry - #else -+ .p2align 2 - #define __SYSCALL(nr, entry) .long entry - #endif - -diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c -index 934d8ae66cc63..4406d7a89558b 100644 ---- a/arch/powerpc/kernel/time.c -+++ b/arch/powerpc/kernel/time.c -@@ -450,7 +450,7 @@ void vtime_flush(struct task_struct *tsk) - #define calc_cputime_factors() - #endif - --void __delay(unsigned long loops) -+void __no_kcsan __delay(unsigned long loops) - { - unsigned long start; - -@@ -471,7 +471,7 @@ void __delay(unsigned long loops) - } - EXPORT_SYMBOL(__delay); - --void udelay(unsigned long usecs) -+void __no_kcsan udelay(unsigned long usecs) - { - __delay(tb_ticks_per_usec * usecs); - } -diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S -index 2b91f233b05d5..5a0f023a26e90 100644 ---- a/arch/powerpc/kernel/tm.S -+++ b/arch/powerpc/kernel/tm.S -@@ -226,11 +226,8 @@ _GLOBAL(tm_reclaim) - - /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */ - SAVE_GPR(0, r7) /* user r0 */ -- SAVE_GPR(2, r7) /* user r2 */ -- SAVE_4GPRS(3, r7) /* user r3-r6 */ -- SAVE_GPR(8, r7) /* user r8 */ -- SAVE_GPR(9, r7) /* user r9 */ -- SAVE_GPR(10, r7) /* user r10 */ -+ SAVE_GPRS(2, 6, r7) /* user r2-r6 */ -+ SAVE_GPRS(8, 10, r7) /* user r8-r10 */ - ld r3, GPR1(r1) /* user r1 */ - ld r4, GPR7(r1) /* user r7 */ - ld r5, GPR11(r1) /* user r11 */ -@@ -445,12 +442,9 @@ restore_gprs: - ld r6, THREAD_TM_PPR(r3) - - REST_GPR(0, r7) /* GPR0 */ -- REST_2GPRS(2, r7) /* GPR2-3 */ -- REST_GPR(4, r7) /* GPR4 */ -- REST_4GPRS(8, r7) /* GPR8-11 */ -- REST_2GPRS(12, r7) /* GPR12-13 */ -- -- REST_NVGPRS(r7) /* GPR14-31 */ -+ REST_GPRS(2, 4, r7) /* GPR2-4 */ -+ REST_GPRS(8, 12, r7) /* GPR8-12 */ -+ REST_GPRS(14, 31, r7) /* GPR14-31 */ - - /* Load up PPR and DSCR here so we don't run with user values for long */ - mtspr SPRN_DSCR, r5 -@@ -486,18 +480,24 @@ restore_gprs: - REST_GPR(6, r7) - - /* -- * Store r1 and r5 on the stack so that we can access them after we -- * clear MSR RI. 
-+ * Store user r1 and r5 and r13 on the stack (in the unused save -+ * areas / compiler reserved areas), so that we can access them after -+ * we clear MSR RI. - */ - - REST_GPR(5, r7) - std r5, -8(r1) -- ld r5, GPR1(r7) -+ ld r5, GPR13(r7) - std r5, -16(r1) -+ ld r5, GPR1(r7) -+ std r5, -24(r1) - - REST_GPR(7, r7) - -- /* Clear MSR RI since we are about to use SCRATCH0. EE is already off */ -+ /* Stash the stack pointer away for use after recheckpoint */ -+ std r1, PACAR1(r13) -+ -+ /* Clear MSR RI since we are about to clobber r13. EE is already off */ - li r5, 0 - mtmsrd r5, 1 - -@@ -508,9 +508,9 @@ restore_gprs: - * until we turn MSR RI back on. - */ - -- SET_SCRATCH0(r1) - ld r5, -8(r1) -- ld r1, -16(r1) -+ ld r13, -16(r1) -+ ld r1, -24(r1) - - /* Commit register state as checkpointed state: */ - TRECHKPT -@@ -526,9 +526,9 @@ restore_gprs: - */ - - GET_PACA(r13) -- GET_SCRATCH0(r1) -+ ld r1, PACAR1(r13) - -- /* R1 is restored, so we are recoverable again. EE is still off */ -+ /* R13, R1 is restored, so we are recoverable again. EE is still off */ - li r4, MSR_RI - mtmsrd r4, 1 - -diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c -index d89c5df4f2062..660040c2d7b54 100644 ---- a/arch/powerpc/kernel/trace/ftrace.c -+++ b/arch/powerpc/kernel/trace/ftrace.c -@@ -336,9 +336,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp) - - /* Is this a known long jump tramp? */ - for (i = 0; i < NUM_FTRACE_TRAMPS; i++) -- if (!ftrace_tramps[i]) -- break; -- else if (ftrace_tramps[i] == tramp) -+ if (ftrace_tramps[i] == tramp) - return 0; - - /* Is this a known plt tramp? */ -@@ -881,6 +879,17 @@ void arch_ftrace_update_code(int command) - - extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; - -+void ftrace_free_init_tramp(void) -+{ -+ int i; -+ -+ for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++) -+ if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) { -+ ftrace_tramps[i] = 0; -+ return; -+ } -+} -+ - int __init ftrace_dyn_arch_init(void) - { - int i; -diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S -index f9fd5f743eba3..d636fc755f608 100644 ---- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S -+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S -@@ -41,15 +41,14 @@ _GLOBAL(ftrace_regs_caller) - - /* Save all gprs to pt_regs */ - SAVE_GPR(0, r1) -- SAVE_10GPRS(2, r1) -+ SAVE_GPRS(2, 11, r1) - - /* Ok to continue? 
*/ - lbz r3, PACA_FTRACE_ENABLED(r13) - cmpdi r3, 0 - beq ftrace_no_trace - -- SAVE_10GPRS(12, r1) -- SAVE_10GPRS(22, r1) -+ SAVE_GPRS(12, 31, r1) - - /* Save previous stack pointer (r1) */ - addi r8, r1, SWITCH_FRAME_SIZE -@@ -108,10 +107,8 @@ ftrace_regs_call: - #endif - - /* Restore gprs */ -- REST_GPR(0,r1) -- REST_10GPRS(2,r1) -- REST_10GPRS(12,r1) -- REST_10GPRS(22,r1) -+ REST_GPR(0, r1) -+ REST_GPRS(2, 31, r1) - - /* Restore possibly modified LR */ - ld r0, _LINK(r1) -@@ -157,7 +154,7 @@ _GLOBAL(ftrace_caller) - stdu r1, -SWITCH_FRAME_SIZE(r1) - - /* Save all gprs to pt_regs */ -- SAVE_8GPRS(3, r1) -+ SAVE_GPRS(3, 10, r1) - - lbz r3, PACA_FTRACE_ENABLED(r13) - cmpdi r3, 0 -@@ -194,7 +191,7 @@ ftrace_call: - mtctr r3 - - /* Restore gprs */ -- REST_8GPRS(3,r1) -+ REST_GPRS(3, 10, r1) - - /* Restore callee's TOC */ - ld r2, 24(r1) -diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c -index 11741703d26e0..a08bb7cefdc54 100644 ---- a/arch/powerpc/kernel/traps.c -+++ b/arch/powerpc/kernel/traps.c -@@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, - - if (panic_on_oops) - panic("Fatal exception"); -- do_exit(signr); -+ make_task_dead(signr); - } - NOKPROBE_SYMBOL(oops_end); - -@@ -792,9 +792,9 @@ int machine_check_generic(struct pt_regs *regs) - void die_mce(const char *str, struct pt_regs *regs, long err) - { - /* -- * The machine check wants to kill the interrupted context, but -- * do_exit() checks for in_interrupt() and panics in that case, so -- * exit the irq/nmi before calling die. -+ * The machine check wants to kill the interrupted context, -+ * but make_task_dead() checks for in_interrupt() and panics -+ * in that case, so exit the irq/nmi before calling die. - */ - if (in_nmi()) - nmi_exit(); -diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S -index 40bdefe9caa73..d4531902d8c67 100644 ---- a/arch/powerpc/kernel/vmlinux.lds.S -+++ b/arch/powerpc/kernel/vmlinux.lds.S -@@ -8,6 +8,7 @@ - #define BSS_FIRST_SECTIONS *(.bss.prominit) - #define EMITS_PT_NOTE - #define RO_EXCEPTION_TABLE_ALIGN 0 -+#define RUNTIME_DISCARD_EXIT - - #define SOFT_MASK_TABLE(align) \ - . = ALIGN(align); \ -@@ -32,6 +33,10 @@ - - #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) - -+#if STRICT_ALIGN_SIZE < PAGE_SIZE -+#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT" -+#endif -+ - ENTRY(_stext) - - PHDRS { -@@ -204,12 +209,16 @@ SECTIONS - } - #endif - -+ /* -+ * Various code relies on __init_begin being at the strict RWX boundary. -+ */ -+ . = ALIGN(STRICT_ALIGN_SIZE); -+ __srwx_boundary = .; -+ __init_begin = .; -+ - /* - * Init sections discarded at runtime - */ -- . = ALIGN(STRICT_ALIGN_SIZE); -- __init_begin = .; -- . = ALIGN(PAGE_SIZE); - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - _sinittext = .; - INIT_TEXT -@@ -275,9 +284,7 @@ SECTIONS - . 
= ALIGN(8); - .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) - { --#ifdef CONFIG_PPC32 - __dynamic_symtab = .; --#endif - *(.dynsym) - } - .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) } -@@ -401,9 +408,12 @@ SECTIONS - DISCARDS - /DISCARD/ : { - *(*.EMB.apuinfo) -- *(.glink .iplt .plt .rela* .comment) -+ *(.glink .iplt .plt .comment) - *(.gnu.version*) - *(.gnu.attributes) - *(.eh_frame) -+#ifndef CONFIG_RELOCATABLE -+ *(.rela*) -+#endif - } - } -diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c -index f9ea0e5357f92..ad94a2c6b7337 100644 ---- a/arch/powerpc/kernel/watchdog.c -+++ b/arch/powerpc/kernel/watchdog.c -@@ -135,6 +135,10 @@ static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb) - { - cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask); - cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask); -+ /* -+ * See wd_smp_clear_cpu_pending() -+ */ -+ smp_mb(); - if (cpumask_empty(&wd_smp_cpus_pending)) { - wd_smp_last_reset_tb = tb; - cpumask_andnot(&wd_smp_cpus_pending, -@@ -187,6 +191,12 @@ static void watchdog_smp_panic(int cpu, u64 tb) - if (sysctl_hardlockup_all_cpu_backtrace) - trigger_allbutself_cpu_backtrace(); - -+ /* -+ * Force flush any remote buffers that might be stuck in IRQ context -+ * and therefore could not run their irq_work. -+ */ -+ printk_trigger_flush(); -+ - if (hardlockup_panic) - nmi_panic(NULL, "Hard LOCKUP"); - -@@ -215,13 +225,44 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb) - - cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck); - wd_smp_unlock(&flags); -+ } else { -+ /* -+ * The last CPU to clear pending should have reset the -+ * watchdog so we generally should not find it empty -+ * here if our CPU was clear. However it could happen -+ * due to a rare race with another CPU taking the -+ * last CPU out of the mask concurrently. -+ * -+ * We can't add a warning for it. But just in case -+ * there is a problem with the watchdog that is causing -+ * the mask to not be reset, try to kick it along here. -+ */ -+ if (unlikely(cpumask_empty(&wd_smp_cpus_pending))) -+ goto none_pending; - } - return; - } -+ - cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); -+ -+ /* -+ * Order the store to clear pending with the load(s) to check all -+ * words in the pending mask to check they are all empty. This orders -+ * with the same barrier on another CPU. This prevents two CPUs -+ * clearing the last 2 pending bits, but neither seeing the other's -+ * store when checking if the mask is empty, and missing an empty -+ * mask, which ends with a false positive. -+ */ -+ smp_mb(); - if (cpumask_empty(&wd_smp_cpus_pending)) { - unsigned long flags; - -+none_pending: -+ /* -+ * Double check under lock because more than one CPU could see -+ * a clear mask with the lockless check after clearing their -+ * pending bits. 
-+ */ - wd_smp_lock(&flags); - if (cpumask_empty(&wd_smp_cpus_pending)) { - wd_smp_last_reset_tb = tb; -@@ -312,8 +353,12 @@ void arch_touch_nmi_watchdog(void) - { - unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; - int cpu = smp_processor_id(); -- u64 tb = get_tb(); -+ u64 tb; - -+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) -+ return; -+ -+ tb = get_tb(); - if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { - per_cpu(wd_timer_tb, cpu) = tb; - wd_smp_clear_cpu_pending(cpu, tb); -diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c -index 48525e8b57300..71b1bfdadd76a 100644 ---- a/arch/powerpc/kexec/core.c -+++ b/arch/powerpc/kexec/core.c -@@ -147,11 +147,18 @@ void __init reserve_crashkernel(void) - if (!crashk_res.start) { - #ifdef CONFIG_PPC64 - /* -- * On 64bit we split the RMO in half but cap it at half of -- * a small SLB (128MB) since the crash kernel needs to place -- * itself and some stacks to be in the first segment. -+ * On the LPAR platform place the crash kernel to mid of -+ * RMA size (512MB or more) to ensure the crash kernel -+ * gets enough space to place itself and some stack to be -+ * in the first segment. At the same time normal kernel -+ * also get enough space to allocate memory for essential -+ * system resource in the first segment. Keep the crash -+ * kernel starts at 128MB offset on other platforms. - */ -- crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); -+ if (firmware_has_feature(FW_FEATURE_LPAR)) -+ crashk_res.start = ppc64_rma_size / 2; -+ else -+ crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); - #else - crashk_res.start = KDUMP_KERNELBASE; - #endif -diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S -index e3ab9df6cf199..6cfcd20d46686 100644 ---- a/arch/powerpc/kvm/book3s_32_sr.S -+++ b/arch/powerpc/kvm/book3s_32_sr.S -@@ -122,11 +122,27 @@ - - /* 0x0 - 0xb */ - -- /* 'current->mm' needs to be in r4 */ -- tophys(r4, r2) -- lwz r4, MM(r4) -- tophys(r4, r4) -- /* This only clobbers r0, r3, r4 and r5 */ -+ /* switch_mmu_context() needs paging, let's enable it */ -+ mfmsr r9 -+ ori r11, r9, MSR_DR -+ mtmsr r11 -+ sync -+ -+ /* switch_mmu_context() clobbers r12, rescue it */ -+ SAVE_GPR(12, r1) -+ -+ /* Calling switch_mmu_context(, current->mm, ); */ -+ lwz r4, MM(r2) - bl switch_mmu_context - -+ /* restore r12 */ -+ REST_GPR(12, r1) -+ -+ /* Disable paging again */ -+ mfmsr r9 -+ li r6, MSR_DR -+ andc r9, r9, r6 -+ mtmsr r9 -+ sync -+ - .endm -diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S -index 983b8c18bc31e..a644003603da1 100644 ---- a/arch/powerpc/kvm/book3s_64_entry.S -+++ b/arch/powerpc/kvm/book3s_64_entry.S -@@ -407,10 +407,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1) - */ - ld r10,HSTATE_SCRATCH0(r13) - cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK -- beq machine_check_common -+ beq .Lcall_machine_check_common - - cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET -- beq system_reset_common -+ beq .Lcall_system_reset_common - - b . 
-+ -+.Lcall_machine_check_common: -+ b machine_check_common -+ -+.Lcall_system_reset_common: -+ b system_reset_common - #endif -diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c -index 6365087f31602..3cb2e05a7ee83 100644 ---- a/arch/powerpc/kvm/book3s_64_vio.c -+++ b/arch/powerpc/kvm/book3s_64_vio.c -@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, - tbl[idx % TCES_PER_PAGE] = tce; - } - --static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, -- unsigned long entry) -+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt, -+ struct iommu_table *tbl, unsigned long entry) - { -- unsigned long hpa = 0; -- enum dma_data_direction dir = DMA_NONE; -+ unsigned long i; -+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); -+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); -+ -+ for (i = 0; i < subpages; ++i) { -+ unsigned long hpa = 0; -+ enum dma_data_direction dir = DMA_NONE; - -- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir); -+ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir); -+ } - } - - static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, -@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm, - break; - } - -+ iommu_tce_kill(tbl, io_entry, subpages); -+ - return ret; - } - -@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm, - break; - } - -+ iommu_tce_kill(tbl, io_entry, subpages); -+ - return ret; - } - -@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, - ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, - entry, ua, dir); - -- iommu_tce_kill(stit->tbl, entry, 1); - - if (ret != H_SUCCESS) { -- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); -+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry); - goto unlock_exit; - } - } -@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, - */ - if (get_user(tce, tces + i)) { - ret = H_TOO_HARD; -- goto invalidate_exit; -+ goto unlock_exit; - } - tce = be64_to_cpu(tce); - - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { - ret = H_PARAMETER; -- goto invalidate_exit; -+ goto unlock_exit; - } - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { -@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, - iommu_tce_direction(tce)); - - if (ret != H_SUCCESS) { -- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, -- entry); -- goto invalidate_exit; -+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, -+ entry + i); -+ goto unlock_exit; - } - } - - kvmppc_tce_put(stt, entry + i, tce); - } - --invalidate_exit: -- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) -- iommu_tce_kill(stit->tbl, entry, npages); -- - unlock_exit: - srcu_read_unlock(&vcpu->kvm->srcu, idx); - -@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, - continue; - - if (ret == H_TOO_HARD) -- goto invalidate_exit; -+ return ret; - - WARN_ON_ONCE(1); -- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry); -+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i); - } - } - - for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) - kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); - --invalidate_exit: -- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) -- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages); -- - return ret; - } - EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); -diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c 
b/arch/powerpc/kvm/book3s_64_vio_hv.c -index 870b7f0c7ea56..fdeda6a9cff44 100644 ---- a/arch/powerpc/kvm/book3s_64_vio_hv.c -+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c -@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl, - tbl->it_ops->tce_kill(tbl, entry, pages, true); - } - --static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl, -- unsigned long entry) -+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt, -+ struct iommu_table *tbl, unsigned long entry) - { -- unsigned long hpa = 0; -- enum dma_data_direction dir = DMA_NONE; -+ unsigned long i; -+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); -+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift); -+ -+ for (i = 0; i < subpages; ++i) { -+ unsigned long hpa = 0; -+ enum dma_data_direction dir = DMA_NONE; - -- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir); -+ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir); -+ } - } - - static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, -@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, - break; - } - -+ iommu_tce_kill_rm(tbl, io_entry, subpages); -+ - return ret; - } - -@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, - break; - } - -+ iommu_tce_kill_rm(tbl, io_entry, subpages); -+ - return ret; - } - -@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, - ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, - stit->tbl, entry, ua, dir); - -- iommu_tce_kill_rm(stit->tbl, entry, 1); -- - if (ret != H_SUCCESS) { -- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); -+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry); - return ret; - } - } -@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, - ua = 0; - if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) { - ret = H_PARAMETER; -- goto invalidate_exit; -+ goto unlock_exit; - } - - list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { -@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, - iommu_tce_direction(tce)); - - if (ret != H_SUCCESS) { -- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, -- entry); -- goto invalidate_exit; -+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, -+ entry + i); -+ goto unlock_exit; - } - } - - kvmppc_rm_tce_put(stt, entry + i, tce); - } - --invalidate_exit: -- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) -- iommu_tce_kill_rm(stit->tbl, entry, npages); -- - unlock_exit: - if (!prereg) - arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); -@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, - continue; - - if (ret == H_TOO_HARD) -- goto invalidate_exit; -+ return ret; - - WARN_ON_ONCE_RM(1); -- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); -+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i); - } - } - - for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) - kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); - --invalidate_exit: -- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) -- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages); -- - return ret; - } - -diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c -index 2acb1c96cfafd..eba77096c4430 100644 ---- a/arch/powerpc/kvm/book3s_hv.c -+++ b/arch/powerpc/kvm/book3s_hv.c -@@ -1731,7 +1731,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, - - static int 
kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) - { -- struct kvm_nested_guest *nested = vcpu->arch.nested; - int r; - int srcu_idx; - -@@ -1831,7 +1830,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) - * it into a HEAI. - */ - if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || -- (nested->hfscr & (1UL << cause))) { -+ (vcpu->arch.nested_hfscr & (1UL << cause))) { - vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; - - /* -@@ -3726,7 +3725,20 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) - - kvmppc_set_host_core(pcpu); - -- guest_exit_irqoff(); -+ context_tracking_guest_exit(); -+ if (!vtime_accounting_enabled_this_cpu()) { -+ local_irq_enable(); -+ /* -+ * Service IRQs here before vtime_account_guest_exit() so any -+ * ticks that occurred while running the guest are accounted to -+ * the guest. If vtime accounting is enabled, accounting uses -+ * TB rather than ticks, so it can be done without enabling -+ * interrupts here, which has the problem that it accounts -+ * interrupt processing overhead to the host. -+ */ -+ local_irq_disable(); -+ } -+ vtime_account_guest_exit(); - - local_irq_enable(); - -@@ -4510,7 +4522,20 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, - - kvmppc_set_host_core(pcpu); - -- guest_exit_irqoff(); -+ context_tracking_guest_exit(); -+ if (!vtime_accounting_enabled_this_cpu()) { -+ local_irq_enable(); -+ /* -+ * Service IRQs here before vtime_account_guest_exit() so any -+ * ticks that occurred while running the guest are accounted to -+ * the guest. If vtime accounting is enabled, accounting uses -+ * TB rather than ticks, so it can be done without enabling -+ * interrupts here, which has the problem that it accounts -+ * interrupt processing overhead to the host. -+ */ -+ local_irq_disable(); -+ } -+ vtime_account_guest_exit(); - - local_irq_enable(); - -@@ -4835,8 +4860,12 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, - unsigned long npages = mem->memory_size >> PAGE_SHIFT; - - if (change == KVM_MR_CREATE) { -- slot->arch.rmap = vzalloc(array_size(npages, -- sizeof(*slot->arch.rmap))); -+ unsigned long size = array_size(npages, sizeof(*slot->arch.rmap)); -+ -+ if ((size >> PAGE_SHIFT) > totalram_pages()) -+ return -ENOMEM; -+ -+ slot->arch.rmap = vzalloc(size); - if (!slot->arch.rmap) - return -ENOMEM; - } -@@ -5206,6 +5235,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) - kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); - lpcr &= LPCR_PECE | LPCR_LPES; - } else { -+ /* -+ * The L2 LPES mode will be set by the L0 according to whether -+ * or not it needs to take external interrupts in HV mode. 
-+ */ - lpcr = 0; - } - lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | -@@ -6072,8 +6105,11 @@ static int kvmppc_book3s_init_hv(void) - if (r) - return r; - -- if (kvmppc_radix_possible()) -+ if (kvmppc_radix_possible()) { - r = kvmppc_radix_init(); -+ if (r) -+ return r; -+ } - - r = kvmppc_uvmem_init(); - if (r < 0) -diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c -index fcf4760a3a0ea..b148629b7f033 100644 ---- a/arch/powerpc/kvm/book3s_hv_builtin.c -+++ b/arch/powerpc/kvm/book3s_hv_builtin.c -@@ -20,7 +20,7 @@ - #include - #include - #include --#include -+#include - #include - #include - #include -@@ -177,13 +177,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode); - - int kvmppc_hwrng_present(void) - { -- return powernv_hwrng_present(); -+ return ppc_md.get_random_seed != NULL; - } - EXPORT_SYMBOL_GPL(kvmppc_hwrng_present); - - long kvmppc_rm_h_random(struct kvm_vcpu *vcpu) - { -- if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4])) -+ if (ppc_md.get_random_seed && -+ ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4])) - return H_SUCCESS; - - return H_HARDWARE; -@@ -695,6 +696,7 @@ static void flush_guest_tlb(struct kvm *kvm) - "r" (0) : "memory"); - } - asm volatile("ptesync": : :"memory"); -+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. - asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory"); - } else { - for (set = 0; set < kvm->arch.tlb_sets; ++set) { -@@ -705,7 +707,9 @@ static void flush_guest_tlb(struct kvm *kvm) - rb += PPC_BIT(51); /* increment set number */ - } - asm volatile("ptesync": : :"memory"); -- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); -+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. -+ if (cpu_has_feature(CPU_FTR_ARCH_300)) -+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); - } - } - -diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c -index ed8a2c9f56299..ddea14e5cb5e4 100644 ---- a/arch/powerpc/kvm/book3s_hv_nested.c -+++ b/arch/powerpc/kvm/book3s_hv_nested.c -@@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu, - /* - * Don't let L1 change LPCR bits for the L2 except these: - */ -- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | -- LPCR_LPES | LPCR_MER; -+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER; - - /* - * Additional filtering is required depending on hardware -@@ -362,7 +361,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) - /* set L1 state to L2 state */ - vcpu->arch.nested = l2; - vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; -- l2->hfscr = l2_hv.hfscr; -+ vcpu->arch.nested_hfscr = l2_hv.hfscr; - vcpu->arch.regs = l2_regs; - - /* Guest must always run with ME enabled, HV disabled. 
*/ -@@ -582,7 +581,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) - if (eaddr & (0xFFFUL << 52)) - return H_PARAMETER; - -- buf = kzalloc(n, GFP_KERNEL); -+ buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN); - if (!buf) - return H_NO_MEM; - -diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c -index 961b3d70483ca..a0e0c28408c07 100644 ---- a/arch/powerpc/kvm/book3s_hv_p9_entry.c -+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c -@@ -7,15 +7,6 @@ - #include - - #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING --static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next) --{ -- struct kvmppc_vcore *vc = vcpu->arch.vcore; -- u64 tb = mftb() - vc->tb_offset_applied; -- -- vcpu->arch.cur_activity = next; -- vcpu->arch.cur_tb_start = tb; --} -- - static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next) - { - struct kvmppc_vcore *vc = vcpu->arch.vcore; -@@ -47,8 +38,8 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator - curr->seqcount = seq + 2; - } - --#define start_timing(vcpu, next) __start_timing(vcpu, next) --#define end_timing(vcpu) __start_timing(vcpu, NULL) -+#define start_timing(vcpu, next) __accumulate_time(vcpu, next) -+#define end_timing(vcpu) __accumulate_time(vcpu, NULL) - #define accumulate_time(vcpu, next) __accumulate_time(vcpu, next) - #else - #define start_timing(vcpu, next) do {} while (0) -diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c -index d4bca93b79f6d..6fa634599bc90 100644 ---- a/arch/powerpc/kvm/book3s_hv_ras.c -+++ b/arch/powerpc/kvm/book3s_hv_ras.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S -index eb776d0c5d8e9..81fc1e0ebe9a8 100644 ---- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S -+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S -@@ -2005,7 +2005,7 @@ hcall_real_table: - .globl hcall_real_table_end - hcall_real_table_end: - --_GLOBAL(kvmppc_h_set_xdabr) -+_GLOBAL_TOC(kvmppc_h_set_xdabr) - EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) - andi. 
r0, r5, DABRX_USER | DABRX_KERNEL - beq 6f -@@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) - 6: li r3, H_PARAMETER - blr - --_GLOBAL(kvmppc_h_set_dabr) -+_GLOBAL_TOC(kvmppc_h_set_dabr) - EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) - li r5, DABRX_USER | DABRX_KERNEL - 3: -@@ -2711,8 +2711,7 @@ kvmppc_bad_host_intr: - std r0, GPR0(r1) - std r9, GPR1(r1) - std r2, GPR2(r1) -- SAVE_4GPRS(3, r1) -- SAVE_2GPRS(7, r1) -+ SAVE_GPRS(3, 8, r1) - srdi r0, r12, 32 - clrldi r12, r12, 32 - std r0, _CCR(r1) -@@ -2735,7 +2734,7 @@ kvmppc_bad_host_intr: - ld r9, HSTATE_SCRATCH2(r13) - ld r12, HSTATE_SCRATCH0(r13) - GET_SCRATCH0(r0) -- SAVE_4GPRS(9, r1) -+ SAVE_GPRS(9, 12, r1) - std r0, GPR13(r1) - SAVE_NVGPRS(r1) - ld r5, HSTATE_CFAR(r13) -diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c -index a7061ee3b1577..3d4ee75b0fb76 100644 ---- a/arch/powerpc/kvm/book3s_hv_uvmem.c -+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c -@@ -251,7 +251,7 @@ int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; -- p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns))); -+ p->pfns = vcalloc(slot->npages, sizeof(*p->pfns)); - if (!p->pfns) { - kfree(p); - return -ENOMEM; -@@ -360,13 +360,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, - static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, - struct kvm *kvm, unsigned long *gfn) - { -- struct kvmppc_uvmem_slot *p; -+ struct kvmppc_uvmem_slot *p = NULL, *iter; - bool ret = false; - unsigned long i; - -- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) -- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns) -+ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) -+ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { -+ p = iter; - break; -+ } - if (!p) - return ret; - /* -diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c -index 977801c83aff8..8c15c90dd3a97 100644 ---- a/arch/powerpc/kvm/booke.c -+++ b/arch/powerpc/kvm/booke.c -@@ -1042,7 +1042,21 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr) - } - - trace_kvm_exit(exit_nr, vcpu); -- guest_exit_irqoff(); -+ -+ context_tracking_guest_exit(); -+ if (!vtime_accounting_enabled_this_cpu()) { -+ local_irq_enable(); -+ /* -+ * Service IRQs here before vtime_account_guest_exit() so any -+ * ticks that occurred while running the guest are accounted to -+ * the guest. If vtime accounting is enabled, accounting uses -+ * TB rather than ticks, so it can be done without enabling -+ * interrupts here, which has the problem that it accounts -+ * interrupt processing overhead to the host. 
-+ */ -+ local_irq_disable(); -+ } -+ vtime_account_guest_exit(); - - local_irq_enable(); - -diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c -index b4e6f70b97b94..ee305455bd8db 100644 ---- a/arch/powerpc/kvm/powerpc.c -+++ b/arch/powerpc/kvm/powerpc.c -@@ -1507,7 +1507,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, - { - enum emulation_result emulated = EMULATE_DONE; - -- if (vcpu->arch.mmio_vsx_copy_nums > 2) -+ if (vcpu->arch.mmio_vmx_copy_nums > 2) - return EMULATE_FAIL; - - while (vcpu->arch.mmio_vmx_copy_nums) { -@@ -1604,7 +1604,7 @@ int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, - unsigned int index = rs & KVM_MMIO_REG_MASK; - enum emulation_result emulated = EMULATE_DONE; - -- if (vcpu->arch.mmio_vsx_copy_nums > 2) -+ if (vcpu->arch.mmio_vmx_copy_nums > 2) - return EMULATE_FAIL; - - vcpu->arch.io_gpr = rs; -diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile -index 99a7c9132422c..54be64203b2ab 100644 ---- a/arch/powerpc/lib/Makefile -+++ b/arch/powerpc/lib/Makefile -@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING - endif - -+CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -+CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) -+ - obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o - - ifndef CONFIG_KASAN -diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c -index cda17bee5afea..c3e06922468b3 100644 ---- a/arch/powerpc/lib/feature-fixups.c -+++ b/arch/powerpc/lib/feature-fixups.c -@@ -228,6 +228,7 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types) - - static bool stf_exit_reentrant = false; - static bool rfi_exit_reentrant = false; -+static DEFINE_MUTEX(exit_flush_lock); - - static int __do_stf_barrier_fixups(void *data) - { -@@ -253,6 +254,9 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) - * low level interrupt exit code before patching. After the patching, - * if allowed, then flip the branch to allow fast exits. - */ -+ -+ // Prevent static key update races with do_rfi_flush_fixups() -+ mutex_lock(&exit_flush_lock); - static_branch_enable(&interrupt_exit_not_reentrant); - - stop_machine(__do_stf_barrier_fixups, &types, NULL); -@@ -264,6 +268,8 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) - - if (stf_exit_reentrant && rfi_exit_reentrant) - static_branch_disable(&interrupt_exit_not_reentrant); -+ -+ mutex_unlock(&exit_flush_lock); - } - - void do_uaccess_flush_fixups(enum l1d_flush_type types) -@@ -486,6 +492,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) - * without stop_machine, so this could be achieved with a broadcast - * IPI instead, but this matches the stf sequence. 
- */ -+ -+ // Prevent static key update races with do_stf_barrier_fixups() -+ mutex_lock(&exit_flush_lock); - static_branch_enable(&interrupt_exit_not_reentrant); - - stop_machine(__do_rfi_flush_fixups, &types, NULL); -@@ -497,6 +506,8 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) - - if (stf_exit_reentrant && rfi_exit_reentrant) - static_branch_disable(&interrupt_exit_not_reentrant); -+ -+ mutex_unlock(&exit_flush_lock); - } - - void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) -diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c -index d8d5f901cee1c..2d39b7c246e30 100644 ---- a/arch/powerpc/lib/sstep.c -+++ b/arch/powerpc/lib/sstep.c -@@ -112,9 +112,9 @@ static nokprobe_inline long address_ok(struct pt_regs *regs, - { - if (!user_mode(regs)) - return 1; -- if (__access_ok(ea, nb)) -+ if (access_ok((void __user *)ea, nb)) - return 1; -- if (__access_ok(ea, 1)) -+ if (access_ok((void __user *)ea, 1)) - /* Access overlaps the end of the user region */ - regs->dar = TASK_SIZE_MAX - 1; - else -@@ -1014,7 +1014,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); - - #define __put_user_asmx(x, addr, err, op, cr) \ - __asm__ __volatile__( \ -+ ".machine push\n" \ -+ ".machine power8\n" \ - "1: " op " %2,0,%3\n" \ -+ ".machine pop\n" \ - " mfcr %1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ -@@ -1027,7 +1030,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); - - #define __get_user_asmx(x, addr, err, op) \ - __asm__ __volatile__( \ -+ ".machine push\n" \ -+ ".machine power8\n" \ - "1: "op" %1,0,%2\n" \ -+ ".machine pop\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: li %0,%3\n" \ -@@ -3181,12 +3187,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) - case BARRIER_EIEIO: - eieio(); - break; -+#ifdef CONFIG_PPC64 - case BARRIER_LWSYNC: - asm volatile("lwsync" : : : "memory"); - break; - case BARRIER_PTESYNC: - asm volatile("ptesync" : : : "memory"); - break; -+#endif - } - break; - -@@ -3304,7 +3312,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) - __put_user_asmx(op->val, ea, err, "stbcx.", cr); - break; - case 2: -- __put_user_asmx(op->val, ea, err, "stbcx.", cr); -+ __put_user_asmx(op->val, ea, err, "sthcx.", cr); - break; - #endif - case 4: -diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S -index 9ef941d958d80..5473f9d03df3a 100644 ---- a/arch/powerpc/lib/test_emulate_step_exec_instr.S -+++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S -@@ -37,7 +37,7 @@ _GLOBAL(exec_instr) - * The stack pointer (GPR1) and the thread pointer (GPR13) are not - * saved as these should not be modified anyway. 
- */ -- SAVE_2GPRS(2, r1) -+ SAVE_GPRS(2, 3, r1) - SAVE_NVGPRS(r1) - - /* -@@ -75,8 +75,7 @@ _GLOBAL(exec_instr) - - /* Load GPRs from pt_regs */ - REST_GPR(0, r31) -- REST_10GPRS(2, r31) -- REST_GPR(12, r31) -+ REST_GPRS(2, 12, r31) - REST_NVGPRS(r31) - - /* Placeholder for the test instruction */ -@@ -99,8 +98,7 @@ _GLOBAL(exec_instr) - subi r3, r3, GPR0 - SAVE_GPR(0, r3) - SAVE_GPR(2, r3) -- SAVE_8GPRS(4, r3) -- SAVE_GPR(12, r3) -+ SAVE_GPRS(4, 12, r3) - SAVE_NVGPRS(r3) - - /* Save resulting LR to pt_regs */ -diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c -index 39b84e7452e1b..aa3bb8da1cb9b 100644 ---- a/arch/powerpc/math-emu/math_efp.c -+++ b/arch/powerpc/math-emu/math_efp.c -@@ -17,6 +17,7 @@ - - #include - #include -+#include - - #include - #include -diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c -index 27061583a0107..692c336e4f55b 100644 ---- a/arch/powerpc/mm/book3s32/mmu.c -+++ b/arch/powerpc/mm/book3s32/mmu.c -@@ -76,7 +76,7 @@ unsigned long p_block_mapped(phys_addr_t pa) - return 0; - } - --static int find_free_bat(void) -+int __init find_free_bat(void) - { - int b; - int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; -@@ -100,7 +100,7 @@ static int find_free_bat(void) - * - block size has to be a power of two. This is calculated by finding the - * highest bit set to 1. - */ --static unsigned int block_size(unsigned long base, unsigned long top) -+unsigned int bat_block_size(unsigned long base, unsigned long top) - { - unsigned int max_size = SZ_256M; - unsigned int base_shift = (ffs(base) - 1) & 31; -@@ -145,7 +145,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to - int idx; - - while ((idx = find_free_bat()) != -1 && base != top) { -- unsigned int size = block_size(base, top); -+ unsigned int size = bat_block_size(base, top); - - if (size < 128 << 10) - break; -@@ -159,8 +159,11 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to - unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) - { - unsigned long done; -- unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; -+ unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET; -+ unsigned long size; - -+ size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); -+ setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X); - - if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) { - pr_debug_once("Read-Write memory mapped without BATs\n"); -@@ -196,18 +199,17 @@ void mmu_mark_initmem_nx(void) - int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 
8 : 4; - int i; - unsigned long base = (unsigned long)_stext - PAGE_OFFSET; -- unsigned long top = (unsigned long)_etext - PAGE_OFFSET; -+ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K); - unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; - unsigned long size; - -- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { -- size = block_size(base, top); -+ for (i = 0; i < nb - 1 && base < top;) { -+ size = bat_block_size(base, top); - setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); - base += size; - } - if (base < top) { -- size = block_size(base, top); -- size = max(size, 128UL << 10); -+ size = bat_block_size(base, top); - if ((top - base) > size) { - size <<= 1; - if (strict_kernel_rwx_enabled() && base + size > border) -@@ -247,10 +249,9 @@ void mmu_mark_rodata_ro(void) - } - - /* -- * Set up one of the I/D BAT (block address translation) register pairs. -+ * Set up one of the D BAT (block address translation) register pairs. - * The parameters are not checked; in particular size must be a power - * of 2 between 128k and 256M. -- * On 603+, only set IBAT when _PAGE_EXEC is set - */ - void __init setbat(int index, unsigned long virt, phys_addr_t phys, - unsigned int size, pgprot_t prot) -@@ -286,10 +287,6 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, - /* G bit must be zero in IBATs */ - flags &= ~_PAGE_EXEC; - } -- if (flags & _PAGE_EXEC) -- bat[0] = bat[1]; -- else -- bat[0].batu = bat[0].batl = 0; - - bat_addrs[index].start = virt; - bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1; -diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c -index c145776d3ae5e..7bfd88c4b5470 100644 ---- a/arch/powerpc/mm/book3s64/hash_utils.c -+++ b/arch/powerpc/mm/book3s64/hash_utils.c -@@ -1522,8 +1522,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap, - } - EXPORT_SYMBOL_GPL(hash_page); - --DECLARE_INTERRUPT_HANDLER(__do_hash_fault); --DEFINE_INTERRUPT_HANDLER(__do_hash_fault) -+DEFINE_INTERRUPT_HANDLER(do_hash_fault) - { - unsigned long ea = regs->dar; - unsigned long dsisr = regs->dsisr; -@@ -1582,35 +1581,6 @@ DEFINE_INTERRUPT_HANDLER(__do_hash_fault) - } - } - --/* -- * The _RAW interrupt entry checks for the in_nmi() case before -- * running the full handler. -- */ --DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault) --{ -- /* -- * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then -- * don't call hash_page, just fail the fault. This is required to -- * prevent re-entrancy problems in the hash code, namely perf -- * interrupts hitting while something holds H_PAGE_BUSY, and taking a -- * hash fault. See the comment in hash_preload(). -- * -- * We come here as a result of a DSI at a point where we don't want -- * to call hash_page, such as when we are accessing memory (possibly -- * user memory) inside a PMU interrupt that occurred while interrupts -- * were soft-disabled. We want to invoke the exception handler for -- * the access, or panic if there isn't a handler. 
-- */ -- if (unlikely(in_nmi())) { -- do_bad_page_fault_segv(regs); -- return 0; -- } -- -- __do_hash_fault(regs); -- -- return 0; --} -- - #ifdef CONFIG_PPC_MM_SLICES - static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) - { -@@ -1677,26 +1647,18 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, - #endif /* CONFIG_PPC_64K_PAGES */ - - /* -- * __hash_page_* must run with interrupts off, as it sets the -- * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any -- * time and may take a hash fault reading the user stack, see -- * read_user_stack_slow() in the powerpc/perf code. -- * -- * If that takes a hash fault on the same page as we lock here, it -- * will bail out when seeing H_PAGE_BUSY set, and retry the access -- * leading to an infinite loop. -+ * __hash_page_* must run with interrupts off, including PMI interrupts -+ * off, as it sets the H_PAGE_BUSY bit. - * -- * Disabling interrupts here does not prevent perf interrupts, but it -- * will prevent them taking hash faults (see the NMI test in -- * do_hash_page), then read_user_stack's copy_from_user_nofault will -- * fail and perf will fall back to read_user_stack_slow(), which -- * walks the Linux page tables. -+ * It's otherwise possible for perf interrupts to hit at any time and -+ * may take a hash fault reading the user stack, which could take a -+ * hash miss and deadlock on the same H_PAGE_BUSY bit. - * - * Interrupts must also be off for the duration of the - * mm_is_thread_local test and update, to prevent preempt running the - * mm on another CPU (XXX: this may be racy vs kthread_use_mm). - */ -- local_irq_save(flags); -+ powerpc_local_irq_pmu_save(flags); - - /* Is that local to this CPU ? */ - if (mm_is_thread_local(mm)) -@@ -1721,7 +1683,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, - mm_ctx_user_psize(&mm->context), - pte_val(*ptep)); - -- local_irq_restore(flags); -+ powerpc_local_irq_pmu_restore(flags); - } - - /* -diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c -index ae20add7954a0..832dfc59fc6c6 100644 ---- a/arch/powerpc/mm/book3s64/radix_pgtable.c -+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c -@@ -232,6 +232,14 @@ void radix__mark_rodata_ro(void) - end = (unsigned long)__init_begin; - - radix__change_memory_range(start, end, _PAGE_WRITE); -+ -+ for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) { -+ end = start + PAGE_SIZE; -+ if (overlaps_interrupt_vector_text(start, end)) -+ radix__change_memory_range(start, end, _PAGE_WRITE); -+ else -+ break; -+ } - } - - void radix__mark_initmem_nx(void) -@@ -260,8 +268,24 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e - static unsigned long next_boundary(unsigned long addr, unsigned long end) - { - #ifdef CONFIG_STRICT_KERNEL_RWX -- if (addr < __pa_symbol(__init_begin)) -- return __pa_symbol(__init_begin); -+ unsigned long stext_phys; -+ -+ stext_phys = __pa_symbol(_stext); -+ -+ // Relocatable kernel running at non-zero real address -+ if (stext_phys != 0) { -+ // The end of interrupts code at zero is a rodata boundary -+ unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys; -+ if (addr < end_intr) -+ return end_intr; -+ -+ // Start of relocated kernel text is a rodata boundary -+ if (addr < stext_phys) -+ return stext_phys; -+ } -+ -+ if (addr < __pa_symbol(__srwx_boundary)) -+ return __pa_symbol(__srwx_boundary); - #endif - return end; - } -@@ 
-740,9 +764,9 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d) - } - - static void remove_pte_table(pte_t *pte_start, unsigned long addr, -- unsigned long end) -+ unsigned long end, bool direct) - { -- unsigned long next; -+ unsigned long next, pages = 0; - pte_t *pte; - - pte = pte_start + pte_index(addr); -@@ -764,13 +788,16 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, - } - - pte_clear(&init_mm, addr, pte); -+ pages++; - } -+ if (direct) -+ update_page_count(mmu_virtual_psize, -pages); - } - - static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr, -- unsigned long end) -+ unsigned long end, bool direct) - { -- unsigned long next; -+ unsigned long next, pages = 0; - pte_t *pte_base; - pmd_t *pmd; - -@@ -788,19 +815,22 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr, - continue; - } - pte_clear(&init_mm, addr, (pte_t *)pmd); -+ pages++; - continue; - } - - pte_base = (pte_t *)pmd_page_vaddr(*pmd); -- remove_pte_table(pte_base, addr, next); -+ remove_pte_table(pte_base, addr, next, direct); - free_pte_table(pte_base, pmd); - } -+ if (direct) -+ update_page_count(MMU_PAGE_2M, -pages); - } - - static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, -- unsigned long end) -+ unsigned long end, bool direct) - { -- unsigned long next; -+ unsigned long next, pages = 0; - pmd_t *pmd_base; - pud_t *pud; - -@@ -818,16 +848,20 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, - continue; - } - pte_clear(&init_mm, addr, (pte_t *)pud); -+ pages++; - continue; - } - - pmd_base = pud_pgtable(*pud); -- remove_pmd_table(pmd_base, addr, next); -+ remove_pmd_table(pmd_base, addr, next, direct); - free_pmd_table(pmd_base, pud); - } -+ if (direct) -+ update_page_count(MMU_PAGE_1G, -pages); - } - --static void __meminit remove_pagetable(unsigned long start, unsigned long end) -+static void __meminit remove_pagetable(unsigned long start, unsigned long end, -+ bool direct) - { - unsigned long addr, next; - pud_t *pud_base; -@@ -856,7 +890,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end) - } - - pud_base = p4d_pgtable(*p4d); -- remove_pud_table(pud_base, addr, next); -+ remove_pud_table(pud_base, addr, next, direct); - free_pud_table(pud_base, p4d); - } - -@@ -879,7 +913,7 @@ int __meminit radix__create_section_mapping(unsigned long start, - - int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) - { -- remove_pagetable(start, end); -+ remove_pagetable(start, end, true); - return 0; - } - #endif /* CONFIG_MEMORY_HOTPLUG */ -@@ -915,7 +949,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, - #ifdef CONFIG_MEMORY_HOTPLUG - void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size) - { -- remove_pagetable(start, start + page_size); -+ remove_pagetable(start, start + page_size, false); - } - #endif - #endif -@@ -954,15 +988,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre - pmd = *pmdp; - pmd_clear(pmdp); - -- /* -- * pmdp collapse_flush need to ensure that there are no parallel gup -- * walk after this call. This is needed so that we can have stable -- * page ref count when collapsing a page. We don't allow a collapse page -- * if we have gup taken on the page. We can ensure that by sending IPI -- * because gup walk happens with IRQ disabled. 
-- */ -- serialize_against_pte_lookup(vma->vm_mm); -- - radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); - - return pmd; -@@ -1030,8 +1055,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, - pte_t entry, unsigned long address, int psize) - { - struct mm_struct *mm = vma->vm_mm; -- unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | -- _PAGE_RW | _PAGE_EXEC); -+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY | -+ _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - - unsigned long change = pte_val(entry) ^ pte_val(*ptep); - /* -@@ -1093,7 +1118,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) - - int pud_clear_huge(pud_t *pud) - { -- if (pud_huge(*pud)) { -+ if (pud_is_leaf(*pud)) { - pud_clear(pud); - return 1; - } -@@ -1140,7 +1165,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) - - int pmd_clear_huge(pmd_t *pmd) - { -- if (pmd_huge(*pmd)) { -+ if (pmd_is_leaf(*pmd)) { - pmd_clear(pmd); - return 1; - } -diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c -index 7724af19ed7e6..6972fd5d423c0 100644 ---- a/arch/powerpc/mm/book3s64/radix_tlb.c -+++ b/arch/powerpc/mm/book3s64/radix_tlb.c -@@ -127,21 +127,6 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric) - trace_tlbie(0, 0, rb, rs, ric, prs, r); - } - --static __always_inline void __tlbie_pid_lpid(unsigned long pid, -- unsigned long lpid, -- unsigned long ric) --{ -- unsigned long rb, rs, prs, r; -- -- rb = PPC_BIT(53); /* IS = 1 */ -- rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); -- prs = 1; /* process scoped */ -- r = 1; /* radix format */ -- -- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) -- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); -- trace_tlbie(0, 0, rb, rs, ric, prs, r); --} - static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric) - { - unsigned long rb,rs,prs,r; -@@ -202,23 +187,6 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid, - trace_tlbie(0, 0, rb, rs, ric, prs, r); - } - --static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid, -- unsigned long lpid, -- unsigned long ap, unsigned long ric) --{ -- unsigned long rb, rs, prs, r; -- -- rb = va & ~(PPC_BITMASK(52, 63)); -- rb |= ap << PPC_BITLSHIFT(58); -- rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); -- prs = 1; /* process scoped */ -- r = 1; /* radix format */ -- -- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) -- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); -- trace_tlbie(0, 0, rb, rs, ric, prs, r); --} -- - static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid, - unsigned long ap, unsigned long ric) - { -@@ -264,22 +232,6 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid, - } - } - --static inline void fixup_tlbie_va_range_lpid(unsigned long va, -- unsigned long pid, -- unsigned long lpid, -- unsigned long ap) --{ -- if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { -- asm volatile("ptesync" : : : "memory"); -- __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); -- } -- -- if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { -- asm volatile("ptesync" : : : "memory"); -- __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); -- } --} -- - static inline void fixup_tlbie_pid(unsigned long pid) - { - /* -@@ -299,26 +251,6 @@ static inline void fixup_tlbie_pid(unsigned long pid) - } - } - --static inline void 
fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid) --{ -- /* -- * We can use any address for the invalidation, pick one which is -- * probably unused as an optimisation. -- */ -- unsigned long va = ((1UL << 52) - 1); -- -- if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { -- asm volatile("ptesync" : : : "memory"); -- __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); -- } -- -- if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { -- asm volatile("ptesync" : : : "memory"); -- __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), -- RIC_FLUSH_TLB); -- } --} -- - static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid, - unsigned long ap) - { -@@ -416,31 +348,6 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric) - asm volatile("eieio; tlbsync; ptesync": : :"memory"); - } - --static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid, -- unsigned long ric) --{ -- asm volatile("ptesync" : : : "memory"); -- -- /* -- * Workaround the fact that the "ric" argument to __tlbie_pid -- * must be a compile-time contraint to match the "i" constraint -- * in the asm statement. -- */ -- switch (ric) { -- case RIC_FLUSH_TLB: -- __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); -- fixup_tlbie_pid_lpid(pid, lpid); -- break; -- case RIC_FLUSH_PWC: -- __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); -- break; -- case RIC_FLUSH_ALL: -- default: -- __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); -- fixup_tlbie_pid_lpid(pid, lpid); -- } -- asm volatile("eieio; tlbsync; ptesync" : : : "memory"); --} - struct tlbiel_pid { - unsigned long pid; - unsigned long ric; -@@ -566,20 +473,6 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end, - fixup_tlbie_va_range(addr - page_size, pid, ap); - } - --static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end, -- unsigned long pid, unsigned long lpid, -- unsigned long page_size, -- unsigned long psize) --{ -- unsigned long addr; -- unsigned long ap = mmu_get_ap(psize); -- -- for (addr = start; addr < end; addr += page_size) -- __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); -- -- fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); --} -- - static __always_inline void _tlbie_va(unsigned long va, unsigned long pid, - unsigned long psize, unsigned long ric) - { -@@ -660,18 +553,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end, - asm volatile("eieio; tlbsync; ptesync": : :"memory"); - } - --static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end, -- unsigned long pid, unsigned long lpid, -- unsigned long page_size, -- unsigned long psize, bool also_pwc) --{ -- asm volatile("ptesync" : : : "memory"); -- if (also_pwc) -- __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); -- __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize); -- asm volatile("eieio; tlbsync; ptesync" : : : "memory"); --} -- - static inline void _tlbiel_va_range_multicast(struct mm_struct *mm, - unsigned long start, unsigned long end, - unsigned long pid, unsigned long page_size, -@@ -1171,15 +1052,12 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm, - } - } - } else { -- bool hflush = false; -+ bool hflush; - unsigned long hstart, hend; - -- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { -- hstart = (start + PMD_SIZE - 1) & PMD_MASK; -- hend = end & PMD_MASK; -- if (hstart < hend) -- hflush = true; -- } -+ hstart = (start + PMD_SIZE - 1) & PMD_MASK; -+ hend = end & PMD_MASK; -+ hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend; - 
- if (type == FLUSH_TYPE_LOCAL) { - asm volatile("ptesync": : :"memory"); -@@ -1471,6 +1349,127 @@ void radix__flush_tlb_all(void) - } - - #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -+static __always_inline void __tlbie_pid_lpid(unsigned long pid, -+ unsigned long lpid, -+ unsigned long ric) -+{ -+ unsigned long rb, rs, prs, r; -+ -+ rb = PPC_BIT(53); /* IS = 1 */ -+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); -+ prs = 1; /* process scoped */ -+ r = 1; /* radix format */ -+ -+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) -+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); -+ trace_tlbie(0, 0, rb, rs, ric, prs, r); -+} -+ -+static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid, -+ unsigned long lpid, -+ unsigned long ap, unsigned long ric) -+{ -+ unsigned long rb, rs, prs, r; -+ -+ rb = va & ~(PPC_BITMASK(52, 63)); -+ rb |= ap << PPC_BITLSHIFT(58); -+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31))); -+ prs = 1; /* process scoped */ -+ r = 1; /* radix format */ -+ -+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) -+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); -+ trace_tlbie(0, 0, rb, rs, ric, prs, r); -+} -+ -+static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid) -+{ -+ /* -+ * We can use any address for the invalidation, pick one which is -+ * probably unused as an optimisation. -+ */ -+ unsigned long va = ((1UL << 52) - 1); -+ -+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { -+ asm volatile("ptesync" : : : "memory"); -+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); -+ } -+ -+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { -+ asm volatile("ptesync" : : : "memory"); -+ __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), -+ RIC_FLUSH_TLB); -+ } -+} -+ -+static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid, -+ unsigned long ric) -+{ -+ asm volatile("ptesync" : : : "memory"); -+ -+ /* -+ * Workaround the fact that the "ric" argument to __tlbie_pid -+ * must be a compile-time contraint to match the "i" constraint -+ * in the asm statement. 
-+ */ -+ switch (ric) { -+ case RIC_FLUSH_TLB: -+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); -+ fixup_tlbie_pid_lpid(pid, lpid); -+ break; -+ case RIC_FLUSH_PWC: -+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); -+ break; -+ case RIC_FLUSH_ALL: -+ default: -+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL); -+ fixup_tlbie_pid_lpid(pid, lpid); -+ } -+ asm volatile("eieio; tlbsync; ptesync" : : : "memory"); -+} -+ -+static inline void fixup_tlbie_va_range_lpid(unsigned long va, -+ unsigned long pid, -+ unsigned long lpid, -+ unsigned long ap) -+{ -+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { -+ asm volatile("ptesync" : : : "memory"); -+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB); -+ } -+ -+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { -+ asm volatile("ptesync" : : : "memory"); -+ __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); -+ } -+} -+ -+static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end, -+ unsigned long pid, unsigned long lpid, -+ unsigned long page_size, -+ unsigned long psize) -+{ -+ unsigned long addr; -+ unsigned long ap = mmu_get_ap(psize); -+ -+ for (addr = start; addr < end; addr += page_size) -+ __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); -+ -+ fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); -+} -+ -+static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end, -+ unsigned long pid, unsigned long lpid, -+ unsigned long page_size, -+ unsigned long psize, bool also_pwc) -+{ -+ asm volatile("ptesync" : : : "memory"); -+ if (also_pwc) -+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); -+ __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize); -+ asm volatile("eieio; tlbsync; ptesync" : : : "memory"); -+} -+ - /* - * Performs process-scoped invalidations for a given LPID - * as part of H_RPT_INVALIDATE hcall. -diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c -index f0037bcc47a0e..a4fd2901189c5 100644 ---- a/arch/powerpc/mm/book3s64/slb.c -+++ b/arch/powerpc/mm/book3s64/slb.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c -index a8d0ce85d39ad..4a15172dfef29 100644 ---- a/arch/powerpc/mm/fault.c -+++ b/arch/powerpc/mm/fault.c -@@ -568,18 +568,24 @@ NOKPROBE_SYMBOL(hash__do_page_fault); - static void __bad_page_fault(struct pt_regs *regs, int sig) - { - int is_write = page_fault_is_write(regs->dsisr); -+ const char *msg; - - /* kernel has accessed a bad area */ - -+ if (regs->dar < PAGE_SIZE) -+ msg = "Kernel NULL pointer dereference"; -+ else -+ msg = "Unable to handle kernel data access"; -+ - switch (TRAP(regs)) { - case INTERRUPT_DATA_STORAGE: -- case INTERRUPT_DATA_SEGMENT: - case INTERRUPT_H_DATA_STORAGE: -- pr_alert("BUG: %s on %s at 0x%08lx\n", -- regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" : -- "Unable to handle kernel data access", -+ pr_alert("BUG: %s on %s at 0x%08lx\n", msg, - is_write ? 
"write" : "read", regs->dar); - break; -+ case INTERRUPT_DATA_SEGMENT: -+ pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar); -+ break; - case INTERRUPT_INST_STORAGE: - case INTERRUPT_INST_SEGMENT: - pr_alert("BUG: Unable to handle kernel instruction fetch%s", -diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c -index 386be136026e8..db040f34c0046 100644 ---- a/arch/powerpc/mm/init_64.c -+++ b/arch/powerpc/mm/init_64.c -@@ -188,7 +188,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star - unsigned long nr_pfn = page_size / sizeof(struct page); - unsigned long start_pfn = page_to_pfn((struct page *)start); - -- if ((start_pfn + nr_pfn) > altmap->end_pfn) -+ if ((start_pfn + nr_pfn - 1) > altmap->end_pfn) - return true; - - if (start_pfn < altmap->base_pfn) -@@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, - start = ALIGN_DOWN(start, page_size); - if (altmap) { - alt_start = altmap->base_pfn; -- alt_end = altmap->base_pfn + altmap->reserve + -- altmap->free + altmap->alloc + altmap->align; -+ alt_end = altmap->base_pfn + altmap->reserve + altmap->free; - } - - pr_debug("vmemmap_free %lx...%lx\n", start, end); -diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile -index bb1a5408b86b2..8636b17c6a20f 100644 ---- a/arch/powerpc/mm/kasan/Makefile -+++ b/arch/powerpc/mm/kasan/Makefile -@@ -1,6 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - - KASAN_SANITIZE := n -+KCOV_INSTRUMENT := n - - obj-$(CONFIG_PPC32) += kasan_init_32.o - obj-$(CONFIG_PPC_8xx) += 8xx.o -diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c -index 202bd260a0095..450a67ef0bbe1 100644 ---- a/arch/powerpc/mm/kasan/book3s_32.c -+++ b/arch/powerpc/mm/kasan/book3s_32.c -@@ -10,47 +10,51 @@ int __init kasan_init_region(void *start, size_t size) - { - unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); - unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); -- unsigned long k_cur = k_start; -- int k_size = k_end - k_start; -- int k_size_base = 1 << (ffs(k_size) - 1); -+ unsigned long k_nobat = k_start; -+ unsigned long k_cur; -+ phys_addr_t phys; - int ret; -- void *block; - -- block = memblock_alloc(k_size, k_size_base); -- -- if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) { -- int k_size_more = 1 << (ffs(k_size - k_size_base) - 1); -- -- setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL); -- if (k_size_more >= SZ_128K) -- setbat(-1, k_start + k_size_base, __pa(block) + k_size_base, -- k_size_more, PAGE_KERNEL); -- if (v_block_mapped(k_start)) -- k_cur = k_start + k_size_base; -- if (v_block_mapped(k_start + k_size_base)) -- k_cur = k_start + k_size_base + k_size_more; -- -- update_bats(); -+ while (k_nobat < k_end) { -+ unsigned int k_size = bat_block_size(k_nobat, k_end); -+ int idx = find_free_bat(); -+ -+ if (idx == -1) -+ break; -+ if (k_size < SZ_128K) -+ break; -+ phys = memblock_phys_alloc_range(k_size, k_size, 0, -+ MEMBLOCK_ALLOC_ANYWHERE); -+ if (!phys) -+ break; -+ -+ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL); -+ k_nobat += k_size; - } -+ if (k_nobat != k_start) -+ update_bats(); - -- if (!block) -- block = memblock_alloc(k_size, PAGE_SIZE); -- if (!block) -- return -ENOMEM; -+ if (k_nobat < k_end) { -+ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0, -+ MEMBLOCK_ALLOC_ANYWHERE); -+ if (!phys) -+ return -ENOMEM; -+ } - - ret = kasan_init_shadow_page_tables(k_start, k_end); - if (ret) - return ret; 
- -- kasan_update_early_region(k_start, k_cur, __pte(0)); -+ kasan_update_early_region(k_start, k_nobat, __pte(0)); - -- for (; k_cur < k_end; k_cur += PAGE_SIZE) { -+ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) { - pmd_t *pmd = pmd_off_k(k_cur); -- void *va = block + k_cur - k_start; -- pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); -+ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL); - - __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); - } - flush_tlb_kernel_range(k_start, k_end); -+ memset(kasan_mem_to_shadow(start), 0, k_end - k_start); -+ - return 0; - } -diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c -index cf8770b1a692e..f3e4d069e0ba7 100644 ---- a/arch/powerpc/mm/kasan/kasan_init_32.c -+++ b/arch/powerpc/mm/kasan/kasan_init_32.c -@@ -83,13 +83,12 @@ void __init - kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) - { - unsigned long k_cur; -- phys_addr_t pa = __pa(kasan_early_shadow_page); - - for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { - pmd_t *pmd = pmd_off_k(k_cur); - pte_t *ptep = pte_offset_kernel(pmd, k_cur); - -- if ((pte_val(*ptep) & PTE_RPN_MASK) != pa) -+ if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page))) - continue; - - __set_pte_at(&init_mm, k_cur, ptep, pte, 0); -diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c -index c3c4e31462eca..6902f453c7451 100644 ---- a/arch/powerpc/mm/mem.c -+++ b/arch/powerpc/mm/mem.c -@@ -20,8 +20,9 @@ - #include - #include - #include --#include - #include -+#include -+#include - - #include - -@@ -103,6 +104,37 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size) - vm_unmap_aliases(); - } - -+/* -+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need -+ * updating. 
-+ */ -+static void update_end_of_memory_vars(u64 start, u64 size) -+{ -+ unsigned long end_pfn = PFN_UP(start + size); -+ -+ if (end_pfn > max_pfn) { -+ max_pfn = end_pfn; -+ max_low_pfn = end_pfn; -+ high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; -+ } -+} -+ -+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, -+ struct mhp_params *params) -+{ -+ int ret; -+ -+ ret = __add_pages(nid, start_pfn, nr_pages, params); -+ if (ret) -+ return ret; -+ -+ /* update max_pfn, max_low_pfn and high_memory */ -+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, -+ nr_pages << PAGE_SHIFT); -+ -+ return ret; -+} -+ - int __ref arch_add_memory(int nid, u64 start, u64 size, - struct mhp_params *params) - { -@@ -113,7 +145,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, - rc = arch_create_linear_mapping(nid, start, size, params); - if (rc) - return rc; -- rc = __add_pages(nid, start_pfn, nr_pages, params); -+ rc = add_pages(nid, start_pfn, nr_pages, params); - if (rc) - arch_remove_linear_mapping(start, size); - return rc; -@@ -314,6 +346,7 @@ void free_initmem(void) - mark_initmem_nx(); - init_mem_is_free = true; - free_initmem_default(POISON_FREE_INITMEM); -+ ftrace_free_init_tramp(); - } - - /* -diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c -index 74246536b8326..aca34d37b5197 100644 ---- a/arch/powerpc/mm/mmu_context.c -+++ b/arch/powerpc/mm/mmu_context.c -@@ -81,7 +81,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, - * context - */ - if (cpu_has_feature(CPU_FTR_ALTIVEC)) -- asm volatile ("dssall"); -+ asm volatile (PPC_DSSALL); - - if (!new_on_cpu) - membarrier_arch_switch_mm(prev, next, tsk); -diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c -index 0df9fe29dd567..5348e1f9eb940 100644 ---- a/arch/powerpc/mm/nohash/8xx.c -+++ b/arch/powerpc/mm/nohash/8xx.c -@@ -183,8 +183,8 @@ void mmu_mark_initmem_nx(void) - unsigned long boundary = strict_kernel_rwx_enabled() ? 
sinittext : etext8; - unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); - -- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false); -- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); -+ if (!debug_pagealloc_enabled_or_kfence()) -+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); - - mmu_pin_tlb(block_mapped_ram, false); - } -diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c -index 77884e24281dd..3d845e001c874 100644 ---- a/arch/powerpc/mm/nohash/book3e_pgtable.c -+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c -@@ -95,8 +95,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) - pgdp = pgd_offset_k(ea); - p4dp = p4d_offset(pgdp, ea); - if (p4d_none(*p4dp)) { -- pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); -- p4d_populate(&init_mm, p4dp, pmdp); -+ pudp = early_alloc_pgtable(PUD_TABLE_SIZE); -+ p4d_populate(&init_mm, p4dp, pudp); - } - pudp = pud_offset(p4dp, ea); - if (pud_none(*pudp)) { -@@ -105,7 +105,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) - } - pmdp = pmd_offset(pudp, ea); - if (!pmd_present(*pmdp)) { -- ptep = early_alloc_pgtable(PAGE_SIZE); -+ ptep = early_alloc_pgtable(PTE_TABLE_SIZE); - pmd_populate_kernel(&init_mm, pmdp, ptep); - } - ptep = pte_offset_kernel(pmdp, ea); -diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c -index 4c74e8a5482bf..c555ad9fa00b1 100644 ---- a/arch/powerpc/mm/nohash/kaslr_booke.c -+++ b/arch/powerpc/mm/nohash/kaslr_booke.c -@@ -18,7 +18,6 @@ - #include - #include - #include --#include - #include - - struct regions { -@@ -36,10 +35,6 @@ struct regions { - int reserved_mem_size_cells; - }; - --/* Simplified build-specific string for starting entropy. */ --static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" -- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; -- - struct regions __initdata regions; - - static __init void kaslr_get_cmdline(void *fdt) -@@ -72,7 +67,8 @@ static unsigned long __init get_boot_seed(void *fdt) - { - unsigned long hash = 0; - -- hash = rotate_xor(hash, build_str, sizeof(build_str)); -+ /* build-specific string for starting entropy. */ -+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); - hash = rotate_xor(hash, fdt, fdt_totalsize(fdt)); - - return hash; -diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S -index bf24451f3e71f..9235e720e3572 100644 ---- a/arch/powerpc/mm/nohash/tlb_low_64e.S -+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S -@@ -222,7 +222,7 @@ tlb_miss_kernel_bolted: - - tlb_miss_fault_bolted: - /* We need to check if it was an instruction miss */ -- andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX -+ andi. 
r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX - bne itlb_miss_fault_bolted - dtlb_miss_fault_bolted: - tlb_epilog_bolted -@@ -239,7 +239,7 @@ itlb_miss_fault_bolted: - srdi r15,r16,60 /* get region */ - bne- itlb_miss_fault_bolted - -- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */ -+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */ - - /* We do the user/kernel test for the PID here along with the RW test - */ -@@ -614,7 +614,7 @@ itlb_miss_fault_e6500: - - /* We do the user/kernel test for the PID here along with the RW test - */ -- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */ -+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */ - oris r11,r11,_PAGE_ACCESSED@h - - cmpldi cr0,r15,0 /* Check for user region */ -@@ -734,7 +734,7 @@ normal_tlb_miss_done: - - normal_tlb_miss_access_fault: - /* We need to check if it was an instruction miss */ -- andi. r10,r11,_PAGE_EXEC -+ andi. r10,r11,_PAGE_BAP_UX - bne 1f - ld r14,EX_TLB_DEAR(r12) - ld r15,EX_TLB_ESR(r12) -diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c -index 6f14c8fb6359d..9c038c8cebebc 100644 ---- a/arch/powerpc/mm/numa.c -+++ b/arch/powerpc/mm/numa.c -@@ -367,6 +367,7 @@ void update_numa_distance(struct device_node *node) - WARN(numa_distance_table[nid][nid] == -1, - "NUMA distance details for node %d not provided\n", nid); - } -+EXPORT_SYMBOL_GPL(update_numa_distance); - - /* - * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN} -@@ -376,9 +377,9 @@ static void initialize_form2_numa_distance_lookup_table(void) - { - int i, j; - struct device_node *root; -- const __u8 *numa_dist_table; -+ const __u8 *form2_distances; - const __be32 *numa_lookup_index; -- int numa_dist_table_length; -+ int form2_distances_length; - int max_numa_index, distance_index; - - if (firmware_has_feature(FW_FEATURE_OPAL)) -@@ -392,45 +393,41 @@ static void initialize_form2_numa_distance_lookup_table(void) - max_numa_index = of_read_number(&numa_lookup_index[0], 1); - - /* first element of the array is the size and is encode-int */ -- numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL); -- numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1); -+ form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL); -+ form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1); - /* Skip the size which is encoded int */ -- numa_dist_table += sizeof(__be32); -+ form2_distances += sizeof(__be32); - -- pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n", -- numa_dist_table_length, max_numa_index); -+ pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n", -+ form2_distances_length, max_numa_index); - - for (i = 0; i < max_numa_index; i++) - /* +1 skip the max_numa_index in the property */ - numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1); - - -- if (numa_dist_table_length != max_numa_index * max_numa_index) { -+ if (form2_distances_length != max_numa_index * max_numa_index) { - WARN(1, "Wrong NUMA distance information\n"); -- /* consider everybody else just remote. 
*/ -- for (i = 0; i < max_numa_index; i++) { -- for (j = 0; j < max_numa_index; j++) { -- int nodeA = numa_id_index_table[i]; -- int nodeB = numa_id_index_table[j]; -- -- if (nodeA == nodeB) -- numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE; -- else -- numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE; -- } -- } -+ form2_distances = NULL; // don't use it - } -- - distance_index = 0; - for (i = 0; i < max_numa_index; i++) { - for (j = 0; j < max_numa_index; j++) { - int nodeA = numa_id_index_table[i]; - int nodeB = numa_id_index_table[j]; -- -- numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++]; -- pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]); -+ int dist; -+ -+ if (form2_distances) -+ dist = form2_distances[distance_index++]; -+ else if (nodeA == nodeB) -+ dist = LOCAL_DISTANCE; -+ else -+ dist = REMOTE_DISTANCE; -+ numa_distance_table[nodeA][nodeB] = dist; -+ pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist); - } - } -+ - of_node_put(root); - } - -@@ -960,7 +957,9 @@ static int __init parse_numa_properties(void) - of_node_put(cpu); - } - -- node_set_online(nid); -+ /* node_set_online() is an UB if 'nid' is negative */ -+ if (likely(nid >= 0)) -+ node_set_online(nid); - } - - get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); -diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c -index edea388e9d3fb..85753e32a4de9 100644 ---- a/arch/powerpc/mm/pageattr.c -+++ b/arch/powerpc/mm/pageattr.c -@@ -15,12 +15,14 @@ - #include - - -+static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr, -+ unsigned long old, unsigned long new) -+{ -+ return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0); -+} -+ - /* -- * Updates the attributes of a page in three steps: -- * -- * 1. take the page_table_lock -- * 2. install the new entry with the updated attributes -- * 3. flush the TLB -+ * Updates the attributes of a page atomically. - * - * This sequence is safe against concurrent updates, and also allows updating the - * attributes of a page currently being executed or accessed. 
-@@ -28,41 +30,39 @@ - static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) - { - long action = (long)data; -- pte_t pte; - -- spin_lock(&init_mm.page_table_lock); -- -- pte = ptep_get(ptep); -- -- /* modify the PTE bits as desired, then apply */ -+ /* modify the PTE bits as desired */ - switch (action) { - case SET_MEMORY_RO: -- pte = pte_wrprotect(pte); -+ /* Don't clear DIRTY bit */ -+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO); - break; - case SET_MEMORY_RW: -- pte = pte_mkwrite(pte_mkdirty(pte)); -+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW); - break; - case SET_MEMORY_NX: -- pte = pte_exprotect(pte); -+ pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO); - break; - case SET_MEMORY_X: -- pte = pte_mkexec(pte); -+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX); -+ break; -+ case SET_MEMORY_NP: -+ pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0); -+ break; -+ case SET_MEMORY_P: -+ pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0); - break; - default: - WARN_ON_ONCE(1); - break; - } - -- pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0); -- - /* See ptesync comment in radix__set_pte_at() */ - if (radix_enabled()) - asm volatile("ptesync": : :"memory"); - - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); - -- spin_unlock(&init_mm.page_table_lock); -- - return 0; - } - -@@ -96,36 +96,3 @@ int change_memory_attr(unsigned long addr, int numpages, long action) - return apply_to_existing_page_range(&init_mm, start, size, - change_page_attr, (void *)action); - } -- --/* -- * Set the attributes of a page: -- * -- * This function is used by PPC32 at the end of init to set final kernel memory -- * protection. It includes changing the maping of the page it is executing from -- * and data pages it is using. -- */ --static int set_page_attr(pte_t *ptep, unsigned long addr, void *data) --{ -- pgprot_t prot = __pgprot((unsigned long)data); -- -- spin_lock(&init_mm.page_table_lock); -- -- set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot)); -- flush_tlb_kernel_range(addr, addr + PAGE_SIZE); -- -- spin_unlock(&init_mm.page_table_lock); -- -- return 0; --} -- --int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot) --{ -- unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE); -- unsigned long sz = numpages * PAGE_SIZE; -- -- if (numpages <= 0) -- return 0; -- -- return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr, -- (void *)pgprot_val(prot)); --} -diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c -index cd16b407f47e1..9a93c1a5aa1d1 100644 ---- a/arch/powerpc/mm/pgtable.c -+++ b/arch/powerpc/mm/pgtable.c -@@ -203,6 +203,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - __set_pte_at(mm, addr, ptep, pte, 0); - } - -+void unmap_kernel_page(unsigned long va) -+{ -+ pmd_t *pmdp = pmd_off_k(va); -+ pte_t *ptep = pte_offset_kernel(pmdp, va); -+ -+ pte_clear(&init_mm, va, ptep); -+ flush_tlb_kernel_range(va, va + PAGE_SIZE); -+} -+ - /* - * This is called when relaxing access to a PTE. 
It's also called in the page - * fault path when we don't hit any of the major fault cases, ie, a minor -diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c -index dcf5ecca19d99..502e3d3d1dbf7 100644 ---- a/arch/powerpc/mm/pgtable_32.c -+++ b/arch/powerpc/mm/pgtable_32.c -@@ -138,10 +138,12 @@ void mark_initmem_nx(void) - unsigned long numpages = PFN_UP((unsigned long)_einittext) - - PFN_DOWN((unsigned long)_sinittext); - -- if (v_block_mapped((unsigned long)_sinittext)) -- mmu_mark_initmem_nx(); -- else -- set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL); -+ mmu_mark_initmem_nx(); -+ -+ if (!v_block_mapped((unsigned long)_sinittext)) { -+ set_memory_nx((unsigned long)_sinittext, numpages); -+ set_memory_rw((unsigned long)_sinittext, numpages); -+ } - } - - #ifdef CONFIG_STRICT_KERNEL_RWX -@@ -155,25 +157,21 @@ void mark_rodata_ro(void) - return; - } - -- numpages = PFN_UP((unsigned long)_etext) - -- PFN_DOWN((unsigned long)_stext); -- -- set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX); - /* -- * mark .rodata as read only. Use __init_begin rather than __end_rodata -- * to cover NOTES and EXCEPTION_TABLE. -+ * mark .text and .rodata as read only. Use __init_begin rather than -+ * __end_rodata to cover NOTES and EXCEPTION_TABLE. - */ - numpages = PFN_UP((unsigned long)__init_begin) - -- PFN_DOWN((unsigned long)__start_rodata); -+ PFN_DOWN((unsigned long)_stext); - -- set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO); -+ set_memory_ro((unsigned long)_stext, numpages); - - // mark_initmem_nx() should have already run by now - ptdump_check_wx(); - } - #endif - --#ifdef CONFIG_DEBUG_PAGEALLOC -+#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC) - void __kernel_map_pages(struct page *page, int numpages, int enable) - { - unsigned long addr = (unsigned long)page_address(page); -@@ -182,8 +180,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) - return; - - if (enable) -- set_memory_attr(addr, numpages, PAGE_KERNEL); -+ set_memory_p(addr, numpages); - else -- set_memory_attr(addr, numpages, __pgprot(0)); -+ set_memory_np(addr, numpages); - } - #endif /* CONFIG_DEBUG_PAGEALLOC */ -diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c -index 78c8cf01db5f9..175aabf101e87 100644 ---- a/arch/powerpc/mm/pgtable_64.c -+++ b/arch/powerpc/mm/pgtable_64.c -@@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift); - struct page *p4d_page(p4d_t p4d) - { - if (p4d_is_leaf(p4d)) { -- VM_WARN_ON(!p4d_huge(p4d)); -+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) -+ VM_WARN_ON(!p4d_huge(p4d)); - return pte_page(p4d_pte(p4d)); - } - return virt_to_page(p4d_pgtable(p4d)); -@@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d) - struct page *pud_page(pud_t pud) - { - if (pud_is_leaf(pud)) { -- VM_WARN_ON(!pud_huge(pud)); -+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) -+ VM_WARN_ON(!pud_huge(pud)); - return pte_page(pud_pte(pud)); - } - return virt_to_page(pud_pgtable(pud)); -@@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud) - struct page *pmd_page(pmd_t pmd) - { - if (pmd_is_leaf(pmd)) { -- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); -+ /* -+ * vmalloc_to_page may be called on any vmap address (not only -+ * vmalloc), and it uses pmd_page() etc., when huge vmap is -+ * enabled so these checks can't be used. 
-+ */ -+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) -+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); - return pte_page(pmd_pte(pmd)); - } - return virt_to_page(pmd_page_vaddr(pmd)); -diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c -index bf251191e78d9..32bfb215c4858 100644 ---- a/arch/powerpc/mm/ptdump/ptdump.c -+++ b/arch/powerpc/mm/ptdump/ptdump.c -@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) - { - pte_t pte = __pte(st->current_flags); - -- if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx) -+ if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx) - return; - - if (!pte_write(pte) || !pte_exec(pte)) -diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c -index 03607ab90c66f..f884760ca5cfe 100644 ---- a/arch/powerpc/mm/ptdump/shared.c -+++ b/arch/powerpc/mm/ptdump/shared.c -@@ -17,9 +17,9 @@ static const struct flag_info flag_array[] = { - .clear = " ", - }, { - .mask = _PAGE_RW, -- .val = _PAGE_RW, -- .set = "rw", -- .clear = "r ", -+ .val = 0, -+ .set = "r ", -+ .clear = "rw", - }, { - .mask = _PAGE_EXEC, - .val = _PAGE_EXEC, -diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c -index fcbf7a917c566..8acf8a611a265 100644 ---- a/arch/powerpc/net/bpf_jit_comp.c -+++ b/arch/powerpc/net/bpf_jit_comp.c -@@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size) - memset32(area, BREAKPOINT_INSTRUCTION, size / 4); - } - --/* Fix the branch target addresses for subprog calls */ --static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, -- struct codegen_context *ctx, u32 *addrs) -+/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */ -+static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image, -+ struct codegen_context *ctx, u32 *addrs) - { - const struct bpf_insn *insn = fp->insnsi; - bool func_addr_fixed; - u64 func_addr; - u32 tmp_idx; -- int i, ret; -+ int i, j, ret; - - for (i = 0; i < fp->len; i++) { - /* -@@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image, - * of the JITed sequence remains unchanged. - */ - ctx->idx = tmp_idx; -+ } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) { -+ tmp_idx = ctx->idx; -+ ctx->idx = addrs[i] / 4; -+#ifdef CONFIG_PPC32 -+ PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm); -+ PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm); -+ for (j = ctx->idx - addrs[i] / 4; j < 4; j++) -+ EMIT(PPC_RAW_NOP()); -+#else -+ func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32); -+ PPC_LI64(b2p[insn[i].dst_reg], func_addr); -+ /* overwrite rest with nops */ -+ for (j = ctx->idx - addrs[i] / 4; j < 5; j++) -+ EMIT(PPC_RAW_NOP()); -+#endif -+ ctx->idx = tmp_idx; -+ i++; - } - } - -@@ -193,13 +210,13 @@ skip_init_ctx: - /* - * Do not touch the prologue and epilogue as they will remain - * unchanged. Only fix the branch target address for subprog -- * calls in the body. -+ * calls in the body, and ldimm64 instructions. - * - * This does not change the offsets and lengths of the subprog - * call instruction sequences and hence, the size of the JITed - * image as well. - */ -- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs); -+ bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs); - - /* There is no need to perform the usual passes. 
*/ - goto skip_codegen_passes; -@@ -241,8 +258,8 @@ skip_codegen_passes: - fp->jited_len = alloclen; - - bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); -- bpf_jit_binary_lock_ro(bpf_hdr); - if (!fp->is_func || extra_pass) { -+ bpf_jit_binary_lock_ro(bpf_hdr); - bpf_prog_fill_jited_linfo(fp, addrs); - out_addrs: - kfree(addrs); -diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c -index 0da31d41d4131..bce5eda85170f 100644 ---- a/arch/powerpc/net/bpf_jit_comp32.c -+++ b/arch/powerpc/net/bpf_jit_comp32.c -@@ -191,6 +191,9 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun - - if (image && rel < 0x2000000 && rel >= -0x2000000) { - PPC_BL_ABS(func); -+ EMIT(PPC_RAW_NOP()); -+ EMIT(PPC_RAW_NOP()); -+ EMIT(PPC_RAW_NOP()); - } else { - /* Load function address into r0 */ - EMIT(PPC_RAW_LIS(_R0, IMM_H(func))); -@@ -289,6 +292,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * - bool func_addr_fixed; - u64 func_addr; - u32 true_cond; -+ u32 tmp_idx; -+ int j; - - /* - * addrs[] maps a BPF bytecode address into a real offset from -@@ -836,8 +841,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * - * 16 byte instruction that uses two 'struct bpf_insn' - */ - case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ -+ tmp_idx = ctx->idx; - PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm); - PPC_LI32(dst_reg, (u32)insn[i].imm); -+ /* padding to allow full 4 instructions for later patching */ -+ for (j = ctx->idx - tmp_idx; j < 4; j++) -+ EMIT(PPC_RAW_NOP()); - /* Adjust for two bpf instructions */ - addrs[++i] = ctx->idx * 4; - break; -diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c -index 8b5157ccfebae..57e1b6680365c 100644 ---- a/arch/powerpc/net/bpf_jit_comp64.c -+++ b/arch/powerpc/net/bpf_jit_comp64.c -@@ -318,6 +318,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context * - u64 imm64; - u32 true_cond; - u32 tmp_idx; -+ int j; - - /* - * addrs[] maps a BPF bytecode address into a real offset from -@@ -632,17 +633,21 @@ bpf_alu32_trunc: - EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); - break; - case 64: -- /* -- * Way easier and faster(?) 
to store the value -- * into stack and then use ldbrx -- * -- * ctx->seen will be reliable in pass2, but -- * the instructions generated will remain the -- * same across all passes -- */ -+ /* Store the value to stack and then use byte-reverse loads */ - PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); - EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); -- EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); -+ if (cpu_has_feature(CPU_FTR_ARCH_206)) { -+ EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); -+ } else { -+ EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1])); -+ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) -+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32)); -+ EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4)); -+ EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1])); -+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) -+ EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32)); -+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2])); -+ } - break; - } - break; -@@ -806,9 +811,13 @@ emit_clear: - case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */ - imm64 = ((u64)(u32) insn[i].imm) | - (((u64)(u32) insn[i+1].imm) << 32); -+ tmp_idx = ctx->idx; -+ PPC_LI64(dst_reg, imm64); -+ /* padding to allow full 5 instructions for later patching */ -+ for (j = ctx->idx - tmp_idx; j < 5; j++) -+ EMIT(PPC_RAW_NOP()); - /* Adjust for two bpf instructions */ - addrs[++i] = ctx->idx * 4; -- PPC_LI64(dst_reg, imm64); - break; - - /* -diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile -index 2f46e31c76129..4f53d0b97539b 100644 ---- a/arch/powerpc/perf/Makefile -+++ b/arch/powerpc/perf/Makefile -@@ -3,11 +3,11 @@ - obj-y += callchain.o callchain_$(BITS).o perf_regs.o - obj-$(CONFIG_COMPAT) += callchain_32.o - --obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o -+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o - obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \ - power5+-pmu.o power6-pmu.o power7-pmu.o \ - isa207-common.o power8-pmu.o power9-pmu.o \ -- generic-compat-pmu.o power10-pmu.o -+ generic-compat-pmu.o power10-pmu.o bhrb.o - obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o - - obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o -diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c -index 082f6d0308a47..8718289c051dd 100644 ---- a/arch/powerpc/perf/callchain.c -+++ b/arch/powerpc/perf/callchain.c -@@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re - next_sp = fp[0]; - - if (next_sp == sp + STACK_INT_FRAME_SIZE && -+ validate_sp(sp, current, STACK_INT_FRAME_SIZE) && - fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { - /* - * This looks like an interrupt frame for an -diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h -index d6fa6e25234f4..19a8d051ddf10 100644 ---- a/arch/powerpc/perf/callchain.h -+++ b/arch/powerpc/perf/callchain.h -@@ -2,7 +2,6 @@ - #ifndef _POWERPC_PERF_CALLCHAIN_H - #define _POWERPC_PERF_CALLCHAIN_H - --int read_user_stack_slow(const void __user *ptr, void *buf, int nb); - void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs); - void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, -@@ -26,17 +25,11 @@ static inline int __read_user_stack(const void __user *ptr, void *ret, - size_t size) - { - unsigned long addr = (unsigned long)ptr; -- int rc; - - if (addr > TASK_SIZE - size || (addr & (size - 1))) - return -EFAULT; - -- rc = copy_from_user_nofault(ret, ptr, size); -- -- if (IS_ENABLED(CONFIG_PPC64) && !radix_enabled() && rc) -- 
return read_user_stack_slow(ptr, ret, size); -- -- return rc; -+ return copy_from_user_nofault(ret, ptr, size); - } - - #endif /* _POWERPC_PERF_CALLCHAIN_H */ -diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c -index 8d0df4226328d..488e8a21a11ea 100644 ---- a/arch/powerpc/perf/callchain_64.c -+++ b/arch/powerpc/perf/callchain_64.c -@@ -18,33 +18,6 @@ - - #include "callchain.h" - --/* -- * On 64-bit we don't want to invoke hash_page on user addresses from -- * interrupt context, so if the access faults, we read the page tables -- * to find which page (if any) is mapped and access it directly. Radix -- * has no need for this so it doesn't use read_user_stack_slow. -- */ --int read_user_stack_slow(const void __user *ptr, void *buf, int nb) --{ -- -- unsigned long addr = (unsigned long) ptr; -- unsigned long offset; -- struct page *page; -- void *kaddr; -- -- if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) { -- kaddr = page_address(page); -- -- /* align address to page boundary */ -- offset = addr & ~PAGE_MASK; -- -- memcpy(buf, kaddr + offset, nb); -- put_page(page); -- return 0; -- } -- return -EFAULT; --} -- - static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret) - { - return __read_user_stack(ptr, ret, sizeof(*ret)); -diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c -index 73e62e9b179bc..1078784b74c9b 100644 ---- a/arch/powerpc/perf/core-book3s.c -+++ b/arch/powerpc/perf/core-book3s.c -@@ -857,6 +857,19 @@ static void write_pmc(int idx, unsigned long val) - } - } - -+static int any_pmc_overflown(struct cpu_hw_events *cpuhw) -+{ -+ int i, idx; -+ -+ for (i = 0; i < cpuhw->n_events; i++) { -+ idx = cpuhw->event[i]->hw.idx; -+ if ((idx) && ((int)read_pmc(idx) < 0)) -+ return idx; -+ } -+ -+ return 0; -+} -+ - /* Called from sysrq_handle_showregs() */ - void perf_event_print_debug(void) - { -@@ -1281,11 +1294,13 @@ static void power_pmu_disable(struct pmu *pmu) - - /* - * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56 -+ * Also clear PMXE to disable PMI's getting triggered in some -+ * corner cases during PMU disable. - */ - val = mmcr0 = mfspr(SPRN_MMCR0); - val |= MMCR0_FC; - val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO | -- MMCR0_FC56); -+ MMCR0_PMXE | MMCR0_FC56); - /* Set mmcr0 PMCCEXT for p10 */ - if (ppmu->flags & PPMU_ARCH_31) - val |= MMCR0_PMCCEXT; -@@ -1299,6 +1314,29 @@ static void power_pmu_disable(struct pmu *pmu) - mb(); - isync(); - -+ /* -+ * Some corner cases could clear the PMU counter overflow -+ * while a masked PMI is pending. One such case is when -+ * a PMI happens during interrupt replay and perf counter -+ * values are cleared by PMU callbacks before replay. -+ * -+ * Disable the interrupt by clearing the paca bit for PMI -+ * since we are disabling the PMU now. Otherwise provide a -+ * warning if there is PMI pending, but no counter is found -+ * overflown. -+ * -+ * Since power_pmu_disable runs under local_irq_save, it -+ * could happen that code hits a PMC overflow without PMI -+ * pending in paca. Hence only clear PMI pending if it was -+ * set. -+ * -+ * If a PMI is pending, then MSR[EE] must be disabled (because -+ * the masked PMI handler disabling EE). So it is safe to -+ * call clear_pmi_irq_pending(). -+ */ -+ if (pmi_irq_pending()) -+ clear_pmi_irq_pending(); -+ - val = mmcra = cpuhw->mmcr.mmcra; - - /* -@@ -1390,6 +1428,15 @@ static void power_pmu_enable(struct pmu *pmu) - * (possibly updated for removal of events). 
- */ - if (!cpuhw->n_added) { -+ /* -+ * If there is any active event with an overflown PMC -+ * value, set back PACA_IRQ_PMI which would have been -+ * cleared in power_pmu_disable(). -+ */ -+ hard_irq_disable(); -+ if (any_pmc_overflown(cpuhw)) -+ set_pmi_irq_pending(); -+ - mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); - mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); - if (ppmu->flags & PPMU_ARCH_31) -@@ -2337,6 +2384,14 @@ static void __perf_event_interrupt(struct pt_regs *regs) - break; - } - } -+ -+ /* -+ * Clear PACA_IRQ_PMI in case it was set by -+ * set_pmi_irq_pending() when PMU was enabled -+ * after accounting for interrupts. -+ */ -+ clear_pmi_irq_pending(); -+ - if (!active) - /* reset non active counters that have overflowed */ - write_pmc(i + 1, 0); -@@ -2356,6 +2411,13 @@ static void __perf_event_interrupt(struct pt_regs *regs) - } - } - } -+ -+ /* -+ * During system wide profling or while specific CPU is monitored for an -+ * event, some corner cases could cause PMC to overflow in idle path. This -+ * will trigger a PMI after waking up from idle. Since counter values are _not_ -+ * saved/restored in idle path, can lead to below "Can't find PMC" message. -+ */ - if (unlikely(!found) && !arch_irq_disabled_regs(regs)) - printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n"); - -diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c -index ee721f420a7ba..1a53ab08447cb 100644 ---- a/arch/powerpc/perf/core-fsl-emb.c -+++ b/arch/powerpc/perf/core-fsl-emb.c -@@ -645,7 +645,6 @@ static void perf_event_interrupt(struct pt_regs *regs) - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); - struct perf_event *event; - unsigned long val; -- int found = 0; - - for (i = 0; i < ppmu->n_counter; ++i) { - event = cpuhw->event[i]; -@@ -654,7 +653,6 @@ static void perf_event_interrupt(struct pt_regs *regs) - if ((int)val < 0) { - if (event) { - /* event has overflowed */ -- found = 1; - record_and_restart(event, val, regs); - } else { - /* -@@ -672,11 +670,13 @@ static void perf_event_interrupt(struct pt_regs *regs) - isync(); - } - --void hw_perf_event_setup(int cpu) -+static int fsl_emb_pmu_prepare_cpu(unsigned int cpu) - { - struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); - - memset(cpuhw, 0, sizeof(*cpuhw)); -+ -+ return 0; - } - - int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) -@@ -689,6 +689,8 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) - pmu->name); - - perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); -+ cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare", -+ fsl_emb_pmu_prepare_cpu, NULL); - - return 0; - } -diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h -index 8965b4463d433..5e86371a20c78 100644 ---- a/arch/powerpc/perf/hv-gpci-requests.h -+++ b/arch/powerpc/perf/hv-gpci-requests.h -@@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) - ) - #include I(REQUEST_END) - -+#ifdef ENABLE_EVENTS_COUNTERINFO_V6 - /* - * Not available for counter_info_version >= 0x8, use - * run_instruction_cycles_by_partition(0x100) instead. 
-@@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) - __count(0x10, 8, cycles) - ) - #include I(REQUEST_END) -+#endif - - #define REQUEST_NAME system_performance_capabilities - #define REQUEST_NUM 0x40 -@@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) - ) - #include I(REQUEST_END) - -+#ifdef ENABLE_EVENTS_COUNTERINFO_V6 - #define REQUEST_NAME processor_bus_utilization_abc_links - #define REQUEST_NUM 0x50 - #define REQUEST_IDX_KIND "hw_chip_id=?" -@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) - __count(0x28, 8, instructions_completed) - ) - #include I(REQUEST_END) -+#endif - - /* Processor_core_power_mode (0x95) skipped, no counters */ - /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, -diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c -index c756228a081fb..28b770bbc10b4 100644 ---- a/arch/powerpc/perf/hv-gpci.c -+++ b/arch/powerpc/perf/hv-gpci.c -@@ -72,7 +72,7 @@ static struct attribute_group format_group = { - - static struct attribute_group event_group = { - .name = "events", -- .attrs = hv_gpci_event_attrs, -+ /* .attrs is set in init */ - }; - - #define HV_CAPS_ATTR(_name, _format) \ -@@ -330,6 +330,7 @@ static int hv_gpci_init(void) - int r; - unsigned long hret; - struct hv_perf_caps caps; -+ struct hv_gpci_request_buffer *arg; - - hv_gpci_assert_offsets_correct(); - -@@ -353,6 +354,36 @@ static int hv_gpci_init(void) - /* sampling not supported */ - h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - -+ arg = (void *)get_cpu_var(hv_gpci_reqb); -+ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); -+ -+ /* -+ * hcall H_GET_PERF_COUNTER_INFO populates the output -+ * counter_info_version value based on the system hypervisor. -+ * Pass the counter request 0x10 corresponds to request type -+ * 'Dispatch_timebase_by_processor', to get the supported -+ * counter_info_version. -+ */ -+ arg->params.counter_request = cpu_to_be32(0x10); -+ -+ r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, -+ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); -+ if (r) { -+ pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); -+ arg->params.counter_info_version_out = 0x8; -+ } -+ -+ /* -+ * Use counter_info_version_out value to assign -+ * required hv-gpci event list. 
-+ */ -+ if (arg->params.counter_info_version_out >= 0x8) -+ event_group.attrs = hv_gpci_event_attrs; -+ else -+ event_group.attrs = hv_gpci_event_attrs_v6; -+ -+ put_cpu_var(hv_gpci_reqb); -+ - r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); - if (r) - return r; -diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h -index 4d108262bed79..c72020912dea5 100644 ---- a/arch/powerpc/perf/hv-gpci.h -+++ b/arch/powerpc/perf/hv-gpci.h -@@ -26,6 +26,7 @@ enum { - #define REQUEST_FILE "../hv-gpci-requests.h" - #define NAME_LOWER hv_gpci - #define NAME_UPPER HV_GPCI -+#define ENABLE_EVENTS_COUNTERINFO_V6 - #include "req-gen/perf.h" - #undef REQUEST_FILE - #undef NAME_LOWER -diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c -index e106909ff9c37..b8a100b9736c7 100644 ---- a/arch/powerpc/perf/imc-pmu.c -+++ b/arch/powerpc/perf/imc-pmu.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - /* Nest IMC data structures and variables */ - -@@ -49,7 +50,7 @@ static int trace_imc_mem_size; - * core and trace-imc - */ - static struct imc_pmu_ref imc_global_refc = { -- .lock = __MUTEX_INITIALIZER(imc_global_refc.lock), -+ .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock), - .id = 0, - .refc = 0, - }; -@@ -393,7 +394,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu) - get_hard_smp_processor_id(cpu)); - /* - * If this is the last cpu in this chip then, skip the reference -- * count mutex lock and make the reference count on this chip zero. -+ * count lock and make the reference count on this chip zero. - */ - ref = get_nest_pmu_ref(cpu); - if (!ref) -@@ -455,15 +456,15 @@ static void nest_imc_counters_release(struct perf_event *event) - /* - * See if we need to disable the nest PMU. - * If no events are currently in use, then we have to take a -- * mutex to ensure that we don't race with another task doing -+ * lock to ensure that we don't race with another task doing - * enable or disable the nest counters. - */ - ref = get_nest_pmu_ref(event->cpu); - if (!ref) - return; - -- /* Take the mutex lock for this node and then decrement the reference count */ -- mutex_lock(&ref->lock); -+ /* Take the lock for this node and then decrement the reference count */ -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - /* - * The scenario where this is true is, when perf session is -@@ -475,7 +476,7 @@ static void nest_imc_counters_release(struct perf_event *event) - * an OPAL call to disable the engine in that node. - * - */ -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - return; - } - ref->refc--; -@@ -483,7 +484,7 @@ static void nest_imc_counters_release(struct perf_event *event) - rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, - get_hard_smp_processor_id(event->cpu)); - if (rc) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); - return; - } -@@ -491,7 +492,7 @@ static void nest_imc_counters_release(struct perf_event *event) - WARN(1, "nest-imc: Invalid event reference count\n"); - ref->refc = 0; - } -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - } - - static int nest_imc_event_init(struct perf_event *event) -@@ -550,26 +551,25 @@ static int nest_imc_event_init(struct perf_event *event) - - /* - * Get the imc_pmu_ref struct for this node. -- * Take the mutex lock and then increment the count of nest pmu events -- * inited. -+ * Take the lock and then increment the count of nest pmu events inited. 
- */ - ref = get_nest_pmu_ref(event->cpu); - if (!ref) - return -EINVAL; - -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST, - get_hard_smp_processor_id(event->cpu)); - if (rc) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("nest-imc: Unable to start the counters for node %d\n", - node_id); - return rc; - } - } - ++ref->refc; -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - - event->destroy = nest_imc_counters_release; - return 0; -@@ -605,9 +605,8 @@ static int core_imc_mem_init(int cpu, int size) - return -ENOMEM; - mem_info->vbase = page_address(page); - -- /* Init the mutex */ - core_imc_refc[core_id].id = core_id; -- mutex_init(&core_imc_refc[core_id].lock); -+ spin_lock_init(&core_imc_refc[core_id].lock); - - rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE, - __pa((void *)mem_info->vbase), -@@ -696,9 +695,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) - perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); - } else { - /* -- * If this is the last cpu in this core then, skip taking refernce -- * count mutex lock for this core and directly zero "refc" for -- * this core. -+ * If this is the last cpu in this core then skip taking reference -+ * count lock for this core and directly zero "refc" for this core. - */ - opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, - get_hard_smp_processor_id(cpu)); -@@ -713,11 +711,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) - * last cpu in this core and core-imc event running - * in this cpu. - */ -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - if (imc_global_refc.id == IMC_DOMAIN_CORE) - imc_global_refc.refc--; - -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - } - return 0; - } -@@ -732,7 +730,7 @@ static int core_imc_pmu_cpumask_init(void) - - static void reset_global_refc(struct perf_event *event) - { -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - imc_global_refc.refc--; - - /* -@@ -744,7 +742,7 @@ static void reset_global_refc(struct perf_event *event) - imc_global_refc.refc = 0; - imc_global_refc.id = 0; - } -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - } - - static void core_imc_counters_release(struct perf_event *event) -@@ -757,17 +755,17 @@ static void core_imc_counters_release(struct perf_event *event) - /* - * See if we need to disable the IMC PMU. - * If no events are currently in use, then we have to take a -- * mutex to ensure that we don't race with another task doing -+ * lock to ensure that we don't race with another task doing - * enable or disable the core counters. - */ - core_id = event->cpu / threads_per_core; - -- /* Take the mutex lock and decrement the refernce count for this core */ -+ /* Take the lock and decrement the refernce count for this core */ - ref = &core_imc_refc[core_id]; - if (!ref) - return; - -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - /* - * The scenario where this is true is, when perf session is -@@ -779,7 +777,7 @@ static void core_imc_counters_release(struct perf_event *event) - * an OPAL call to disable the engine in that core. 
- * - */ -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - return; - } - ref->refc--; -@@ -787,7 +785,7 @@ static void core_imc_counters_release(struct perf_event *event) - rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, - get_hard_smp_processor_id(event->cpu)); - if (rc) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("IMC: Unable to stop the counters for core %d\n", core_id); - return; - } -@@ -795,7 +793,7 @@ static void core_imc_counters_release(struct perf_event *event) - WARN(1, "core-imc: Invalid event reference count\n"); - ref->refc = 0; - } -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - - reset_global_refc(event); - } -@@ -833,7 +831,6 @@ static int core_imc_event_init(struct perf_event *event) - if ((!pcmi->vbase)) - return -ENODEV; - -- /* Get the core_imc mutex for this core */ - ref = &core_imc_refc[core_id]; - if (!ref) - return -EINVAL; -@@ -841,22 +838,22 @@ static int core_imc_event_init(struct perf_event *event) - /* - * Core pmu units are enabled only when it is used. - * See if this is triggered for the first time. -- * If yes, take the mutex lock and enable the core counters. -+ * If yes, take the lock and enable the core counters. - * If not, just increment the count in core_imc_refc struct. - */ -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, - get_hard_smp_processor_id(event->cpu)); - if (rc) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("core-imc: Unable to start the counters for core %d\n", - core_id); - return rc; - } - } - ++ref->refc; -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - - /* - * Since the system can run either in accumulation or trace-mode -@@ -867,7 +864,7 @@ static int core_imc_event_init(struct perf_event *event) - * to know whether any other trace/thread imc - * events are running. - */ -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) { - /* - * No other trace/thread imc events are running in -@@ -876,10 +873,10 @@ static int core_imc_event_init(struct perf_event *event) - imc_global_refc.id = IMC_DOMAIN_CORE; - imc_global_refc.refc++; - } else { -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - return -EBUSY; - } -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - - event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); - event->destroy = core_imc_counters_release; -@@ -951,10 +948,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu) - mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); - - /* Reduce the refc if thread-imc event running on this cpu */ -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - if (imc_global_refc.id == IMC_DOMAIN_THREAD) - imc_global_refc.refc--; -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - - return 0; - } -@@ -994,7 +991,7 @@ static int thread_imc_event_init(struct perf_event *event) - if (!target) - return -EINVAL; - -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - /* - * Check if any other trace/core imc events are running in the - * system, if not set the global id to thread-imc. 
-@@ -1003,10 +1000,10 @@ static int thread_imc_event_init(struct perf_event *event) - imc_global_refc.id = IMC_DOMAIN_THREAD; - imc_global_refc.refc++; - } else { -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - return -EBUSY; - } -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - - event->pmu->task_ctx_nr = perf_sw_context; - event->destroy = reset_global_refc; -@@ -1128,25 +1125,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags) - /* - * imc pmus are enabled only when it is used. - * See if this is triggered for the first time. -- * If yes, take the mutex lock and enable the counters. -+ * If yes, take the lock and enable the counters. - * If not, just increment the count in ref count struct. - */ - ref = &core_imc_refc[core_id]; - if (!ref) - return -EINVAL; - -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE, - get_hard_smp_processor_id(smp_processor_id()))) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("thread-imc: Unable to start the counter\ - for core %d\n", core_id); - return -EINVAL; - } - } - ++ref->refc; -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - return 0; - } - -@@ -1163,12 +1160,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags) - return; - } - -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - ref->refc--; - if (ref->refc == 0) { - if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, - get_hard_smp_processor_id(smp_processor_id()))) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("thread-imc: Unable to stop the counters\ - for core %d\n", core_id); - return; -@@ -1176,7 +1173,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags) - } else if (ref->refc < 0) { - ref->refc = 0; - } -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - - /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */ - mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); -@@ -1217,9 +1214,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size) - } - } - -- /* Init the mutex, if not already */ - trace_imc_refc[core_id].id = core_id; -- mutex_init(&trace_imc_refc[core_id].lock); -+ spin_lock_init(&trace_imc_refc[core_id].lock); - - mtspr(SPRN_LDBAR, 0); - return 0; -@@ -1239,10 +1235,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu) - * Reduce the refc if any trace-imc event running - * on this cpu. 
- */ -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - if (imc_global_refc.id == IMC_DOMAIN_TRACE) - imc_global_refc.refc--; -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - - return 0; - } -@@ -1364,17 +1360,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags) - } - - mtspr(SPRN_LDBAR, ldbar_value); -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - if (ref->refc == 0) { - if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE, - get_hard_smp_processor_id(smp_processor_id()))) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); - return -EINVAL; - } - } - ++ref->refc; -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - return 0; - } - -@@ -1407,19 +1403,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags) - return; - } - -- mutex_lock(&ref->lock); -+ spin_lock(&ref->lock); - ref->refc--; - if (ref->refc == 0) { - if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE, - get_hard_smp_processor_id(smp_processor_id()))) { -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id); - return; - } - } else if (ref->refc < 0) { - ref->refc = 0; - } -- mutex_unlock(&ref->lock); -+ spin_unlock(&ref->lock); - - trace_imc_event_stop(event, flags); - } -@@ -1441,7 +1437,7 @@ static int trace_imc_event_init(struct perf_event *event) - * no other thread is running any core/thread imc - * events - */ -- mutex_lock(&imc_global_refc.lock); -+ spin_lock(&imc_global_refc.lock); - if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) { - /* - * No core/thread imc events are running in the -@@ -1450,14 +1446,18 @@ static int trace_imc_event_init(struct perf_event *event) - imc_global_refc.id = IMC_DOMAIN_TRACE; - imc_global_refc.refc++; - } else { -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - return -EBUSY; - } -- mutex_unlock(&imc_global_refc.lock); -+ spin_unlock(&imc_global_refc.lock); - - event->hw.idx = -1; - -- event->pmu->task_ctx_nr = perf_hw_context; -+ /* -+ * There can only be a single PMU for perf_hw_context events which is assigned to -+ * core PMU. Hence use "perf_sw_context" for trace_imc. -+ */ -+ event->pmu->task_ctx_nr = perf_sw_context; - event->destroy = reset_global_refc; - return 0; - } -@@ -1522,10 +1522,10 @@ static int init_nest_pmu_ref(void) - i = 0; - for_each_node(nid) { - /* -- * Mutex lock to avoid races while tracking the number of -+ * Take the lock to avoid races while tracking the number of - * sessions using the chip's nest pmu units. - */ -- mutex_init(&nest_imc_refc[i].lock); -+ spin_lock_init(&nest_imc_refc[i].lock); - - /* - * Loop to init the "id" with the node_id. Variable "i" initialized to -diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c -index f92bf5f6b74f1..027a2add780e8 100644 ---- a/arch/powerpc/perf/isa207-common.c -+++ b/arch/powerpc/perf/isa207-common.c -@@ -108,7 +108,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) - *mmcra |= MMCRA_SDAR_MODE_TLB; - } - --static u64 p10_thresh_cmp_val(u64 value) -+static int p10_thresh_cmp_val(u64 value) - { - int exp = 0; - u64 result = value; -@@ -139,7 +139,7 @@ static u64 p10_thresh_cmp_val(u64 value) - * exponent is also zero. 
- */ - if (!(value & 0xC0) && exp) -- result = 0; -+ result = -1; - else - result = (exp << 8) | value; - } -@@ -187,7 +187,7 @@ static bool is_thresh_cmp_valid(u64 event) - unsigned int cmp, exp; - - if (cpu_has_feature(CPU_FTR_ARCH_31)) -- return p10_thresh_cmp_val(event) != 0; -+ return p10_thresh_cmp_val(event) >= 0; - - /* - * Check the mantissa upper two bits are not zero, unless the -@@ -456,12 +456,14 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, - value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT); - mask |= p10_CNST_THRESH_CMP_MASK; - value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1)); -- } -+ } else if (event_is_threshold(event)) -+ return -1; - } else if (cpu_has_feature(CPU_FTR_ARCH_300)) { - if (event_is_threshold(event) && is_thresh_cmp_valid(event)) { - mask |= CNST_THRESH_MASK; - value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); -- } -+ } else if (event_is_threshold(event)) -+ return -1; - } else { - /* - * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, -diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h -index 93be7197d2502..564f14097f07b 100644 ---- a/arch/powerpc/perf/power10-events-list.h -+++ b/arch/powerpc/perf/power10-events-list.h -@@ -9,10 +9,10 @@ - /* - * Power10 event codes. - */ --EVENT(PM_RUN_CYC, 0x600f4); -+EVENT(PM_CYC, 0x600f4); - EVENT(PM_DISP_STALL_CYC, 0x100f8); - EVENT(PM_EXEC_STALL, 0x30008); --EVENT(PM_RUN_INST_CMPL, 0x500fa); -+EVENT(PM_INST_CMPL, 0x500fa); - EVENT(PM_BR_CMPL, 0x4d05e); - EVENT(PM_BR_MPRED_CMPL, 0x400f6); - EVENT(PM_BR_FIN, 0x2f04a); -@@ -50,8 +50,8 @@ EVENT(PM_DTLB_MISS, 0x300fc); - /* ITLB Reloaded */ - EVENT(PM_ITLB_MISS, 0x400fc); - --EVENT(PM_RUN_CYC_ALT, 0x0001e); --EVENT(PM_RUN_INST_CMPL_ALT, 0x00002); -+EVENT(PM_CYC_ALT, 0x0001e); -+EVENT(PM_INST_CMPL_ALT, 0x00002); - - /* - * Memory Access Events -diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c -index f9d64c63bb4a7..07ca62d084d9d 100644 ---- a/arch/powerpc/perf/power10-pmu.c -+++ b/arch/powerpc/perf/power10-pmu.c -@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK; - - /* Table of alternatives, sorted by column 0 */ - static const unsigned int power10_event_alternatives[][MAX_ALT] = { -- { PM_RUN_CYC_ALT, PM_RUN_CYC }, -- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, -+ { PM_INST_CMPL_ALT, PM_INST_CMPL }, -+ { PM_CYC_ALT, PM_CYC }, - }; - - static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[]) -@@ -118,8 +118,8 @@ static int power10_check_attr_config(struct perf_event *ev) - return 0; - } - --GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC); --GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL); -+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); -+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL); - GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL); - GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL); - GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); -@@ -148,8 +148,8 @@ CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS); - CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS); - - static struct attribute *power10_events_attr_dd1[] = { -- GENERIC_EVENT_PTR(PM_RUN_CYC), -- GENERIC_EVENT_PTR(PM_RUN_INST_CMPL), -+ GENERIC_EVENT_PTR(PM_CYC), -+ GENERIC_EVENT_PTR(PM_INST_CMPL), - GENERIC_EVENT_PTR(PM_BR_CMPL), - GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL), - GENERIC_EVENT_PTR(PM_LD_REF_L1), -@@ -173,8 +173,8 @@ static struct attribute *power10_events_attr_dd1[] = { - }; - - static struct attribute 
*power10_events_attr[] = { -- GENERIC_EVENT_PTR(PM_RUN_CYC), -- GENERIC_EVENT_PTR(PM_RUN_INST_CMPL), -+ GENERIC_EVENT_PTR(PM_CYC), -+ GENERIC_EVENT_PTR(PM_INST_CMPL), - GENERIC_EVENT_PTR(PM_BR_FIN), - GENERIC_EVENT_PTR(PM_MPRED_BR_FIN), - GENERIC_EVENT_PTR(PM_LD_REF_L1), -@@ -271,8 +271,8 @@ static const struct attribute_group *power10_pmu_attr_groups[] = { - }; - - static int power10_generic_events_dd1[] = { -- [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC, -- [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL, -+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, -+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL, - [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, - [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, -@@ -280,8 +280,8 @@ static int power10_generic_events_dd1[] = { - }; - - static int power10_generic_events[] = { -- [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC, -- [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL, -+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, -+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_FIN, - [PERF_COUNT_HW_BRANCH_MISSES] = PM_MPRED_BR_FIN, - [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, -@@ -548,6 +548,24 @@ static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { - - #undef C - -+/* -+ * Set the MMCR0[CC56RUN] bit to enable counting for -+ * PMC5 and PMC6 regardless of the state of CTRL[RUN], -+ * so that we can use counters 5 and 6 as PM_INST_CMPL and -+ * PM_CYC. -+ */ -+static int power10_compute_mmcr(u64 event[], int n_ev, -+ unsigned int hwc[], struct mmcr_regs *mmcr, -+ struct perf_event *pevents[], u32 flags) -+{ -+ int ret; -+ -+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags); -+ if (!ret) -+ mmcr->mmcr0 |= MMCR0_C56RUN; -+ return ret; -+} -+ - static struct power_pmu power10_pmu = { - .name = "POWER10", - .n_counter = MAX_PMU_COUNTERS, -@@ -555,7 +573,7 @@ static struct power_pmu power10_pmu = { - .test_adder = ISA207_TEST_ADDER, - .group_constraint_mask = CNST_CACHE_PMC4_MASK, - .group_constraint_val = CNST_CACHE_PMC4_VAL, -- .compute_mmcr = isa207_compute_mmcr, -+ .compute_mmcr = power10_compute_mmcr, - .config_bhrb = power10_config_bhrb, - .bhrb_filter_map = power10_bhrb_filter_map, - .get_constraint = isa207_get_constraint, -diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c -index ff3382140d7e6..cbdd074ee2a70 100644 ---- a/arch/powerpc/perf/power9-pmu.c -+++ b/arch/powerpc/perf/power9-pmu.c -@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = { - - /* Table of alternatives, sorted by column 0 */ - static const unsigned int power9_event_alternatives[][MAX_ALT] = { -- { PM_INST_DISP, PM_INST_DISP_ALT }, -- { PM_RUN_CYC_ALT, PM_RUN_CYC }, -- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, -- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT }, - { PM_BR_2PATH, PM_BR_2PATH_ALT }, -+ { PM_INST_DISP, PM_INST_DISP_ALT }, -+ { PM_RUN_CYC_ALT, PM_RUN_CYC }, -+ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT }, -+ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, - }; - - static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[]) -diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h -index fa9bc804e67af..6b2a59fefffa7 100644 ---- a/arch/powerpc/perf/req-gen/perf.h -+++ b/arch/powerpc/perf/req-gen/perf.h -@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ - #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ - r_fields - -+/* Generate event list for platforms with counter_info_version 0x6 or below */ -+static __maybe_unused struct 
attribute *hv_gpci_event_attrs_v6[] = { -+#include REQUEST_FILE -+ NULL -+}; -+ -+/* -+ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci -+ * events were deprecated for platform firmware that supports -+ * counter_info_version 0x8 or above. -+ * Those deprecated events are still part of platform firmware that -+ * support counter_info_version 0x6 and below. As per the getPerfCountInfo -+ * v1.018 documentation there is no counter_info_version 0x7. -+ * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of -+ * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms -+ * that supports counter_info_version 0x8 or above. -+ */ -+#undef ENABLE_EVENTS_COUNTERINFO_V6 -+ -+/* Generate event list for platforms with counter_info_version 0x8 or above*/ - static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { - #include REQUEST_FILE - NULL -diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c -index b299e43f5ef94..823397c802def 100644 ---- a/arch/powerpc/platforms/44x/fsp2.c -+++ b/arch/powerpc/platforms/44x/fsp2.c -@@ -208,6 +208,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler) - if (irq == NO_IRQ) { - pr_err("device tree node %pOFn is missing a interrupt", - np); -+ of_node_put(np); - return; - } - -@@ -215,6 +216,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler) - if (rc) { - pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d", - np, rc); -+ of_node_put(np); - return; - } - } -diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c -index ae8b812c92029..2481e78c04234 100644 ---- a/arch/powerpc/platforms/4xx/cpm.c -+++ b/arch/powerpc/platforms/4xx/cpm.c -@@ -327,6 +327,6 @@ late_initcall(cpm_init); - static int __init cpm_powersave_off(char *arg) - { - cpm.powersave_off = 1; -- return 0; -+ return 1; - } - __setup("powersave=off", cpm_powersave_off); -diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c -index 30342b60aa63f..42c3d40355d90 100644 ---- a/arch/powerpc/platforms/512x/clock-commonclk.c -+++ b/arch/powerpc/platforms/512x/clock-commonclk.c -@@ -984,7 +984,7 @@ static void mpc5121_clk_provide_migration_support(void) - - #define NODE_PREP do { \ - of_address_to_resource(np, 0, &res); \ -- snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \ -+ snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \ - } while (0) - - #define NODE_CHK(clkname, clkitem, regnode, regflag) do { \ -diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c -index b91ebebd9ff20..e0049b7df2125 100644 ---- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c -+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c -@@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) - err_bcom_rx_irq: - bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); - err_bcom_rx: -+ free_irq(lpbfifo.irq, &lpbfifo); - err_irq: - iounmap(lpbfifo.regs); - lpbfifo.regs = NULL; -diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c -index b6133a237a709..6e18d07035680 100644 ---- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c -+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c -@@ -106,7 +106,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, - - goto next; - unreg: -- platform_device_del(pdev); -+ platform_device_put(pdev); - err: - 
pr_err("%pOF: registration failed\n", np); - next: -diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile -index 60e4e97a929db..260fbad7967b2 100644 ---- a/arch/powerpc/platforms/85xx/Makefile -+++ b/arch/powerpc/platforms/85xx/Makefile -@@ -3,7 +3,9 @@ - # Makefile for the PowerPC 85xx linux kernel. - # - obj-$(CONFIG_SMP) += smp.o --obj-$(CONFIG_FSL_PMC) += mpc85xx_pm_ops.o -+ifneq ($(CONFIG_FSL_CORENET_RCPM),y) -+obj-$(CONFIG_SMP) += mpc85xx_pm_ops.o -+endif - - obj-y += common.o - -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c -index 7c0133f558d02..4a8af80011a6f 100644 ---- a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c -+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c -@@ -17,6 +17,7 @@ - - static struct ccsr_guts __iomem *guts; - -+#ifdef CONFIG_FSL_PMC - static void mpc85xx_irq_mask(int cpu) - { - -@@ -49,6 +50,7 @@ static void mpc85xx_cpu_up_prepare(int cpu) - { - - } -+#endif - - static void mpc85xx_freeze_time_base(bool freeze) - { -@@ -76,10 +78,12 @@ static const struct of_device_id mpc85xx_smp_guts_ids[] = { - - static const struct fsl_pm_ops mpc85xx_pm_ops = { - .freeze_time_base = mpc85xx_freeze_time_base, -+#ifdef CONFIG_FSL_PMC - .irq_mask = mpc85xx_irq_mask, - .irq_unmask = mpc85xx_irq_unmask, - .cpu_die = mpc85xx_cpu_die, - .cpu_up_prepare = mpc85xx_cpu_up_prepare, -+#endif - }; - - int __init mpc85xx_setup_pmc(void) -@@ -94,9 +98,8 @@ int __init mpc85xx_setup_pmc(void) - pr_err("Could not map guts node address\n"); - return -ENOMEM; - } -+ qoriq_pm_ops = &mpc85xx_pm_ops; - } - -- qoriq_pm_ops = &mpc85xx_pm_ops; -- - return 0; - } -diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c -index c6df294054fe9..d7081e9af65c7 100644 ---- a/arch/powerpc/platforms/85xx/smp.c -+++ b/arch/powerpc/platforms/85xx/smp.c -@@ -40,7 +40,6 @@ struct epapr_spin_table { - u32 pir; - }; - --#ifdef CONFIG_HOTPLUG_CPU - static u64 timebase; - static int tb_req; - static int tb_valid; -@@ -112,6 +111,7 @@ static void mpc85xx_take_timebase(void) - local_irq_restore(flags); - } - -+#ifdef CONFIG_HOTPLUG_CPU - static void smp_85xx_cpu_offline_self(void) - { - unsigned int cpu = smp_processor_id(); -@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu) - local_irq_save(flags); - hard_irq_disable(); - -- if (qoriq_pm_ops) -+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) - qoriq_pm_ops->cpu_up_prepare(cpu); - - /* if cpu is not spinning, reset it */ -@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr) - booting_thread_hwid = cpu_thread_in_core(nr); - primary = cpu_first_thread_sibling(nr); - -- if (qoriq_pm_ops) -+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) - qoriq_pm_ops->cpu_up_prepare(nr); - - /* -@@ -495,21 +495,21 @@ void __init mpc85xx_smp_init(void) - smp_85xx_ops.probe = NULL; - } - --#ifdef CONFIG_HOTPLUG_CPU - #ifdef CONFIG_FSL_CORENET_RCPM -+ /* Assign a value to qoriq_pm_ops on PPC_E500MC */ - fsl_rcpm_init(); --#endif -- --#ifdef CONFIG_FSL_PMC -+#else -+ /* Assign a value to qoriq_pm_ops on !PPC_E500MC */ - mpc85xx_setup_pmc(); - #endif - if (qoriq_pm_ops) { - smp_85xx_ops.give_timebase = mpc85xx_give_timebase; - smp_85xx_ops.take_timebase = mpc85xx_take_timebase; -+#ifdef CONFIG_HOTPLUG_CPU - smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self; - smp_85xx_ops.cpu_die = qoriq_cpu_kill; -- } - #endif -+ } - smp_ops = &smp_85xx_ops; - - #ifdef CONFIG_KEXEC_CORE -diff --git a/arch/powerpc/platforms/8xx/cpm1.c 
b/arch/powerpc/platforms/8xx/cpm1.c -index c58b6f1c40e35..3ef5e9fd3a9b6 100644 ---- a/arch/powerpc/platforms/8xx/cpm1.c -+++ b/arch/powerpc/platforms/8xx/cpm1.c -@@ -280,6 +280,7 @@ cpm_setbrg(uint brg, uint rate) - out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) | - CPM_BRG_EN | CPM_BRG_DIV16); - } -+EXPORT_SYMBOL(cpm_setbrg); - - struct cpm_ioport16 { - __be16 dir, par, odr_sor, dat, intr; -diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c -index f2ba837249d69..04a6abf14c295 100644 ---- a/arch/powerpc/platforms/8xx/pic.c -+++ b/arch/powerpc/platforms/8xx/pic.c -@@ -153,6 +153,7 @@ int __init mpc8xx_pic_init(void) - if (mpc8xx_pic_host == NULL) { - printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); - ret = -ENOMEM; -+ goto out; - } - - ret = 0; -diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype -index a208997ade88b..1b1e67ff9d211 100644 ---- a/arch/powerpc/platforms/Kconfig.cputype -+++ b/arch/powerpc/platforms/Kconfig.cputype -@@ -111,6 +111,7 @@ config PPC_BOOK3S_64 - - config PPC_BOOK3E_64 - bool "Embedded processors" -+ select PPC_FSL_BOOK3E - select PPC_FPU # Make it a choice ? - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL -@@ -136,9 +137,9 @@ config GENERIC_CPU - depends on PPC64 && CPU_LITTLE_ENDIAN - select ARCH_HAS_FAST_MULTIPLIER - --config GENERIC_CPU -+config POWERPC_CPU - bool "Generic 32 bits powerpc" -- depends on PPC32 && !PPC_8xx -+ depends on PPC32 && !PPC_8xx && !PPC_85xx - - config CELL_CPU - bool "Cell Broadband Engine" -@@ -169,11 +170,11 @@ config POWER9_CPU - - config E5500_CPU - bool "Freescale e5500" -- depends on E500 -+ depends on PPC64 && E500 - - config E6500_CPU - bool "Freescale e6500" -- depends on E500 -+ depends on PPC64 && E500 - - config 860_CPU - bool "8xx family" -@@ -192,11 +193,23 @@ config G4_CPU - depends on PPC_BOOK3S_32 - select ALTIVEC - -+config E500_CPU -+ bool "e500 (8540)" -+ depends on PPC_85xx && !PPC_E500MC -+ -+config E500MC_CPU -+ bool "e500mc" -+ depends on PPC_85xx && PPC_E500MC -+ -+config TOOLCHAIN_DEFAULT_CPU -+ bool "Rely on the toolchain's implicit default CPU" -+ depends on PPC32 -+ - endchoice - - config TARGET_CPU_BOOL - bool -- default !GENERIC_CPU -+ default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU - - config TARGET_CPU - string -@@ -211,6 +224,9 @@ config TARGET_CPU - default "e300c2" if E300C2_CPU - default "e300c3" if E300C3_CPU - default "G4" if G4_CPU -+ default "8540" if E500_CPU -+ default "e500mc" if E500MC_CPU -+ default "powerpc" if POWERPC_CPU - - config PPC_BOOK3S - def_bool y -@@ -287,7 +303,7 @@ config FSL_BOOKE - config PPC_FSL_BOOK3E - bool - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 -- select FSL_EMB_PERFMON -+ imply FSL_EMB_PERFMON - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL - default y if FSL_BOOKE -diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c -index 30172e52e16b7..4d82c92ddd523 100644 ---- a/arch/powerpc/platforms/book3s/vas-api.c -+++ b/arch/powerpc/platforms/book3s/vas-api.c -@@ -303,7 +303,7 @@ static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg) - return -EINVAL; - } - -- if (!cp_inst->coproc->vops && !cp_inst->coproc->vops->open_win) { -+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) { - pr_err("VAS API is not registered\n"); - return -EACCES; - } -@@ -373,7 +373,7 @@ static int coproc_mmap(struct file *fp, struct vm_area_struct *vma) - return -EINVAL; - } - -- if (!cp_inst->coproc->vops && 
!cp_inst->coproc->vops->paste_addr) { -+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) { - pr_err("%s(): VAS API is not registered\n", __func__); - return -EACCES; - } -diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c -index 82335e364c440..f630693c8de72 100644 ---- a/arch/powerpc/platforms/cell/axon_msi.c -+++ b/arch/powerpc/platforms/cell/axon_msi.c -@@ -226,6 +226,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) - if (!prop) { - dev_dbg(&dev->dev, - "axon_msi: no msi-address-(32|64) properties found\n"); -+ of_node_put(dn); - return -ENOENT; - } - -diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c -index fa08699aedeb8..d32f24de84798 100644 ---- a/arch/powerpc/platforms/cell/iommu.c -+++ b/arch/powerpc/platforms/cell/iommu.c -@@ -977,6 +977,7 @@ static int __init cell_iommu_fixed_mapping_init(void) - if (hbase < dbase || (hend > (dbase + dsize))) { - pr_debug("iommu: hash window doesn't fit in" - "real DMA window\n"); -+ of_node_put(np); - return -1; - } - } -diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c -index 5b9a7e9f144b3..dff8d5e7ab82b 100644 ---- a/arch/powerpc/platforms/cell/pervasive.c -+++ b/arch/powerpc/platforms/cell/pervasive.c -@@ -78,6 +78,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs) - switch (regs->msr & SRR1_WAKEMASK) { - case SRR1_WAKEDEC: - set_dec(1); -+ break; - case SRR1_WAKEEE: - /* - * Handle these when interrupts get re-enabled and we take -diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c -index bed05b644c2c5..ed37a93bf858a 100644 ---- a/arch/powerpc/platforms/cell/spufs/inode.c -+++ b/arch/powerpc/platforms/cell/spufs/inode.c -@@ -659,6 +659,7 @@ spufs_init_isolated_loader(void) - return; - - loader = of_get_property(dn, "loader", &size); -+ of_node_put(dn); - if (!loader) - return; - -diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -index 609bda2ad5dd2..4d9200bdba78c 100644 ---- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c -+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -@@ -145,7 +145,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np) - } - io_base = ioremap(res.start, resource_size(&res)); - -- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); -+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base); - - __flipper_quiesce(io_base); - -diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c -index 15396333a90bd..132e5c175e2d6 100644 ---- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c -+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c -@@ -171,7 +171,7 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np) - return NULL; - } - -- pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base); -+ pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base); - - __hlwd_quiesce(io_base); - -@@ -214,6 +214,7 @@ void hlwd_pic_probe(void) - irq_set_chained_handler(cascade_virq, - hlwd_pic_irq_cascade); - hlwd_irq_host = host; -+ of_node_put(np); - break; - } - } -diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c -index a802ef957d63e..458a63a30e803 100644 ---- a/arch/powerpc/platforms/embedded6xx/wii.c -+++ b/arch/powerpc/platforms/embedded6xx/wii.c -@@ -89,8 
+89,8 @@ static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible) - - hw_regs = ioremap(res.start, resource_size(&res)); - if (hw_regs) { -- pr_info("%s at 0x%08x mapped to 0x%p\n", name, -- res.start, hw_regs); -+ pr_info("%s at 0x%pa mapped to 0x%p\n", name, -+ &res.start, hw_regs); - } - - out_put: -diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h -new file mode 100644 -index 0000000000000..335417e95e66f ---- /dev/null -+++ b/arch/powerpc/platforms/microwatt/microwatt.h -@@ -0,0 +1,7 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef _MICROWATT_H -+#define _MICROWATT_H -+ -+void microwatt_rng_init(void); -+ -+#endif /* _MICROWATT_H */ -diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c -index 3d8ee6eb7dada..8cb161533e6aa 100644 ---- a/arch/powerpc/platforms/microwatt/rng.c -+++ b/arch/powerpc/platforms/microwatt/rng.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include "microwatt.h" - - #define DARN_ERR 0xFFFFFFFFFFFFFFFFul - -@@ -29,7 +30,7 @@ int microwatt_get_random_darn(unsigned long *v) - return 1; - } - --static __init int rng_init(void) -+void __init microwatt_rng_init(void) - { - unsigned long val; - int i; -@@ -37,12 +38,7 @@ static __init int rng_init(void) - for (i = 0; i < 10; i++) { - if (microwatt_get_random_darn(&val)) { - ppc_md.get_random_seed = microwatt_get_random_darn; -- return 0; -+ return; - } - } -- -- pr_warn("Unable to use DARN for get_random_seed()\n"); -- -- return -EIO; - } --machine_subsys_initcall(, rng_init); -diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c -index 0b02603bdb747..6b32539395a48 100644 ---- a/arch/powerpc/platforms/microwatt/setup.c -+++ b/arch/powerpc/platforms/microwatt/setup.c -@@ -16,6 +16,8 @@ - #include - #include - -+#include "microwatt.h" -+ - static void __init microwatt_init_IRQ(void) - { - xics_init(); -@@ -32,10 +34,16 @@ static int __init microwatt_populate(void) - } - machine_arch_initcall(microwatt, microwatt_populate); - -+static void __init microwatt_setup_arch(void) -+{ -+ microwatt_rng_init(); -+} -+ - define_machine(microwatt) { - .name = "microwatt", - .probe = microwatt_probe, - .init_IRQ = microwatt_init_IRQ, -+ .setup_arch = microwatt_setup_arch, - .progress = udbg_progress, - .calibrate_decr = generic_calibrate_decr, - }; -diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S -index ced2254154860..b8ae56e9f4146 100644 ---- a/arch/powerpc/platforms/powermac/cache.S -+++ b/arch/powerpc/platforms/powermac/cache.S -@@ -48,7 +48,7 @@ flush_disable_75x: - - /* Stop DST streams */ - BEGIN_FTR_SECTION -- DSSALL -+ PPC_DSSALL - sync - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - -@@ -197,7 +197,7 @@ flush_disable_745x: - isync - - /* Stop prefetch streams */ -- DSSALL -+ PPC_DSSALL - sync - - /* Disable L2 prefetching */ -diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c -index f77a59b5c2e1a..df89d916236d9 100644 ---- a/arch/powerpc/platforms/powermac/low_i2c.c -+++ b/arch/powerpc/platforms/powermac/low_i2c.c -@@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, - bus->close = kw_i2c_close; - bus->xfer = kw_i2c_xfer; - mutex_init(&bus->mutex); -+ lockdep_register_key(&bus->lock_key); - lockdep_set_class(&bus->mutex, &bus->lock_key); - if (controller == busnode) - bus->flags = pmac_i2c_multibus; -@@ -810,6 +811,7 @@ static void __init 
pmu_i2c_probe(void) - bus->hostdata = bus + 1; - bus->xfer = pmu_i2c_xfer; - mutex_init(&bus->mutex); -+ lockdep_register_key(&bus->lock_key); - lockdep_set_class(&bus->mutex, &bus->lock_key); - bus->flags = pmac_i2c_multibus; - list_add(&bus->link, &pmac_i2c_busses); -@@ -933,6 +935,7 @@ static void __init smu_i2c_probe(void) - bus->hostdata = bus + 1; - bus->xfer = smu_i2c_xfer; - mutex_init(&bus->mutex); -+ lockdep_register_key(&bus->lock_key); - lockdep_set_class(&bus->mutex, &bus->lock_key); - bus->flags = 0; - list_add(&bus->link, &pmac_i2c_busses); -diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c -index 9a360ced663b0..e23a51a05f99a 100644 ---- a/arch/powerpc/platforms/powernv/opal-fadump.c -+++ b/arch/powerpc/platforms/powernv/opal-fadump.c -@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) - addr = be64_to_cpu(addr); - pr_debug("Kernel metadata addr: %llx\n", addr); - opal_fdm_active = (void *)addr; -- if (opal_fdm_active->registered_regions == 0) -+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) - return; - - ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr); -@@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf); - static void opal_fadump_update_config(struct fw_dump *fadump_conf, - const struct opal_fadump_mem_struct *fdm) - { -- pr_debug("Boot memory regions count: %d\n", fdm->region_cnt); -+ pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt)); - - /* - * The destination address of the first boot memory region is the - * destination address of boot memory regions. - */ -- fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest; -+ fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest); - pr_debug("Destination address of boot memory regions: %#016llx\n", - fadump_conf->boot_mem_dest_addr); - -- fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr; -+ fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr); - } - - /* -@@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, - fadump_conf->boot_memory_size = 0; - - pr_debug("Boot memory regions:\n"); -- for (i = 0; i < fdm->region_cnt; i++) { -- base = fdm->rgn[i].src; -- size = fdm->rgn[i].size; -+ for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) { -+ base = be64_to_cpu(fdm->rgn[i].src); -+ size = be64_to_cpu(fdm->rgn[i].size); - pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); - - fadump_conf->boot_mem_addr[i] = base; -@@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, - * Start address of reserve dump area (permanent reservation) for - * re-registering FADump after dump capture. - */ -- fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest; -+ fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest); - - /* - * Rarely, but it can so happen that system crashes before all -@@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, - * Hope the memory that could not be preserved only has pages - * that are usually filtered out while saving the vmcore. 
- */ -- if (fdm->region_cnt > fdm->registered_regions) { -+ if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) { - pr_warn("Not all memory regions were saved!!!\n"); - pr_warn(" Unsaved memory regions:\n"); -- i = fdm->registered_regions; -- while (i < fdm->region_cnt) { -+ i = be16_to_cpu(fdm->registered_regions); -+ while (i < be16_to_cpu(fdm->region_cnt)) { - pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n", -- i, fdm->rgn[i].src, fdm->rgn[i].size); -+ i, be64_to_cpu(fdm->rgn[i].src), -+ be64_to_cpu(fdm->rgn[i].size)); - i++; - } - -@@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, - } - - fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size); -- fadump_conf->boot_mem_regs_cnt = fdm->region_cnt; -+ fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt); - opal_fadump_update_config(fadump_conf, fdm); - } - -@@ -178,35 +179,38 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf, - static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm) - { - fdm->version = OPAL_FADUMP_VERSION; -- fdm->region_cnt = 0; -- fdm->registered_regions = 0; -- fdm->fadumphdr_addr = 0; -+ fdm->region_cnt = cpu_to_be16(0); -+ fdm->registered_regions = cpu_to_be16(0); -+ fdm->fadumphdr_addr = cpu_to_be64(0); - } - - static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf) - { - u64 addr = fadump_conf->reserve_dump_area_start; -+ u16 reg_cnt; - int i; - - opal_fdm = __va(fadump_conf->kernel_metadata); - opal_fadump_init_metadata(opal_fdm); - - /* Boot memory regions */ -+ reg_cnt = be16_to_cpu(opal_fdm->region_cnt); - for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { -- opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i]; -- opal_fdm->rgn[i].dest = addr; -- opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i]; -+ opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]); -+ opal_fdm->rgn[i].dest = cpu_to_be64(addr); -+ opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]); - -- opal_fdm->region_cnt++; -+ reg_cnt++; - addr += fadump_conf->boot_mem_sz[i]; - } -+ opal_fdm->region_cnt = cpu_to_be16(reg_cnt); - - /* - * Kernel metadata is passed to f/w and retrieved in capture kerenl. - * So, use it to save fadump header address instead of calculating it. 
- */ -- opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest + -- fadump_conf->boot_memory_size); -+ opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) + -+ fadump_conf->boot_memory_size); - - opal_fadump_update_config(fadump_conf, opal_fdm); - -@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void) - static int opal_fadump_register(struct fw_dump *fadump_conf) - { - s64 rc = OPAL_PARAMETER; -+ u16 registered_regs; - int i, err = -EIO; - -- for (i = 0; i < opal_fdm->region_cnt; i++) { -+ registered_regs = be16_to_cpu(opal_fdm->registered_regions); -+ for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) { - rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, -- opal_fdm->rgn[i].src, -- opal_fdm->rgn[i].dest, -- opal_fdm->rgn[i].size); -+ be64_to_cpu(opal_fdm->rgn[i].src), -+ be64_to_cpu(opal_fdm->rgn[i].dest), -+ be64_to_cpu(opal_fdm->rgn[i].size)); - if (rc != OPAL_SUCCESS) - break; - -- opal_fdm->registered_regions++; -+ registered_regs++; - } -+ opal_fdm->registered_regions = cpu_to_be16(registered_regs); - - switch (rc) { - case OPAL_SUCCESS: -@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) - case OPAL_RESOURCE: - /* If MAX regions limit in f/w is hit, warn and proceed. */ - pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n", -- (opal_fdm->region_cnt - opal_fdm->registered_regions)); -+ (be16_to_cpu(opal_fdm->region_cnt) - -+ be16_to_cpu(opal_fdm->registered_regions))); - fadump_conf->dump_registered = 1; - err = 0; - break; -@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf) - * If some regions were registered before OPAL_MPIPL_ADD_RANGE - * OPAL call failed, unregister all regions. - */ -- if ((err < 0) && (opal_fdm->registered_regions > 0)) -+ if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0)) - opal_fadump_unregister(fadump_conf); - - return err; -@@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf) - return -EIO; - } - -- opal_fdm->registered_regions = 0; -+ opal_fdm->registered_regions = cpu_to_be16(0); - fadump_conf->dump_registered = 0; - return 0; - } -@@ -563,19 +571,20 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf, - else - fdm_ptr = opal_fdm; - -- for (i = 0; i < fdm_ptr->region_cnt; i++) { -+ for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) { - /* - * Only regions that are registered for MPIPL - * would have dump data. - */ - if ((fadump_conf->dump_active) && -- (i < fdm_ptr->registered_regions)) -- dumped_bytes = fdm_ptr->rgn[i].size; -+ (i < be16_to_cpu(fdm_ptr->registered_regions))) -+ dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size); - - seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", -- fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest); -+ be64_to_cpu(fdm_ptr->rgn[i].src), -+ be64_to_cpu(fdm_ptr->rgn[i].dest)); - seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", -- fdm_ptr->rgn[i].size, dumped_bytes); -+ be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes); - } - - /* Dump is active. Show reserved area start address. 
*/ -@@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) - { - const __be32 *prop; - unsigned long dn; -+ __be64 be_addr; - u64 addr = 0; - int i, len; - s64 ret; -@@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) - if (!prop) - return; - -- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); -- if ((ret != OPAL_SUCCESS) || !addr) { -+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr); -+ if ((ret != OPAL_SUCCESS) || !be_addr) { - pr_err("Failed to get Kernel metadata (%lld)\n", ret); - return; - } - -- addr = be64_to_cpu(addr); -+ addr = be64_to_cpu(be_addr); - pr_debug("Kernel metadata addr: %llx\n", addr); - - opal_fdm_active = __va(addr); -@@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) - } - - /* Kernel regions not registered with f/w for MPIPL */ -- if (opal_fdm_active->registered_regions == 0) { -+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) { - opal_fdm_active = NULL; - return; - } - -- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr); -- if (addr) { -- addr = be64_to_cpu(addr); -+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr); -+ if (be_addr) { -+ addr = be64_to_cpu(be_addr); - pr_debug("CPU metadata addr: %llx\n", addr); - opal_cpu_metadata = __va(addr); - } -diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h -index f1e9ecf548c5d..3f715efb0aa6e 100644 ---- a/arch/powerpc/platforms/powernv/opal-fadump.h -+++ b/arch/powerpc/platforms/powernv/opal-fadump.h -@@ -31,14 +31,14 @@ - * OPAL FADump kernel metadata - * - * The address of this structure will be registered with f/w for retrieving -- * and processing during crash dump. -+ * in the capture kernel to process the crash dump. - */ - struct opal_fadump_mem_struct { - u8 version; - u8 reserved[3]; -- u16 region_cnt; /* number of regions */ -- u16 registered_regions; /* Regions registered for MPIPL */ -- u64 fadumphdr_addr; -+ __be16 region_cnt; /* number of regions */ -+ __be16 registered_regions; /* Regions registered for MPIPL */ -+ __be64 fadumphdr_addr; - struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS]; - } __packed; - -@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt, - for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) { - reg_entry = (struct hdat_fadump_reg_entry *)bufp; - val = (cpu_endian ? 
be64_to_cpu(reg_entry->reg_val) : -- reg_entry->reg_val); -+ (u64)(reg_entry->reg_val)); - opal_fadump_set_regval_regnum(regs, - be32_to_cpu(reg_entry->reg_type), - be32_to_cpu(reg_entry->reg_num), -diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c -index 1e5d51db40f84..5390c888db162 100644 ---- a/arch/powerpc/platforms/powernv/opal-lpc.c -+++ b/arch/powerpc/platforms/powernv/opal-lpc.c -@@ -396,6 +396,7 @@ void __init opal_lpc_init(void) - if (!of_get_property(np, "primary", NULL)) - continue; - opal_lpc_chip_id = of_get_ibm_chip_id(np); -+ of_node_put(np); - break; - } - if (opal_lpc_chip_id < 0) -diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c -index a191f4c60ce71..113bdb151f687 100644 ---- a/arch/powerpc/platforms/powernv/opal-prd.c -+++ b/arch/powerpc/platforms/powernv/opal-prd.c -@@ -369,6 +369,12 @@ static struct notifier_block opal_prd_event_nb = { - .priority = 0, - }; - -+static struct notifier_block opal_prd_event_nb2 = { -+ .notifier_call = opal_prd_msg_notifier, -+ .next = NULL, -+ .priority = 0, -+}; -+ - static int opal_prd_probe(struct platform_device *pdev) - { - int rc; -@@ -390,9 +396,10 @@ static int opal_prd_probe(struct platform_device *pdev) - return rc; - } - -- rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb); -+ rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2); - if (rc) { - pr_err("Couldn't register PRD2 event notifier\n"); -+ opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); - return rc; - } - -@@ -401,6 +408,8 @@ static int opal_prd_probe(struct platform_device *pdev) - pr_err("failed to register miscdev\n"); - opal_message_notifier_unregister(OPAL_MSG_PRD, - &opal_prd_event_nb); -+ opal_message_notifier_unregister(OPAL_MSG_PRD2, -+ &opal_prd_event_nb2); - return rc; - } - -@@ -411,6 +420,7 @@ static int opal_prd_remove(struct platform_device *pdev) - { - misc_deregister(&opal_prd_dev); - opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); -+ opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2); - return 0; - } - -diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c -index e9d18519e650b..5178ec6f3715c 100644 ---- a/arch/powerpc/platforms/powernv/opal.c -+++ b/arch/powerpc/platforms/powernv/opal.c -@@ -892,6 +892,7 @@ static void opal_export_attrs(void) - kobj = kobject_create_and_add("exports", opal_kobj); - if (!kobj) { - pr_warn("kobject_create_and_add() of exports failed\n"); -+ of_node_put(np); - return; - } - -diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c -index 3dd35c327d1c5..624822a810193 100644 ---- a/arch/powerpc/platforms/powernv/pci-ioda.c -+++ b/arch/powerpc/platforms/powernv/pci-ioda.c -@@ -1618,6 +1618,7 @@ found: - tbl->it_ops = &pnv_ioda1_iommu_ops; - pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; - pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; -+ tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number; - if (!iommu_init_table(tbl, phb->hose->node, 0, 0)) - panic("Failed to initialize iommu table"); - -@@ -1788,6 +1789,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) - res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; - } - -+ tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; - if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) - rc = 
pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); - else -diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c -index 28aac933a4391..e3e52ff2cbf58 100644 ---- a/arch/powerpc/platforms/powernv/pci-sriov.c -+++ b/arch/powerpc/platforms/powernv/pci-sriov.c -@@ -600,12 +600,12 @@ static void pnv_pci_sriov_disable(struct pci_dev *pdev) - struct pnv_iov_data *iov; - - iov = pnv_iov_get(pdev); -- num_vfs = iov->num_vfs; -- base_pe = iov->vf_pe_arr[0].pe_number; -- - if (WARN_ON(!iov)) - return; - -+ num_vfs = iov->num_vfs; -+ base_pe = iov->vf_pe_arr[0].pe_number; -+ - /* Release VF PEs */ - pnv_ioda_release_vf_PE(pdev); - -diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h -index 11df4e16a1cc3..528946ee7a777 100644 ---- a/arch/powerpc/platforms/powernv/powernv.h -+++ b/arch/powerpc/platforms/powernv/powernv.h -@@ -42,4 +42,6 @@ ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count); - u32 memcons_get_size(struct memcons *mc); - struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name); - -+void pnv_rng_init(void); -+ - #endif /* _POWERNV_H */ -diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c -index 72c25295c1c2b..5f81ff9b5265f 100644 ---- a/arch/powerpc/platforms/powernv/rng.c -+++ b/arch/powerpc/platforms/powernv/rng.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include "powernv.h" - - #define DARN_ERR 0xFFFFFFFFFFFFFFFFul - -@@ -28,22 +29,16 @@ struct powernv_rng { - - static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); - -- --int powernv_hwrng_present(void) --{ -- struct powernv_rng *rng; -- -- rng = get_cpu_var(powernv_rng); -- put_cpu_var(rng); -- return rng != NULL; --} -- - static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) - { - unsigned long parity; - - /* Calculate the parity of the value */ -- asm ("popcntd %0,%1" : "=r" (parity) : "r" (val)); -+ asm (".machine push; \ -+ .machine power7; \ -+ popcntd %0,%1; \ -+ .machine pop;" -+ : "=r" (parity) : "r" (val)); - - /* xor our value with the previous mask */ - val ^= rng->mask; -@@ -54,17 +49,6 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) - return val; - } - --int powernv_get_random_real_mode(unsigned long *v) --{ -- struct powernv_rng *rng; -- -- rng = raw_cpu_read(powernv_rng); -- -- *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); -- -- return 1; --} -- - static int powernv_get_random_darn(unsigned long *v) - { - unsigned long val; -@@ -94,9 +78,6 @@ static int initialise_darn(void) - return 0; - } - } -- -- pr_warn("Unable to use DARN for get_random_seed()\n"); -- - return -EIO; - } - -@@ -104,12 +85,14 @@ int powernv_get_random_long(unsigned long *v) - { - struct powernv_rng *rng; - -- rng = get_cpu_var(powernv_rng); -- -- *v = rng_whiten(rng, in_be64(rng->regs)); -- -- put_cpu_var(rng); -- -+ if (mfmsr() & MSR_DR) { -+ rng = get_cpu_var(powernv_rng); -+ *v = rng_whiten(rng, in_be64(rng->regs)); -+ put_cpu_var(rng); -+ } else { -+ rng = raw_cpu_read(powernv_rng); -+ *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); -+ } - return 1; - } - EXPORT_SYMBOL_GPL(powernv_get_random_long); -@@ -159,32 +142,59 @@ static __init int rng_create(struct device_node *dn) - - rng_init_per_cpu(rng, dn); - -- pr_info_once("Registering arch random hook.\n"); -- - ppc_md.get_random_seed = powernv_get_random_long; - - return 0; - } - --static __init int rng_init(void) -+static int __init 
pnv_get_random_long_early(unsigned long *v) - { - struct device_node *dn; -- int rc; -- -- for_each_compatible_node(dn, NULL, "ibm,power-rng") { -- rc = rng_create(dn); -- if (rc) { -- pr_err("Failed creating rng for %pOF (%d).\n", -- dn, rc); -- continue; -- } - -- /* Create devices for hwrng driver */ -- of_platform_device_create(dn, NULL, NULL); -- } -+ if (!slab_is_available()) -+ return 0; -+ -+ if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, -+ NULL) != pnv_get_random_long_early) -+ return 0; -+ -+ for_each_compatible_node(dn, NULL, "ibm,power-rng") -+ rng_create(dn); - -- initialise_darn(); -+ if (!ppc_md.get_random_seed) -+ return 0; -+ return ppc_md.get_random_seed(v); -+} -+ -+void __init pnv_rng_init(void) -+{ -+ struct device_node *dn; -+ -+ /* Prefer darn over the rest. */ -+ if (!initialise_darn()) -+ return; -+ -+ dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); -+ if (dn) -+ ppc_md.get_random_seed = pnv_get_random_long_early; -+ -+ of_node_put(dn); -+} -+ -+static int __init pnv_rng_late_init(void) -+{ -+ struct device_node *dn; -+ unsigned long v; -+ -+ /* In case it wasn't called during init for some other reason. */ -+ if (ppc_md.get_random_seed == pnv_get_random_long_early) -+ pnv_get_random_long_early(&v); -+ -+ if (ppc_md.get_random_seed == powernv_get_random_long) { -+ for_each_compatible_node(dn, NULL, "ibm,power-rng") -+ of_platform_device_create(dn, NULL, NULL); -+ } - - return 0; - } --machine_subsys_initcall(powernv, rng_init); -+machine_subsys_initcall(powernv, pnv_rng_late_init); -diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c -index a8db3f1530639..1b3c7e04a7af5 100644 ---- a/arch/powerpc/platforms/powernv/setup.c -+++ b/arch/powerpc/platforms/powernv/setup.c -@@ -190,6 +190,8 @@ static void __init pnv_setup_arch(void) - pnv_check_guarded_cores(); - - /* XXX PMCS */ -+ -+ pnv_rng_init(); - } - - static void __init pnv_init(void) -diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c -index e4a00ad06f9d3..67c8c4b2d8b17 100644 ---- a/arch/powerpc/platforms/powernv/ultravisor.c -+++ b/arch/powerpc/platforms/powernv/ultravisor.c -@@ -55,6 +55,7 @@ static int __init uv_init(void) - return -ENODEV; - - uv_memcons = memcons_init(node, "memcons"); -+ of_node_put(node); - if (!uv_memcons) - return -ENOENT; - -diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c -index a7aabc18039eb..c1bfad56447d4 100644 ---- a/arch/powerpc/platforms/powernv/vas-fault.c -+++ b/arch/powerpc/platforms/powernv/vas-fault.c -@@ -216,7 +216,7 @@ int vas_setup_fault_window(struct vas_instance *vinst) - vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT); - - attr.rx_fifo_size = vinst->fault_fifo_size; -- attr.rx_fifo = vinst->fault_fifo; -+ attr.rx_fifo = __pa(vinst->fault_fifo); - - /* - * Max creds is based on number of CRBs can fit in the FIFO. -diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c -index 0f8d39fbf2b21..b664838008c12 100644 ---- a/arch/powerpc/platforms/powernv/vas-window.c -+++ b/arch/powerpc/platforms/powernv/vas-window.c -@@ -404,7 +404,7 @@ static void init_winctx_regs(struct pnv_vas_window *window, - * - * See also: Design note in function header. 
- */ -- val = __pa(winctx->rx_fifo); -+ val = winctx->rx_fifo; - val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0); - write_hvwc_reg(window, VREG(LFIFO_BAR), val); - -@@ -739,7 +739,7 @@ static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin, - */ - winctx->fifo_disable = true; - winctx->intr_disable = true; -- winctx->rx_fifo = NULL; -+ winctx->rx_fifo = 0; - } - - winctx->lnotify_lpid = rxattr->lnotify_lpid; -@@ -1310,8 +1310,8 @@ int vas_win_close(struct vas_window *vwin) - /* if send window, drop reference to matching receive window */ - if (window->tx_win) { - if (window->user_win) { -- put_vas_user_win_ref(&vwin->task_ref); - mm_context_remove_vas_window(vwin->task_ref.mm); -+ put_vas_user_win_ref(&vwin->task_ref); - } - put_rx_win(window->rxwin); - } -diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h -index 8bb08e395de05..08d9d3d5a22b0 100644 ---- a/arch/powerpc/platforms/powernv/vas.h -+++ b/arch/powerpc/platforms/powernv/vas.h -@@ -376,7 +376,7 @@ struct pnv_vas_window { - * is a container for the register fields in the window context. - */ - struct vas_winctx { -- void *rx_fifo; -+ u64 rx_fifo; - int rx_fifo_size; - int wcreds_max; - int rsvd_txbuf_count; -diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c -index 09fafcf2d3a06..f51fd4ac3f0b6 100644 ---- a/arch/powerpc/platforms/pseries/eeh_pseries.c -+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c -@@ -845,18 +845,8 @@ static int __init eeh_pseries_init(void) - return -EINVAL; - } - -- /* Initialize error log lock and size */ -- spin_lock_init(&slot_errbuf_lock); -- eeh_error_buf_size = rtas_token("rtas-error-log-max"); -- if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { -- pr_info("%s: unknown EEH error log size\n", -- __func__); -- eeh_error_buf_size = 1024; -- } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { -- pr_info("%s: EEH error log size %d exceeds the maximal %d\n", -- __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); -- eeh_error_buf_size = RTAS_ERROR_LOG_MAX; -- } -+ /* Initialize error log size */ -+ eeh_error_buf_size = rtas_get_error_log_max(); - - /* Set EEH probe mode */ - eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); -diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c -index a52af8fbf5711..ec5d84b4958c5 100644 ---- a/arch/powerpc/platforms/pseries/iommu.c -+++ b/arch/powerpc/platforms/pseries/iommu.c -@@ -85,19 +85,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node) - static void iommu_pseries_free_group(struct iommu_table_group *table_group, - const char *node_name) - { -- struct iommu_table *tbl; -- - if (!table_group) - return; - -- tbl = table_group->tables[0]; - #ifdef CONFIG_IOMMU_API - if (table_group->group) { - iommu_group_put(table_group->group); - BUG_ON(table_group->group); - } - #endif -- iommu_tce_table_put(tbl); -+ -+ /* Default DMA window table is at index 0, while DDW at 1. SR-IOV -+ * adapters only have table on index 1. 
-+ */ -+ if (table_group->tables[0]) -+ iommu_tce_table_put(table_group->tables[0]); -+ -+ if (table_group->tables[1]) -+ iommu_tce_table_put(table_group->tables[1]); - - kfree(table_group); - } -@@ -306,13 +311,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, - static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) - { - u64 rc; -+ long rpages = npages; -+ unsigned long limit; - - if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) - return tce_free_pSeriesLP(tbl->it_index, tcenum, - tbl->it_page_shift, npages); - -- rc = plpar_tce_stuff((u64)tbl->it_index, -- (u64)tcenum << tbl->it_page_shift, 0, npages); -+ do { -+ limit = min_t(unsigned long, rpages, 512); -+ -+ rc = plpar_tce_stuff((u64)tbl->it_index, -+ (u64)tcenum << tbl->it_page_shift, 0, limit); -+ -+ rpages -= limit; -+ tcenum += limit; -+ } while (rpages > 0 && !rc); - - if (rc && printk_ratelimit()) { - printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); -@@ -1092,15 +1106,6 @@ static phys_addr_t ddw_memory_hotplug_max(void) - phys_addr_t max_addr = memory_hotplug_max(); - struct device_node *memory; - -- /* -- * The "ibm,pmemory" can appear anywhere in the address space. -- * Assuming it is still backed by page structs, set the upper limit -- * for the huge DMA window as MAX_PHYSMEM_BITS. -- */ -- if (of_find_node_by_type(NULL, "ibm,pmemory")) -- return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ? -- (phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS); -- - for_each_node_by_type(memory, "memory") { - unsigned long start, size; - int n_mem_addr_cells, n_mem_size_cells, len; -@@ -1365,8 +1370,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) - len = order_base_2(query.largest_available_block << page_shift); - win_name = DMA64_PROPNAME; - } else { -- direct_mapping = true; -- win_name = DIRECT64_PROPNAME; -+ direct_mapping = !default_win_removed || -+ (len == MAX_PHYSMEM_BITS) || -+ (!pmem_present && (len == max_ram_len)); -+ win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME; - } - - ret = create_ddw(dev, ddw_avail, &create, page_shift, len); -diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c -index 3df6bdfea475a..d133597a84ca0 100644 ---- a/arch/powerpc/platforms/pseries/lpar.c -+++ b/arch/powerpc/platforms/pseries/lpar.c -@@ -638,16 +638,8 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = { - - static int __init vcpudispatch_stats_procfs_init(void) - { -- /* -- * Avoid smp_processor_id while preemptible. All CPUs should have -- * the same value for lppaca_shared_proc. 
-- */ -- preempt_disable(); -- if (!lppaca_shared_proc(get_lppaca())) { -- preempt_enable(); -+ if (!lppaca_shared_proc()) - return 0; -- } -- preempt_enable(); - - if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, - &vcpudispatch_stats_proc_ops)) -diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c -index f71eac74ea92a..19503a8797823 100644 ---- a/arch/powerpc/platforms/pseries/lparcfg.c -+++ b/arch/powerpc/platforms/pseries/lparcfg.c -@@ -205,7 +205,7 @@ static void parse_ppp_data(struct seq_file *m) - ppp_data.active_system_procs); - - /* pool related entries are appropriate for shared configs */ -- if (lppaca_shared_proc(get_lppaca())) { -+ if (lppaca_shared_proc()) { - unsigned long pool_idle_time, pool_procs; - - seq_printf(m, "pool=%d\n", ppp_data.pool_num); -@@ -529,7 +529,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) - partition_potential_processors); - - seq_printf(m, "shared_processor_mode=%d\n", -- lppaca_shared_proc(get_lppaca())); -+ lppaca_shared_proc()); - - #ifdef CONFIG_PPC_BOOK3S_64 - seq_printf(m, "slb_size=%d\n", mmu_slb_size); -diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c -index e83e0891272d3..210a37a065fb7 100644 ---- a/arch/powerpc/platforms/pseries/mobility.c -+++ b/arch/powerpc/platforms/pseries/mobility.c -@@ -63,6 +63,27 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) - - static int delete_dt_node(struct device_node *dn) - { -+ struct device_node *pdn; -+ bool is_platfac; -+ -+ pdn = of_get_parent(dn); -+ is_platfac = of_node_is_type(dn, "ibm,platform-facilities") || -+ of_node_is_type(pdn, "ibm,platform-facilities"); -+ of_node_put(pdn); -+ -+ /* -+ * The drivers that bind to nodes in the platform-facilities -+ * hierarchy don't support node removal, and the removal directive -+ * from firmware is always followed by an add of an equivalent -+ * node. The capability (e.g. RNG, encryption, compression) -+ * represented by the node is never interrupted by the migration. -+ * So ignore changes to this part of the tree. -+ */ -+ if (is_platfac) { -+ pr_notice("ignoring remove operation for %pOFfp\n", dn); -+ return 0; -+ } -+ - pr_debug("removing node %pOFfp\n", dn); - dlpar_detach_node(dn); - return 0; -@@ -222,6 +243,19 @@ static int add_dt_node(struct device_node *parent_dn, __be32 drc_index) - if (!dn) - return -ENOENT; - -+ /* -+ * Since delete_dt_node() ignores this node type, this is the -+ * necessary counterpart. We also know that a platform-facilities -+ * node returned from dlpar_configure_connector() has children -+ * attached, and dlpar_attach_node() only adds the parent, leaking -+ * the children. So ignore these on the add side for now. -+ */ -+ if (of_node_is_type(dn, "ibm,platform-facilities")) { -+ pr_notice("ignoring add operation for %pOF\n", dn); -+ dlpar_free_cc_nodes(dn); -+ return 0; -+ } -+ - rc = dlpar_attach_node(dn, parent_dn); - if (rc) - dlpar_free_cc_nodes(dn); -diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c -index f48e87ac89c9b..3cfcc748052e9 100644 ---- a/arch/powerpc/platforms/pseries/papr_scm.c -+++ b/arch/powerpc/platforms/pseries/papr_scm.c -@@ -1159,6 +1159,13 @@ static int papr_scm_probe(struct platform_device *pdev) - return -ENODEV; - } - -+ /* -+ * open firmware platform device create won't update the NUMA -+ * distance table. 
For PAPR SCM devices we use numa_map_to_online_node() -+ * to find the nearest online NUMA node and that requires correct -+ * distance table information. -+ */ -+ update_numa_distance(dn); - - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) -diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c -index 90c9d3531694b..4ba8245681192 100644 ---- a/arch/powerpc/platforms/pseries/pci_dlpar.c -+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c -@@ -78,6 +78,9 @@ int remove_phb_dynamic(struct pci_controller *phb) - - pseries_msi_free_domains(phb); - -+ /* Keep a reference so phb isn't freed yet */ -+ get_device(&host_bridge->dev); -+ - /* Remove the PCI bus and unregister the bridge device from sysfs */ - phb->bus = NULL; - pci_remove_bus(b); -@@ -101,6 +104,7 @@ int remove_phb_dynamic(struct pci_controller *phb) - * the pcibios_free_controller_deferred() callback; - * see pseries_root_bridge_prepare(). - */ -+ put_device(&host_bridge->dev); - - return 0; - } -diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h -index 3544778e06d01..2a97cc20fe8fe 100644 ---- a/arch/powerpc/platforms/pseries/pseries.h -+++ b/arch/powerpc/platforms/pseries/pseries.h -@@ -115,4 +115,6 @@ extern u32 pseries_security_flavor; - void pseries_setup_security_mitigations(void); - void pseries_lpar_read_hblkrm_characteristics(void); - -+void pseries_rng_init(void); -+ - #endif /* _PSERIES_PSERIES_H */ -diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c -index 6268545947b83..6ddfdeaace9ef 100644 ---- a/arch/powerpc/platforms/pseries/rng.c -+++ b/arch/powerpc/platforms/pseries/rng.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include "pseries.h" - - - static int pseries_get_random_long(unsigned long *v) -@@ -24,19 +25,13 @@ static int pseries_get_random_long(unsigned long *v) - return 0; - } - --static __init int rng_init(void) -+void __init pseries_rng_init(void) - { - struct device_node *dn; - - dn = of_find_compatible_node(NULL, NULL, "ibm,random"); - if (!dn) -- return -ENODEV; -- -- pr_info("Registering arch random hook.\n"); -- -+ return; - ppc_md.get_random_seed = pseries_get_random_long; -- - of_node_put(dn); -- return 0; - } --machine_subsys_initcall(pseries, rng_init); -diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c -index f79126f16258a..d25053755c8b8 100644 ---- a/arch/powerpc/platforms/pseries/setup.c -+++ b/arch/powerpc/platforms/pseries/setup.c -@@ -816,7 +816,7 @@ static void __init pSeries_setup_arch(void) - if (firmware_has_feature(FW_FEATURE_LPAR)) { - vpa_init(boot_cpuid); - -- if (lppaca_shared_proc(get_lppaca())) { -+ if (lppaca_shared_proc()) { - static_branch_enable(&shared_processor); - pv_spinlocks_init(); - } -@@ -840,6 +840,8 @@ static void __init pSeries_setup_arch(void) - - if (swiotlb_force == SWIOTLB_FORCE) - ppc_swiotlb_enable = 1; -+ -+ pseries_rng_init(); - } - - static void pseries_panic(char *str) -diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c -index b043e3936d215..b54f6fc27896f 100644 ---- a/arch/powerpc/platforms/pseries/vas.c -+++ b/arch/powerpc/platforms/pseries/vas.c -@@ -324,7 +324,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, - * So no unpacking needs to be done. 
- */ - rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain, -- VPHN_FLAG_VCPU, smp_processor_id()); -+ VPHN_FLAG_VCPU, hard_smp_processor_id()); - if (rc != H_SUCCESS) { - pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc); - goto out; -@@ -441,8 +441,8 @@ static int vas_deallocate_window(struct vas_window *vwin) - atomic_dec(&caps->used_lpar_creds); - mutex_unlock(&vas_pseries_mutex); - -- put_vas_user_win_ref(&vwin->task_ref); - mm_context_remove_vas_window(vwin->task_ref.mm); -+ put_vas_user_win_ref(&vwin->task_ref); - - kfree(win); - return 0; -diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile -index 348f595810523..d08239ae2bcd2 100644 ---- a/arch/powerpc/purgatory/Makefile -+++ b/arch/powerpc/purgatory/Makefile -@@ -4,6 +4,11 @@ KASAN_SANITIZE := n - - targets += trampoline_$(BITS).o purgatory.ro kexec-purgatory.c - -+# When profile-guided optimization is enabled, llvm emits two different -+# overlapping text sections, which is not supported by kexec. Remove profile -+# optimization flags. -+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) -+ - LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined - - $(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE -diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c -index 1d33b7a5ea832..dc774b204c061 100644 ---- a/arch/powerpc/sysdev/dart_iommu.c -+++ b/arch/powerpc/sysdev/dart_iommu.c -@@ -404,9 +404,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) - } - - /* Initialize the DART HW */ -- if (dart_init(dn) != 0) -+ if (dart_init(dn) != 0) { -+ of_node_put(dn); - return; -- -+ } - /* - * U4 supports a DART bypass, we use it for 64-bit capable devices to - * improve performance. 
However, that only works for devices connected -@@ -419,6 +420,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) - - /* Setup pci_dma ops */ - set_pci_dma_ops(&dma_iommu_ops); -+ of_node_put(dn); - } - - #ifdef CONFIG_PM -diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S -index efeeb1b885a17..329b9c4ae5429 100644 ---- a/arch/powerpc/sysdev/dcr-low.S -+++ b/arch/powerpc/sysdev/dcr-low.S -@@ -11,7 +11,7 @@ - #include - - #define DCR_ACCESS_PROLOG(table) \ -- cmpli cr0,r3,1024; \ -+ cmplwi cr0,r3,1024; \ - rlwinm r3,r3,4,18,27; \ - lis r5,table@h; \ - ori r5,r5,table@l; \ -diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c -index 8963eaffb1b7b..39186ad6b3c3a 100644 ---- a/arch/powerpc/sysdev/fsl_gtm.c -+++ b/arch/powerpc/sysdev/fsl_gtm.c -@@ -86,7 +86,7 @@ static LIST_HEAD(gtms); - */ - struct gtm_timer *gtm_get_timer16(void) - { -- struct gtm *gtm = NULL; -+ struct gtm *gtm; - int i; - - list_for_each_entry(gtm, >ms, list_node) { -@@ -103,7 +103,7 @@ struct gtm_timer *gtm_get_timer16(void) - spin_unlock_irq(>m->lock); - } - -- if (gtm) -+ if (!list_empty(>ms)) - return ERR_PTR(-EBUSY); - return ERR_PTR(-ENODEV); - } -diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c -index e6b06c3f81973..c55ccec0a1690 100644 ---- a/arch/powerpc/sysdev/fsl_msi.c -+++ b/arch/powerpc/sysdev/fsl_msi.c -@@ -211,8 +211,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - dev_err(&pdev->dev, - "node %pOF has an invalid fsl,msi phandle %u\n", - hose->dn, np->phandle); -+ of_node_put(np); - return -EINVAL; - } -+ of_node_put(np); - } - - for_each_pci_msi_entry(entry, pdev) { -diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c -index b8f76f3fd9941..a14a88e5025e5 100644 ---- a/arch/powerpc/sysdev/fsl_pci.c -+++ b/arch/powerpc/sysdev/fsl_pci.c -@@ -520,6 +520,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) - struct resource rsrc; - const int *bus_range; - u8 hdr_type, progif; -+ u32 class_code; - struct device_node *dev; - struct ccsr_pci __iomem *pci; - u16 temp; -@@ -593,6 +594,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) - PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; - if (fsl_pcie_check_link(hose)) - hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; -+ /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */ -+ if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) { -+ early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code); -+ class_code &= 0xff; -+ class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; -+ early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code); -+ } - } else { - /* - * Set PBFR(PCI Bus Function Register)[10] = 1 to -diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h -index 1d7a412056959..5ffaa60f1fa09 100644 ---- a/arch/powerpc/sysdev/fsl_pci.h -+++ b/arch/powerpc/sysdev/fsl_pci.h -@@ -18,6 +18,7 @@ struct platform_device; - - #define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */ - #define PCIE_LTSSM_L0 0x16 /* L0 state */ -+#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */ - #define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */ - #define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */ - #define PIWAR_EN 0x80000000 /* Enable */ -diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c -index ff7906b48ca1e..1bfc9afa8a1a1 100644 ---- a/arch/powerpc/sysdev/fsl_rio.c -+++ 
b/arch/powerpc/sysdev/fsl_rio.c -@@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev) - if (rc) { - dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", - rmu_node); -+ of_node_put(rmu_node); - goto err_rmu; - } -+ of_node_put(rmu_node); - rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); - if (!rmu_regs_win) { - dev_err(&dev->dev, "Unable to map rmu register window\n"); -diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c -index 042bb38fa5c24..a06297aa3f1be 100644 ---- a/arch/powerpc/sysdev/tsi108_pci.c -+++ b/arch/powerpc/sysdev/tsi108_pci.c -@@ -216,9 +216,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary) - - (hose)->ops = &tsi108_direct_pci_ops; - -- printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. " -- "Firmware bus number: %d->%d\n", -- rsrc.start, hose->first_busno, hose->last_busno); -+ pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n", -+ &rsrc.start, hose->first_busno, hose->last_busno); - - /* Interpret the "ranges" property */ - /* This also maps the I/O region and sets isa_io/mem_base */ -diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c -index 675d708863d57..db0452e7c3515 100644 ---- a/arch/powerpc/sysdev/xics/icp-opal.c -+++ b/arch/powerpc/sysdev/xics/icp-opal.c -@@ -196,6 +196,7 @@ int icp_opal_init(void) - - printk("XICS: Using OPAL ICP fallbacks\n"); - -+ of_node_put(np); - return 0; - } - -diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c -index b9da317b7a2d7..4533d4a46ece2 100644 ---- a/arch/powerpc/sysdev/xics/ics-rtas.c -+++ b/arch/powerpc/sysdev/xics/ics-rtas.c -@@ -37,8 +37,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d) - - server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); - -- call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, -- server, DEFAULT_PRIORITY); -+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, -+ DEFAULT_PRIORITY); - if (call_status != 0) { - printk(KERN_ERR - "%s: ibm_set_xive irq %u server %x returned %d\n", -@@ -47,7 +47,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) - } - - /* Now unmask the interrupt (often a no-op) */ -- call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq); -+ call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", - __func__, hw_irq, call_status); -@@ -69,7 +69,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) - if (hw_irq == XICS_IPI) - return; - -- call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq); -+ call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", - __func__, hw_irq, call_status); -@@ -77,8 +77,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) - } - - /* Have to set XIVE to 0xff to be able to remove a slot */ -- call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, -- xics_default_server, 0xff); -+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, -+ xics_default_server, 0xff); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", - __func__, hw_irq, call_status); -@@ -109,7 +109,7 @@ static int ics_rtas_set_affinity(struct irq_data *d, - if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) - return -1; - -- status = rtas_call_reentrant(ibm_get_xive, 1, 3, 
xics_status, hw_irq); -+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); - - if (status) { - printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", -@@ -127,8 +127,8 @@ static int ics_rtas_set_affinity(struct irq_data *d, - pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq, - hw_irq, irq_server); - -- status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, -- hw_irq, irq_server, xics_status[1]); -+ status = rtas_call(ibm_set_xive, 3, 1, NULL, -+ hw_irq, irq_server, xics_status[1]); - - if (status) { - printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", -@@ -159,7 +159,7 @@ static int ics_rtas_check(struct ics *ics, unsigned int hw_irq) - return -EINVAL; - - /* Check if RTAS knows about this interrupt */ -- rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq); -+ rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); - if (rc) - return -ENXIO; - -@@ -175,7 +175,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec) - { - int rc, status[2]; - -- rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec); -+ rc = rtas_call(ibm_get_xive, 1, 3, status, vec); - if (rc) - return -1; - return status[0]; -diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig -index 97796c6b63f04..785c292d104b7 100644 ---- a/arch/powerpc/sysdev/xive/Kconfig -+++ b/arch/powerpc/sysdev/xive/Kconfig -@@ -3,7 +3,6 @@ config PPC_XIVE - bool - select PPC_SMP_MUXED_IPI - select HARDIRQS_SW_RESEND -- select IRQ_DOMAIN_NOMAP - - config PPC_XIVE_NATIVE - bool -diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c -index c5d75c02ad8b5..7b69299c29123 100644 ---- a/arch/powerpc/sysdev/xive/common.c -+++ b/arch/powerpc/sysdev/xive/common.c -@@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = { - - static void __init xive_init_host(struct device_node *np) - { -- xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, -- &xive_irq_domain_ops, NULL); -+ xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL); - if (WARN_ON(xive_irq_domain == NULL)) - return; - irq_set_default_host(xive_irq_domain); -diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c -index f143b6f111ac0..43bd2579d942b 100644 ---- a/arch/powerpc/sysdev/xive/spapr.c -+++ b/arch/powerpc/sysdev/xive/spapr.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -55,7 +56,7 @@ static int xive_irq_bitmap_add(int base, int count) - spin_lock_init(&xibm->lock); - xibm->base = base; - xibm->count = count; -- xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL); -+ xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL); - if (!xibm->bitmap) { - kfree(xibm); - return -ENOMEM; -@@ -67,6 +68,17 @@ static int xive_irq_bitmap_add(int base, int count) - return 0; - } - -+static void xive_irq_bitmap_remove_all(void) -+{ -+ struct xive_irq_bitmap *xibm, *tmp; -+ -+ list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) { -+ list_del(&xibm->list); -+ bitmap_free(xibm->bitmap); -+ kfree(xibm); -+ } -+} -+ - static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm) - { - int irq; -@@ -425,6 +437,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) - - data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); - if (!data->trig_mmio) { -+ iounmap(data->eoi_mmio); - pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); - return -ENOMEM; - } -@@ -653,6 +666,9 @@ static int xive_spapr_debug_show(struct seq_file *m, 
void *private) - struct xive_irq_bitmap *xibm; - char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - -+ if (!buf) -+ return -ENOMEM; -+ - list_for_each_entry(xibm, &xive_irq_bitmaps, list) { - memset(buf, 0, PAGE_SIZE); - bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count); -@@ -701,6 +717,7 @@ static bool xive_get_max_prio(u8 *max_prio) - } - - reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len); -+ of_node_put(rootdn); - if (!reg) { - pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n"); - return false; -@@ -800,7 +817,7 @@ bool __init xive_spapr_init(void) - u32 val; - u32 len; - const __be32 *reg; -- int i; -+ int i, err; - - if (xive_spapr_disabled()) - return false; -@@ -816,32 +833,35 @@ bool __init xive_spapr_init(void) - /* Resource 1 is the OS ring TIMA */ - if (of_address_to_resource(np, 1, &r)) { - pr_err("Failed to get thread mgmnt area resource\n"); -- return false; -+ goto err_put; - } - tima = ioremap(r.start, resource_size(&r)); - if (!tima) { - pr_err("Failed to map thread mgmnt area\n"); -- return false; -+ goto err_put; - } - - if (!xive_get_max_prio(&max_prio)) -- return false; -+ goto err_unmap; - - /* Feed the IRQ number allocator with the ranges given in the DT */ - reg = of_get_property(np, "ibm,xive-lisn-ranges", &len); - if (!reg) { - pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n"); -- return false; -+ goto err_unmap; - } - - if (len % (2 * sizeof(u32)) != 0) { - pr_err("invalid 'ibm,xive-lisn-ranges' property\n"); -- return false; -+ goto err_unmap; - } - -- for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) -- xive_irq_bitmap_add(be32_to_cpu(reg[0]), -- be32_to_cpu(reg[1])); -+ for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) { -+ err = xive_irq_bitmap_add(be32_to_cpu(reg[0]), -+ be32_to_cpu(reg[1])); -+ if (err < 0) -+ goto err_mem_free; -+ } - - /* Iterate the EQ sizes and pick one */ - of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) { -@@ -852,10 +872,19 @@ bool __init xive_spapr_init(void) - - /* Initialize XIVE core with our backend */ - if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio)) -- return false; -+ goto err_mem_free; - -+ of_node_put(np); - pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); - return true; -+ -+err_mem_free: -+ xive_irq_bitmap_remove_all(); -+err_unmap: -+ iounmap(tima); -+err_put: -+ of_node_put(np); -+ return false; - } - - machine_arch_initcall(pseries, xive_core_debug_init); -diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh -index 014e00e74d2b6..63792af004170 100755 ---- a/arch/powerpc/tools/relocs_check.sh -+++ b/arch/powerpc/tools/relocs_check.sh -@@ -39,6 +39,7 @@ $objdump -R "$vmlinux" | - # R_PPC_NONE - grep -F -w -v 'R_PPC64_RELATIVE - R_PPC64_NONE -+R_PPC64_UADDR64 - R_PPC_ADDR16_LO - R_PPC_ADDR16_HI - R_PPC_ADDR16_HA -@@ -54,9 +55,3 @@ fi - num_bad=$(echo "$bad_relocs" | wc -l) - echo "WARNING: $num_bad bad relocations" - echo "$bad_relocs" -- --# If we see this type of relocation it's an idication that --# we /may/ be using an old version of binutils. 
--if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then -- echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel" --fi -diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c -index dd8241c009e53..8b5277c3b1476 100644 ---- a/arch/powerpc/xmon/xmon.c -+++ b/arch/powerpc/xmon/xmon.c -@@ -59,6 +59,7 @@ - #ifdef CONFIG_PPC64 - #include - #include -+#include - #endif - - #include "nonstdio.h" -@@ -1528,9 +1529,9 @@ bpt_cmds(void) - cmd = inchar(); - - switch (cmd) { -- static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; -- int mode; -- case 'd': /* bd - hardware data breakpoint */ -+ case 'd': { /* bd - hardware data breakpoint */ -+ static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; -+ int mode; - if (xmon_is_ro) { - printf(xmon_ro_msg); - break; -@@ -1563,6 +1564,7 @@ bpt_cmds(void) - - force_enable_xmon(); - break; -+ } - - case 'i': /* bi - hardware instr breakpoint */ - if (xmon_is_ro) { -@@ -3264,8 +3266,7 @@ static void show_task(struct task_struct *volatile tsk) - * appropriate for calling from xmon. This could be moved - * to a common, generic, routine used by both. - */ -- state = (p_state == 0) ? 'R' : -- (p_state < 0) ? 'U' : -+ state = (p_state == TASK_RUNNING) ? 'R' : - (p_state & TASK_UNINTERRUPTIBLE) ? 'D' : - (p_state & TASK_STOPPED) ? 'T' : - (p_state & TASK_TRACED) ? 'C' : -diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index f076cee11af69..8dd7f01ee031d 100644 ---- a/arch/riscv/Kconfig -+++ b/arch/riscv/Kconfig -@@ -23,6 +23,7 @@ config RISCV - select ARCH_HAS_GIGANTIC_PAGE - select ARCH_HAS_KCOV - select ARCH_HAS_MMIOWB -+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE - select ARCH_HAS_PTE_SPECIAL - select ARCH_HAS_SET_DIRECT_MAP if MMU - select ARCH_HAS_SET_MEMORY if MMU -@@ -46,7 +47,7 @@ config RISCV - select CLINT_TIMER if !MMU - select COMMON_CLK - select EDAC_SUPPORT -- select GENERIC_ARCH_TOPOLOGY if SMP -+ select GENERIC_ARCH_TOPOLOGY - select GENERIC_ATOMIC64 if !64BIT - select GENERIC_CLOCKEVENTS_BROADCAST if SMP - select GENERIC_EARLY_IOREMAP -@@ -158,10 +159,9 @@ config PA_BITS - - config PAGE_OFFSET - hex -- default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB -+ default 0xC0000000 if 32BIT - default 0x80000000 if 64BIT && !MMU -- default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB -- default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB -+ default 0xffffffe000000000 if 64BIT - - config KASAN_SHADOW_OFFSET - hex -@@ -270,24 +270,6 @@ config MODULE_SECTIONS - bool - select HAVE_MOD_ARCH_SPECIFIC - --choice -- prompt "Maximum Physical Memory" -- default MAXPHYSMEM_1GB if 32BIT -- default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW -- default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY -- -- config MAXPHYSMEM_1GB -- depends on 32BIT -- bool "1GiB" -- config MAXPHYSMEM_2GB -- depends on 64BIT && CMODEL_MEDLOW -- bool "2GiB" -- config MAXPHYSMEM_128GB -- depends on 64BIT && CMODEL_MEDANY -- bool "128GiB" --endchoice -- -- - config SMP - bool "Symmetric Multi-Processing" - help -@@ -380,6 +362,28 @@ config RISCV_BASE_PMU - - endmenu - -+config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI -+ def_bool y -+ # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc -+ depends on AS_IS_GNU && AS_VERSION >= 23800 -+ help -+ Newer binutils versions default to ISA spec version 20191213 which -+ moves some instructions from the I extension to the Zicsr and Zifencei -+ extensions. 
-+ -+config TOOLCHAIN_NEEDS_OLD_ISA_SPEC -+ def_bool y -+ depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI -+ # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 -+ depends on CC_IS_CLANG && CLANG_VERSION < 170000 -+ help -+ Certain versions of clang do not support zicsr and zifencei via -march -+ but newer versions of binutils require it for the reasons noted in the -+ help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This -+ option causes an older ISA spec compatible with these older versions -+ of clang to be passed to GAS, which has the same result as passing zicsr -+ and zifencei to -march. -+ - config FPU - bool "FPU support" - default y -diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas -index b44d6ecdb46e5..0aacd7052585b 100644 ---- a/arch/riscv/Kconfig.erratas -+++ b/arch/riscv/Kconfig.erratas -@@ -2,6 +2,7 @@ menu "CPU errata selection" - - config RISCV_ERRATA_ALTERNATIVE - bool "RISC-V alternative scheme" -+ depends on !XIP_KERNEL - default y - help - This Kconfig allows the kernel to automatically patch the -diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs -index 30676ebb16ebd..46a534f047931 100644 ---- a/arch/riscv/Kconfig.socs -+++ b/arch/riscv/Kconfig.socs -@@ -14,8 +14,8 @@ config SOC_SIFIVE - select CLK_SIFIVE - select CLK_SIFIVE_PRCI - select SIFIVE_PLIC -- select RISCV_ERRATA_ALTERNATIVE -- select ERRATA_SIFIVE -+ select RISCV_ERRATA_ALTERNATIVE if !XIP_KERNEL -+ select ERRATA_SIFIVE if !XIP_KERNEL - help - This enables support for SiFive SoC platform hardware. - -diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile -index 0eb4568fbd290..0f17c6b6b7294 100644 ---- a/arch/riscv/Makefile -+++ b/arch/riscv/Makefile -@@ -13,7 +13,11 @@ LDFLAGS_vmlinux := - ifeq ($(CONFIG_DYNAMIC_FTRACE),y) - LDFLAGS_vmlinux := --no-relax - KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY -- CC_FLAGS_FTRACE := -fpatchable-function-entry=8 -+ifeq ($(CONFIG_RISCV_ISA_C),y) -+ CC_FLAGS_FTRACE := -fpatchable-function-entry=4 -+else -+ CC_FLAGS_FTRACE := -fpatchable-function-entry=2 -+endif - endif - - ifeq ($(CONFIG_CMODEL_MEDLOW),y) -@@ -39,6 +43,7 @@ else - endif - - ifeq ($(CONFIG_LD_IS_LLD),y) -+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0) - KBUILD_CFLAGS += -mno-relax - KBUILD_AFLAGS += -mno-relax - ifndef CONFIG_AS_IS_LLVM -@@ -46,12 +51,21 @@ ifndef CONFIG_AS_IS_LLVM - KBUILD_AFLAGS += -Wa,-mno-relax - endif - endif -+endif - - # ISA string setting - riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima - riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima - riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd - riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c -+ -+ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC -+KBUILD_CFLAGS += -Wa,-misa-spec=2.2 -+KBUILD_AFLAGS += -Wa,-misa-spec=2.2 -+else -+riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei -+endif -+ - KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) - KBUILD_AFLAGS += -march=$(riscv-march-y) - -@@ -68,7 +82,11 @@ ifeq ($(CONFIG_PERF_EVENTS),y) - KBUILD_CFLAGS += -fno-omit-frame-pointer - endif - -+# Avoid generating .eh_frame sections. -+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables -+ - KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) -+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) - - # GCC versions that support the "-mstrict-align" option default to allowing - # unaligned accesses. 
While unaligned accesses are explicitly allowed in the -@@ -108,11 +126,13 @@ PHONY += vdso_install - vdso_install: - $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ - -+ifeq ($(KBUILD_EXTMOD),) - ifeq ($(CONFIG_MMU),y) - prepare: vdso_prepare - vdso_prepare: prepare0 - $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h - endif -+endif - - ifneq ($(CONFIG_XIP_KERNEL),y) - ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) -diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi -index 5e8ca81424821..fa9162e3afa3f 100644 ---- a/arch/riscv/boot/dts/canaan/k210.dtsi -+++ b/arch/riscv/boot/dts/canaan/k210.dtsi -@@ -65,6 +65,18 @@ - compatible = "riscv,cpu-intc"; - }; - }; -+ -+ cpu-map { -+ cluster0 { -+ core0 { -+ cpu = <&cpu0>; -+ }; -+ -+ core1 { -+ cpu = <&cpu1>; -+ }; -+ }; -+ }; - }; - - sram: memory@80000000 { -@@ -113,7 +125,8 @@ - compatible = "canaan,k210-plic", "sifive,plic-1.0.0"; - reg = <0xC000000 0x4000000>; - interrupt-controller; -- interrupts-extended = <&cpu0_intc 11 &cpu1_intc 11>; -+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>, -+ <&cpu1_intc 11>, <&cpu1_intc 9>; - riscv,ndev = <65>; - }; - -diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts -index 0bcaf35045e79..82e7f8069ae77 100644 ---- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts -+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts -@@ -203,6 +203,8 @@ - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <50000000>; -+ spi-tx-bus-width = <4>; -+ spi-rx-bus-width = <4>; - m25p,fast-read; - broken-flash-reset; - }; -diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts -index ac8a03f5867ad..8d335233853a7 100644 ---- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts -+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts -@@ -205,6 +205,8 @@ - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <50000000>; -+ spi-tx-bus-width = <4>; -+ spi-rx-bus-width = <4>; - m25p,fast-read; - broken-flash-reset; - }; -diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts -index 623998194bc18..6703cfc055887 100644 ---- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts -+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts -@@ -213,6 +213,8 @@ - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <50000000>; -+ spi-tx-bus-width = <4>; -+ spi-rx-bus-width = <4>; - m25p,fast-read; - broken-flash-reset; - }; -diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts -index cf605ba0d67e4..ac0b56f7d2c9f 100644 ---- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts -+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts -@@ -178,6 +178,8 @@ - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <50000000>; -+ spi-tx-bus-width = <4>; -+ spi-rx-bus-width = <4>; - m25p,fast-read; - broken-flash-reset; - }; -diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts -index b254c60589a1c..cce5eca31f257 100644 ---- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts -+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts -@@ -12,7 +12,7 @@ - #address-cells = <2>; - #size-cells = <2>; - model = "Microchip PolarFire-SoC Icicle Kit"; -- compatible = "microchip,mpfs-icicle-kit"; -+ compatible = "microchip,mpfs-icicle-kit", 
"microchip,mpfs"; - - aliases { - ethernet0 = &emac1; -@@ -56,8 +56,17 @@ - status = "okay"; - }; - --&sdcard { -+&mmc { - status = "okay"; -+ -+ bus-width = <4>; -+ disable-wp; -+ cap-sd-highspeed; -+ card-detect-delay = <200>; -+ sd-uhs-sdr12; -+ sd-uhs-sdr25; -+ sd-uhs-sdr50; -+ sd-uhs-sdr104; - }; - - &emac0 { -diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi -index 9d2fbbc1f7778..4ef4bcb748729 100644 ---- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi -+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi -@@ -6,11 +6,8 @@ - / { - #address-cells = <2>; - #size-cells = <2>; -- model = "Microchip MPFS Icicle Kit"; -- compatible = "microchip,mpfs-icicle-kit"; -- -- chosen { -- }; -+ model = "Microchip PolarFire SoC"; -+ compatible = "microchip,mpfs"; - - cpus { - #address-cells = <1>; -@@ -262,39 +259,14 @@ - status = "disabled"; - }; - -- emmc: mmc@20008000 { -+ /* Common node entry for emmc/sd */ -+ mmc: mmc@20008000 { - compatible = "cdns,sd4hc"; - reg = <0x0 0x20008000 0x0 0x1000>; - interrupt-parent = <&plic>; - interrupts = <88 89>; - pinctrl-names = "default"; - clocks = <&clkcfg 6>; -- bus-width = <4>; -- cap-mmc-highspeed; -- mmc-ddr-3_3v; -- max-frequency = <200000000>; -- non-removable; -- no-sd; -- no-sdio; -- voltage-ranges = <3300 3300>; -- status = "disabled"; -- }; -- -- sdcard: sdhc@20008000 { -- compatible = "cdns,sd4hc"; -- reg = <0x0 0x20008000 0x0 0x1000>; -- interrupt-parent = <&plic>; -- interrupts = <88>; -- pinctrl-names = "default"; -- clocks = <&clkcfg 6>; -- bus-width = <4>; -- disable-wp; -- cap-sd-highspeed; -- card-detect-delay = <200>; -- sd-uhs-sdr12; -- sd-uhs-sdr25; -- sd-uhs-sdr50; -- sd-uhs-sdr104; - max-frequency = <200000000>; - status = "disabled"; - }; -diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi -index 7db8610534834..64c06c9b41dc8 100644 ---- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi -+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi -@@ -166,7 +166,7 @@ - clocks = <&prci PRCI_CLK_TLCLK>; - status = "disabled"; - }; -- dma: dma@3000000 { -+ dma: dma-controller@3000000 { - compatible = "sifive,fu540-c000-pdma"; - reg = <0x0 0x3000000 0x0 0x8000>; - interrupt-parent = <&plic0>; -diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi -index abbb960f90a00..f72bb158a7ab3 100644 ---- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi -+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi -@@ -134,6 +134,30 @@ - interrupt-controller; - }; - }; -+ -+ cpu-map { -+ cluster0 { -+ core0 { -+ cpu = <&cpu0>; -+ }; -+ -+ core1 { -+ cpu = <&cpu1>; -+ }; -+ -+ core2 { -+ cpu = <&cpu2>; -+ }; -+ -+ core3 { -+ cpu = <&cpu3>; -+ }; -+ -+ core4 { -+ cpu = <&cpu4>; -+ }; -+ }; -+ }; - }; - soc { - #address-cells = <2>; -@@ -304,7 +328,7 @@ - bus-range = <0x0 0xff>; - ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ - <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ -- <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ -+ <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x10000000>, /* mem */ - <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ - num-lanes = <0x8>; - interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>; -diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts -index 60846e88ae4b1..2f4d677c9c4ff 100644 ---- 
a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts -+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts -@@ -3,6 +3,8 @@ - - #include "fu540-c000.dtsi" - #include -+#include -+#include - - /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ - #define RTCCLK_FREQ 1000000 -@@ -46,6 +48,42 @@ - compatible = "gpio-restart"; - gpios = <&gpio 10 GPIO_ACTIVE_LOW>; - }; -+ -+ led-controller { -+ compatible = "pwm-leds"; -+ -+ led-d1 { -+ pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>; -+ active-low; -+ color = ; -+ max-brightness = <255>; -+ label = "d1"; -+ }; -+ -+ led-d2 { -+ pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>; -+ active-low; -+ color = ; -+ max-brightness = <255>; -+ label = "d2"; -+ }; -+ -+ led-d3 { -+ pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>; -+ active-low; -+ color = ; -+ max-brightness = <255>; -+ label = "d3"; -+ }; -+ -+ led-d4 { -+ pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>; -+ active-low; -+ color = ; -+ max-brightness = <255>; -+ label = "d4"; -+ }; -+ }; - }; - - &uart0 { -@@ -80,6 +118,7 @@ - spi-max-frequency = <20000000>; - voltage-ranges = <3300 3300>; - disable-wp; -+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>; - }; - }; - -diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts -index 2e4ea84f27e77..b40990210fb50 100644 ---- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts -+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts -@@ -2,6 +2,7 @@ - /* Copyright (c) 2020 SiFive, Inc */ - - #include "fu740-c000.dtsi" -+#include - #include - - /* Clock frequency (in Hz) of the PCB crystal for rtcclk */ -@@ -228,6 +229,7 @@ - spi-max-frequency = <20000000>; - voltage-ranges = <3300 3300>; - disable-wp; -+ gpios = <&gpio 15 GPIO_ACTIVE_LOW>; - }; - }; - -diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig -index 4ebc80315f013..f2a2f9c9ed49c 100644 ---- a/arch/riscv/configs/defconfig -+++ b/arch/riscv/configs/defconfig -@@ -72,9 +72,11 @@ CONFIG_GPIOLIB=y - CONFIG_GPIO_SIFIVE=y - # CONFIG_PTP_1588_CLOCK is not set - CONFIG_POWER_RESET=y --CONFIG_DRM=y --CONFIG_DRM_RADEON=y --CONFIG_DRM_VIRTIO_GPU=y -+CONFIG_DRM=m -+CONFIG_DRM_RADEON=m -+CONFIG_DRM_NOUVEAU=m -+CONFIG_DRM_VIRTIO_GPU=m -+CONFIG_FB=y - CONFIG_FRAMEBUFFER_CONSOLE=y - CONFIG_USB=y - CONFIG_USB_XHCI_HCD=y -diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig -index b16a2a12c82a8..3b9f83221f9c2 100644 ---- a/arch/riscv/configs/nommu_k210_defconfig -+++ b/arch/riscv/configs/nommu_k210_defconfig -@@ -29,8 +29,6 @@ CONFIG_EMBEDDED=y - CONFIG_SLOB=y - # CONFIG_MMU is not set - CONFIG_SOC_CANAAN=y --CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic" --CONFIG_MAXPHYSMEM_2GB=y - CONFIG_SMP=y - CONFIG_NR_CPUS=2 - CONFIG_CMDLINE="earlycon console=ttySIF0" -diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig -index 61f887f654199..15d1fd0a70184 100644 ---- a/arch/riscv/configs/nommu_k210_sdcard_defconfig -+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig -@@ -21,11 +21,9 @@ CONFIG_EMBEDDED=y - CONFIG_SLOB=y - # CONFIG_MMU is not set - CONFIG_SOC_CANAAN=y --CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic" --CONFIG_MAXPHYSMEM_2GB=y - CONFIG_SMP=y - CONFIG_NR_CPUS=2 --CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro" -+CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro" - CONFIG_CMDLINE_FORCE=y - # CONFIG_SECCOMP is not set - # CONFIG_STACKPROTECTOR is not set -diff --git 
a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig -index e046a0babde43..f224be697785f 100644 ---- a/arch/riscv/configs/nommu_virt_defconfig -+++ b/arch/riscv/configs/nommu_virt_defconfig -@@ -27,7 +27,6 @@ CONFIG_SLOB=y - # CONFIG_SLAB_MERGE_DEFAULT is not set - # CONFIG_MMU is not set - CONFIG_SOC_VIRT=y --CONFIG_MAXPHYSMEM_2GB=y - CONFIG_SMP=y - CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0" - CONFIG_CMDLINE_FORCE=y -diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig -index 434ef5b645998..cdd113e7a2912 100644 ---- a/arch/riscv/configs/rv32_defconfig -+++ b/arch/riscv/configs/rv32_defconfig -@@ -71,6 +71,7 @@ CONFIG_POWER_RESET=y - CONFIG_DRM=y - CONFIG_DRM_RADEON=y - CONFIG_DRM_VIRTIO_GPU=y -+CONFIG_FB=y - CONFIG_FRAMEBUFFER_CONSOLE=y - CONFIG_USB=y - CONFIG_USB_XHCI_HCD=y -diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h -index 67406c3763890..0377ce0fcc726 100644 ---- a/arch/riscv/include/asm/alternative-macros.h -+++ b/arch/riscv/include/asm/alternative-macros.h -@@ -23,9 +23,9 @@ - 888 : - \new_c - 889 : -- .previous - .org . - (889b - 888b) + (887b - 886b) - .org . - (887b - 886b) + (889b - 888b) -+ .previous - .endif - .endm - -@@ -60,9 +60,9 @@ - "888 :\n" \ - new_c "\n" \ - "889 :\n" \ -- ".previous\n" \ - ".org . - (887b - 886b) + (889b - 888b)\n" \ - ".org . - (889b - 888b) + (887b - 886b)\n" \ -+ ".previous\n" \ - ".endif\n" - - #define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ -diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h -index 618d7c5af1a2d..e15a1c9f1cf88 100644 ---- a/arch/riscv/include/asm/asm.h -+++ b/arch/riscv/include/asm/asm.h -@@ -23,6 +23,7 @@ - #define REG_L __REG_SEL(ld, lw) - #define REG_S __REG_SEL(sd, sw) - #define REG_SC __REG_SEL(sc.d, sc.w) -+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq) - #define REG_ASM __REG_SEL(.dword, .word) - #define SZREG __REG_SEL(8, 4) - #define LGREG __REG_SEL(3, 2) -diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h -index 49b398fe99f1b..1bb8662875dda 100644 ---- a/arch/riscv/include/asm/efi.h -+++ b/arch/riscv/include/asm/efi.h -@@ -10,10 +10,10 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_EFI - extern void efi_init(void); --extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); - #else - #define efi_init() - #endif -@@ -21,7 +21,10 @@ extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); - int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); - int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); - --#define arch_efi_call_virt_setup() efi_virtmap_load() -+#define arch_efi_call_virt_setup() ({ \ -+ sync_kernel_mappings(efi_mm.pgd); \ -+ efi_virtmap_load(); \ -+ }) - #define arch_efi_call_virt_teardown() efi_virtmap_unload() - - #define arch_efi_call_virt(p, f, args...) p->f(args) -diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h -index 54cbf07fb4e96..8839cd2b28d14 100644 ---- a/arch/riscv/include/asm/fixmap.h -+++ b/arch/riscv/include/asm/fixmap.h -@@ -22,6 +22,14 @@ - */ - enum fixed_addresses { - FIX_HOLE, -+ /* -+ * The fdt fixmap mapping must be PMD aligned and will be mapped -+ * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit. 
-+ */ -+ FIX_FDT_END, -+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, -+ -+ /* Below fixmaps will be mapped using fixmap_pte */ - FIX_PTE, - FIX_PMD, - FIX_TEXT_POKE1, -diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h -index 04dad33800418..d47d87c2d7e3d 100644 ---- a/arch/riscv/include/asm/ftrace.h -+++ b/arch/riscv/include/asm/ftrace.h -@@ -42,6 +42,14 @@ struct dyn_arch_ftrace { - * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to - * return address (original pc + 4) - * -+ *: -+ * 0: auipc t0/ra, 0x? -+ * 4: jalr t0/ra, ?(t0/ra) -+ * -+ *: -+ * 0: nop -+ * 4: nop -+ * - * Dynamic ftrace generates probes to call sites, so we must deal with - * both auipc and jalr at the same time. - */ -@@ -52,25 +60,43 @@ struct dyn_arch_ftrace { - #define AUIPC_OFFSET_MASK (0xfffff000) - #define AUIPC_PAD (0x00001000) - #define JALR_SHIFT 20 --#define JALR_BASIC (0x000080e7) --#define AUIPC_BASIC (0x00000097) -+#define JALR_RA (0x000080e7) -+#define AUIPC_RA (0x00000097) -+#define JALR_T0 (0x000282e7) -+#define AUIPC_T0 (0x00000297) - #define NOP4 (0x00000013) - --#define make_call(caller, callee, call) \ -+#define to_jalr_t0(offset) \ -+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0) -+ -+#define to_auipc_t0(offset) \ -+ ((offset & JALR_SIGN_MASK) ? \ -+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \ -+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0)) -+ -+#define make_call_t0(caller, callee, call) \ - do { \ -- call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \ -- (unsigned long)caller)); \ -- call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \ -- (unsigned long)caller)); \ -+ unsigned int offset = \ -+ (unsigned long) callee - (unsigned long) caller; \ -+ call[0] = to_auipc_t0(offset); \ -+ call[1] = to_jalr_t0(offset); \ - } while (0) - --#define to_jalr_insn(offset) \ -- (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC) -+#define to_jalr_ra(offset) \ -+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA) - --#define to_auipc_insn(offset) \ -+#define to_auipc_ra(offset) \ - ((offset & JALR_SIGN_MASK) ? \ -- (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \ -- ((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC)) -+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \ -+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA)) -+ -+#define make_call_ra(caller, callee, call) \ -+do { \ -+ unsigned int offset = \ -+ (unsigned long) callee - (unsigned long) caller; \ -+ call[0] = to_auipc_ra(offset); \ -+ call[1] = to_jalr_ra(offset); \ -+} while (0) - - /* - * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. 
-@@ -83,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); - #define ftrace_init_nop ftrace_init_nop - #endif - --#endif -+#endif /* CONFIG_DYNAMIC_FTRACE */ - - #endif /* _ASM_RISCV_FTRACE_H */ -diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h -index a5c2ca1d1cd8b..ec19d6afc8965 100644 ---- a/arch/riscv/include/asm/hugetlb.h -+++ b/arch/riscv/include/asm/hugetlb.h -@@ -5,4 +5,10 @@ - #include - #include - -+static inline void arch_clear_hugepage_flags(struct page *page) -+{ -+ clear_bit(PG_dcache_clean, &page->flags); -+} -+#define arch_clear_hugepage_flags arch_clear_hugepage_flags -+ - #endif /* _ASM_RISCV_HUGETLB_H */ -diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h -index 69605a4742706..92080a2279372 100644 ---- a/arch/riscv/include/asm/io.h -+++ b/arch/riscv/include/asm/io.h -@@ -101,9 +101,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) - __io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) - __io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) - __io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) --#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) --#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) --#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) -+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count) -+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count) -+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count) - - __io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) - __io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) -@@ -115,22 +115,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) - __io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) - __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) - __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) --#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count) --#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count) --#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) -+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count) -+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count) -+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count) - - #ifdef CONFIG_64BIT - __io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) - #define readsq(addr, buffer, count) __readsq(addr, buffer, count) - - __io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) --#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count) -+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count) - - __io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) - #define writesq(addr, buffer, count) __writesq(addr, buffer, count) - - __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) --#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count) -+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count) - #endif - - #include -diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h -index d6c277992f76a..b53891964ae03 100644 ---- a/arch/riscv/include/asm/irq_work.h -+++ b/arch/riscv/include/asm/irq_work.h -@@ -4,7 +4,7 @@ - - static inline bool arch_irq_work_has_interrupt(void) - { -- return true; -+ return 
IS_ENABLED(CONFIG_SMP); - } - extern void arch_irq_work_raise(void); - #endif /* _ASM_RISCV_IRQ_WORK_H */ -diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h -index 38af2ec7b9bf9..729991e8f7825 100644 ---- a/arch/riscv/include/asm/jump_label.h -+++ b/arch/riscv/include/asm/jump_label.h -@@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, - bool branch) - { - asm_volatile_goto( -+ " .align 2 \n\t" - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" -@@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, - bool branch) - { - asm_volatile_goto( -+ " .align 2 \n\t" - " .option push \n\t" - " .option norelax \n\t" - " .option norvc \n\t" -diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h -index aff6c33ab0c08..4c58ee7f95ecf 100644 ---- a/arch/riscv/include/asm/mmio.h -+++ b/arch/riscv/include/asm/mmio.h -@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) - * Relaxed I/O memory access primitives. These follow the Device memory - * ordering rules but do not guarantee any ordering relative to Normal memory - * accesses. These are defined to order the indicated access (either a read or -- * write) with all other I/O memory accesses. Since the platform specification -- * defines that all I/O regions are strongly ordered on channel 2, no explicit -- * fences are required to enforce this ordering. -+ * write) with all other I/O memory accesses to the same peripheral. Since the -+ * platform specification defines that all I/O regions are strongly ordered on -+ * channel 0, no explicit fences are required to enforce this ordering. - */ - /* FIXME: These are now the same as asm-generic */ - #define __io_rbr() do {} while (0) -@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) - #endif - - /* -- * I/O memory access primitives. Reads are ordered relative to any -- * following Normal memory access. Writes are ordered relative to any prior -- * Normal memory access. The memory barriers here are necessary as RISC-V -+ * I/O memory access primitives. Reads are ordered relative to any following -+ * Normal memory read and delay() loop. Writes are ordered relative to any -+ * prior Normal memory write. The memory barriers here are necessary as RISC-V - * doesn't define any ordering between the memory space and the I/O space. 
- */ - #define __io_br() do {} while (0) --#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") --#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") -+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) -+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) - #define __io_aw() mmiowb_set_pending() - - #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) -diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h -index 4254ff2ff0494..1075beae1ac64 100644 ---- a/arch/riscv/include/asm/module.lds.h -+++ b/arch/riscv/include/asm/module.lds.h -@@ -2,8 +2,8 @@ - /* Copyright (C) 2017 Andes Technology Corporation */ - #ifdef CONFIG_MODULE_SECTIONS - SECTIONS { -- .plt (NOLOAD) : { BYTE(0) } -- .got (NOLOAD) : { BYTE(0) } -- .got.plt (NOLOAD) : { BYTE(0) } -+ .plt : { BYTE(0) } -+ .got : { BYTE(0) } -+ .got.plt : { BYTE(0) } - } - #endif -diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h -index f36368de839f5..3cd00332d70f5 100644 ---- a/arch/riscv/include/asm/parse_asm.h -+++ b/arch/riscv/include/asm/parse_asm.h -@@ -3,6 +3,9 @@ - * Copyright (C) 2020 SiFive - */ - -+#ifndef _ASM_RISCV_INSN_H -+#define _ASM_RISCV_INSN_H -+ - #include - - /* The bit field of immediate value in I-type instruction */ -@@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \ - (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ - (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ - (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) -+ -+#endif /* _ASM_RISCV_INSN_H */ -diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h -index 9a7d7346001ee..98d9de07cba17 100644 ---- a/arch/riscv/include/asm/patch.h -+++ b/arch/riscv/include/asm/patch.h -@@ -9,4 +9,6 @@ - int patch_text_nosync(void *addr, const void *insns, size_t len); - int patch_text(void *addr, u32 insn); - -+extern int riscv_patch_in_stop_machine; -+ - #endif /* _ASM_RISCV_PATCH_H */ -diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h -index 0af6933a7100d..98e0403324823 100644 ---- a/arch/riscv/include/asm/pgalloc.h -+++ b/arch/riscv/include/asm/pgalloc.h -@@ -38,6 +38,13 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - } - #endif /* __PAGETABLE_PMD_FOLDED */ - -+static inline void sync_kernel_mappings(pgd_t *pgd) -+{ -+ memcpy(pgd + USER_PTRS_PER_PGD, -+ init_mm.pgd + USER_PTRS_PER_PGD, -+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); -+} -+ - static inline pgd_t *pgd_alloc(struct mm_struct *mm) - { - pgd_t *pgd; -@@ -46,9 +53,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) - if (likely(pgd != NULL)) { - memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); - /* Copy kernel mappings */ -- memcpy(pgd + USER_PTRS_PER_PGD, -- init_mm.pgd + USER_PTRS_PER_PGD, -- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); -+ sync_kernel_mappings(pgd); - } - return pgd; - } -diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h -index 39b550310ec64..397cb945b16eb 100644 ---- a/arch/riscv/include/asm/pgtable.h -+++ b/arch/riscv/include/asm/pgtable.h -@@ -66,9 +66,13 @@ - - #define FIXADDR_TOP PCI_IO_START - #ifdef CONFIG_64BIT --#define FIXADDR_SIZE PMD_SIZE -+#define MAX_FDT_SIZE PMD_SIZE -+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M) -+#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE) - #else --#define FIXADDR_SIZE 
PGDIR_SIZE -+#define MAX_FDT_SIZE PGDIR_SIZE -+#define FIX_FDT_SIZE MAX_FDT_SIZE -+#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE) - #endif - #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) - -diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h -index a7d2811f35365..62d0e6e61da83 100644 ---- a/arch/riscv/include/asm/smp.h -+++ b/arch/riscv/include/asm/smp.h -@@ -43,7 +43,6 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask); - void arch_send_call_function_single_ipi(int cpu); - - int riscv_hartid_to_cpuid(int hartid); --void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); - - /* Set custom IPI operations */ - void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); -@@ -85,13 +84,6 @@ static inline unsigned long cpuid_to_hartid_map(int cpu) - return boot_cpu_hartid; - } - --static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, -- struct cpumask *out) --{ -- cpumask_clear(out); -- cpumask_set_cpu(boot_cpu_hartid, out); --} -- - static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) - { - } -@@ -102,6 +94,8 @@ static inline void riscv_clear_ipi(void) - - #endif /* CONFIG_SMP */ - -+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); -+ - #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP) - bool cpu_has_hotplug(unsigned int cpu); - #else -diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h -index 60da0dcacf145..42d97043e5376 100644 ---- a/arch/riscv/include/asm/thread_info.h -+++ b/arch/riscv/include/asm/thread_info.h -@@ -11,11 +11,17 @@ - #include - #include - -+#ifdef CONFIG_KASAN -+#define KASAN_STACK_ORDER 1 -+#else -+#define KASAN_STACK_ORDER 0 -+#endif -+ - /* thread information allocation */ - #ifdef CONFIG_64BIT --#define THREAD_SIZE_ORDER (2) -+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) - #else --#define THREAD_SIZE_ORDER (1) -+#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) - #endif - #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) - -@@ -36,6 +42,9 @@ - - #ifndef __ASSEMBLY__ - -+extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)]; -+extern unsigned long spin_shadow_stack; -+ - #include - #include - -diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h -index 507cae273bc62..d6a7428f6248d 100644 ---- a/arch/riscv/include/asm/timex.h -+++ b/arch/riscv/include/asm/timex.h -@@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void) - static inline unsigned long random_get_entropy(void) - { - if (unlikely(clint_time_val == NULL)) -- return 0; -+ return random_get_entropy_fallback(); - return get_cycles(); - } - #define random_get_entropy() random_get_entropy() -diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h -index 801019381dea3..a09196f8de688 100644 ---- a/arch/riscv/include/asm/tlbflush.h -+++ b/arch/riscv/include/asm/tlbflush.h -@@ -12,6 +12,8 @@ - #include - - #ifdef CONFIG_MMU -+extern unsigned long asid_mask; -+ - static inline void local_flush_tlb_all(void) - { - __asm__ __volatile__ ("sfence.vma" : : : "memory"); -diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h -index f314ff44c48d1..d4d628af21a45 100644 ---- a/arch/riscv/include/asm/uaccess.h -+++ b/arch/riscv/include/asm/uaccess.h -@@ -216,7 +216,7 @@ do { \ - might_fault(); \ - access_ok(__p, sizeof(*__p)) ? 
\ - __get_user((x), __p) : \ -- ((x) = 0, -EFAULT); \ -+ ((x) = (__force __typeof__(x))0, -EFAULT); \ - }) - - #define __put_user_asm(insn, x, ptr, err) \ -diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h -index 6c316093a1e59..977ee6181dabf 100644 ---- a/arch/riscv/include/asm/unistd.h -+++ b/arch/riscv/include/asm/unistd.h -@@ -9,7 +9,6 @@ - */ - - #define __ARCH_WANT_SYS_CLONE --#define __ARCH_WANT_MEMFD_SECRET - - #include - -diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h -new file mode 100644 -index 0000000000000..66b13a5228808 ---- /dev/null -+++ b/arch/riscv/include/uapi/asm/setup.h -@@ -0,0 +1,8 @@ -+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -+ -+#ifndef _UAPI_ASM_RISCV_SETUP_H -+#define _UAPI_ASM_RISCV_SETUP_H -+ -+#define COMMAND_LINE_SIZE 1024 -+ -+#endif /* _UAPI_ASM_RISCV_SETUP_H */ -diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h -index 8062996c2dfd0..d95fbf5846b0b 100644 ---- a/arch/riscv/include/uapi/asm/unistd.h -+++ b/arch/riscv/include/uapi/asm/unistd.h -@@ -21,6 +21,7 @@ - #endif /* __LP64__ */ - - #define __ARCH_WANT_SYS_CLONE3 -+#define __ARCH_WANT_MEMFD_SECRET - - #include - -diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile -index 3397ddac1a30c..16308ef1e5787 100644 ---- a/arch/riscv/kernel/Makefile -+++ b/arch/riscv/kernel/Makefile -@@ -50,6 +50,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o - obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o - obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o - -+obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o -+ - obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o - obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o - obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o -diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c -index df84e0c13db18..66ddfba1cfbef 100644 ---- a/arch/riscv/kernel/cpu-hotplug.c -+++ b/arch/riscv/kernel/cpu-hotplug.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - - void cpu_stop(void); -@@ -46,6 +47,7 @@ int __cpu_disable(void) - return ret; - - remove_cpu_topology(cpu); -+ numa_remove_cpu(cpu); - set_cpu_online(cpu, false); - irq_migrate_all_off_this_cpu(); - -diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S -index 7832fb763abac..b2a1908c0463e 100644 ---- a/arch/riscv/kernel/crash_save_regs.S -+++ b/arch/riscv/kernel/crash_save_regs.S -@@ -44,7 +44,7 @@ SYM_CODE_START(riscv_crash_save_regs) - REG_S t6, PT_T6(a0) /* x31 */ - - csrr t1, CSR_STATUS -- csrr t2, CSR_EPC -+ auipc t2, 0x0 - csrr t3, CSR_TVAL - csrr t4, CSR_CAUSE - -diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c -index 0241592982314..1aa540350abd3 100644 ---- a/arch/riscv/kernel/efi.c -+++ b/arch/riscv/kernel/efi.c -@@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) - - if (md->attribute & EFI_MEMORY_RO) { - val = pte_val(pte) & ~_PAGE_WRITE; -- val = pte_val(pte) | _PAGE_READ; -+ val |= _PAGE_READ; - pte = __pte(val); - } - if (md->attribute & EFI_MEMORY_XP) { -diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S -index 98f502654edd3..5ca2860cc06cd 100644 ---- a/arch/riscv/kernel/entry.S -+++ b/arch/riscv/kernel/entry.S -@@ -108,7 +108,7 @@ _save_context: - .option pop - - #ifdef CONFIG_TRACE_IRQFLAGS -- call trace_hardirqs_off -+ call __trace_hardirqs_off - #endif - - #ifdef CONFIG_CONTEXT_TRACKING -@@ -144,7 +144,7 @@ skip_context_tracking: - 
li t0, EXC_BREAKPOINT - beq s4, t0, 1f - #ifdef CONFIG_TRACE_IRQFLAGS -- call trace_hardirqs_on -+ call __trace_hardirqs_on - #endif - csrs CSR_STATUS, SR_IE - -@@ -235,7 +235,7 @@ ret_from_exception: - REG_L s0, PT_STATUS(sp) - csrc CSR_STATUS, SR_IE - #ifdef CONFIG_TRACE_IRQFLAGS -- call trace_hardirqs_off -+ call __trace_hardirqs_off - #endif - #ifdef CONFIG_RISCV_M_MODE - /* the MPP value is too large to be used as an immediate arg for addi */ -@@ -271,10 +271,10 @@ restore_all: - REG_L s1, PT_STATUS(sp) - andi t0, s1, SR_PIE - beqz t0, 1f -- call trace_hardirqs_on -+ call __trace_hardirqs_on - j 2f - 1: -- call trace_hardirqs_off -+ call __trace_hardirqs_off - 2: - #endif - REG_L a0, PT_STATUS(sp) -@@ -387,6 +387,19 @@ handle_syscall_trace_exit: - - #ifdef CONFIG_VMAP_STACK - handle_kernel_stack_overflow: -+ /* -+ * Takes the psuedo-spinlock for the shadow stack, in case multiple -+ * harts are concurrently overflowing their kernel stacks. We could -+ * store any value here, but since we're overflowing the kernel stack -+ * already we only have SP to use as a scratch register. So we just -+ * swap in the address of the spinlock, as that's definately non-zero. -+ * -+ * Pairs with a store_release in handle_bad_stack(). -+ */ -+1: la sp, spin_shadow_stack -+ REG_AMOSWAP_AQ sp, sp, (sp) -+ bnez sp, 1b -+ - la sp, shadow_stack - addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE - -diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c -index 7f1e5203de886..1bf92cfa6764e 100644 ---- a/arch/riscv/kernel/ftrace.c -+++ b/arch/riscv/kernel/ftrace.c -@@ -15,11 +15,21 @@ - int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex) - { - mutex_lock(&text_mutex); -+ -+ /* -+ * The code sequences we use for ftrace can't be patched while the -+ * kernel is running, so we need to use stop_machine() to modify them -+ * for now. This doesn't play nice with text_mutex, we use this flag -+ * to elide the check. -+ */ -+ riscv_patch_in_stop_machine = true; -+ - return 0; - } - - int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex) - { -+ riscv_patch_in_stop_machine = false; - mutex_unlock(&text_mutex); - return 0; - } -@@ -57,12 +67,15 @@ static int ftrace_check_current_call(unsigned long hook_pos, - } - - static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target, -- bool enable) -+ bool enable, bool ra) - { - unsigned int call[2]; - unsigned int nops[2] = {NOP4, NOP4}; - -- make_call(hook_pos, target, call); -+ if (ra) -+ make_call_ra(hook_pos, target, call); -+ else -+ make_call_t0(hook_pos, target, call); - - /* Replace the auipc-jalr pair at once. Return -EPERM on write error. */ - if (patch_text_nosync -@@ -72,42 +85,13 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target, - return 0; - } - --/* -- * Put 5 instructions with 16 bytes at the front of function within -- * patchable function entry nops' area. -- * -- * 0: REG_S ra, -SZREG(sp) -- * 1: auipc ra, 0x? -- * 2: jalr -?(ra) -- * 3: REG_L ra, -SZREG(sp) -- * -- * So the opcodes is: -- * 0: 0xfe113c23 (sd)/0xfe112e23 (sw) -- * 1: 0x???????? -> auipc -- * 2: 0x???????? 
-> jalr -- * 3: 0xff813083 (ld)/0xffc12083 (lw) -- */ --#if __riscv_xlen == 64 --#define INSN0 0xfe113c23 --#define INSN3 0xff813083 --#elif __riscv_xlen == 32 --#define INSN0 0xfe112e23 --#define INSN3 0xffc12083 --#endif -- --#define FUNC_ENTRY_SIZE 16 --#define FUNC_ENTRY_JMP 4 -- - int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) - { -- unsigned int call[4] = {INSN0, 0, 0, INSN3}; -- unsigned long target = addr; -- unsigned long caller = rec->ip + FUNC_ENTRY_JMP; -+ unsigned int call[2]; - -- call[1] = to_auipc_insn((unsigned int)(target - caller)); -- call[2] = to_jalr_insn((unsigned int)(target - caller)); -+ make_call_t0(rec->ip, addr, call); - -- if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE)) -+ if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE)) - return -EPERM; - - return 0; -@@ -116,15 +100,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) - int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, - unsigned long addr) - { -- unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4}; -+ unsigned int nops[2] = {NOP4, NOP4}; - -- if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE)) -+ if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE)) - return -EPERM; - - return 0; - } - -- - /* - * This is called early on, and isn't wrapped by - * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold -@@ -136,9 +119,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) - { - int out; - -- ftrace_arch_code_modify_prepare(); -+ mutex_lock(&text_mutex); - out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); -- ftrace_arch_code_modify_post_process(); -+ mutex_unlock(&text_mutex); - - return out; - } -@@ -146,10 +129,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) - int ftrace_update_ftrace_func(ftrace_func_t func) - { - int ret = __ftrace_modify_call((unsigned long)&ftrace_call, -- (unsigned long)func, true); -+ (unsigned long)func, true, true); - if (!ret) { - ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call, -- (unsigned long)func, true); -+ (unsigned long)func, true, true); - } - - return ret; -@@ -166,16 +149,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - unsigned long addr) - { - unsigned int call[2]; -- unsigned long caller = rec->ip + FUNC_ENTRY_JMP; -+ unsigned long caller = rec->ip; - int ret; - -- make_call(caller, old_addr, call); -+ make_call_t0(caller, old_addr, call); - ret = ftrace_check_current_call(caller, call); - - if (ret) - return ret; - -- return __ftrace_modify_call(caller, addr, true); -+ return __ftrace_modify_call(caller, addr, true, false); - } - #endif - -@@ -210,12 +193,12 @@ int ftrace_enable_ftrace_graph_caller(void) - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, -- (unsigned long)&prepare_ftrace_return, true); -+ (unsigned long)&prepare_ftrace_return, true, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, -- (unsigned long)&prepare_ftrace_return, true); -+ (unsigned long)&prepare_ftrace_return, true, true); - } - - int ftrace_disable_ftrace_graph_caller(void) -@@ -223,12 +206,12 @@ int ftrace_disable_ftrace_graph_caller(void) - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, -- (unsigned long)&prepare_ftrace_return, false); -+ (unsigned long)&prepare_ftrace_return, false, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, -- 
(unsigned long)&prepare_ftrace_return, false); -+ (unsigned long)&prepare_ftrace_return, false, true); - } - #endif /* CONFIG_DYNAMIC_FTRACE */ - #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S -index 52c5ff9804c55..4c3c7592b6fc8 100644 ---- a/arch/riscv/kernel/head.S -+++ b/arch/riscv/kernel/head.S -@@ -301,6 +301,7 @@ clear_bss_done: - REG_S a0, (a2) - - /* Initialize page tables and relocate to virtual addresses */ -+ la tp, init_task - la sp, init_thread_union + THREAD_SIZE - XIP_FIXUP_OFFSET sp - #ifdef CONFIG_BUILTIN_DTB -diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S -index a80b52a74f58c..059c5e216ae75 100644 ---- a/arch/riscv/kernel/kexec_relocate.S -+++ b/arch/riscv/kernel/kexec_relocate.S -@@ -159,25 +159,15 @@ SYM_CODE_START(riscv_kexec_norelocate) - * s0: (const) Phys address to jump to - * s1: (const) Phys address of the FDT image - * s2: (const) The hartid of the current hart -- * s3: (const) kernel_map.va_pa_offset, used when switching MMU off - */ - mv s0, a1 - mv s1, a2 - mv s2, a3 -- mv s3, a4 - - /* Disable / cleanup interrupts */ - csrw CSR_SIE, zero - csrw CSR_SIP, zero - -- /* Switch to physical addressing */ -- la s4, 1f -- sub s4, s4, s3 -- csrw CSR_STVEC, s4 -- csrw CSR_SATP, zero -- --.align 2 --1: - /* Pass the arguments to the next kernel / Cleanup*/ - mv a0, s2 - mv a1, s1 -@@ -214,7 +204,15 @@ SYM_CODE_START(riscv_kexec_norelocate) - csrw CSR_SCAUSE, zero - csrw CSR_SSCRATCH, zero - -- jalr zero, a2, 0 -+ /* -+ * Switch to physical addressing -+ * This will also trigger a jump to CSR_STVEC -+ * which in this case is the address of the new -+ * kernel. -+ */ -+ csrw CSR_STVEC, a2 -+ csrw CSR_SATP, zero -+ - SYM_CODE_END(riscv_kexec_norelocate) - - .section ".rodata" -diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c -index e6eca271a4d60..db41c676e5a26 100644 ---- a/arch/riscv/kernel/machine_kexec.c -+++ b/arch/riscv/kernel/machine_kexec.c -@@ -15,6 +15,8 @@ - #include /* For unreachable() */ - #include /* For cpu_down() */ - #include -+#include -+#include - - /* - * kexec_image_info - Print received image details -@@ -65,7 +67,9 @@ machine_kexec_prepare(struct kimage *image) - if (image->segment[i].memsz <= sizeof(fdt)) - continue; - -- if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) -+ if (image->file_mode) -+ memcpy(&fdt, image->segment[i].buf, sizeof(fdt)); -+ else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) - continue; - - if (fdt_check_header(&fdt)) -@@ -136,19 +140,70 @@ void machine_shutdown(void) - #endif - } - -+/* Override the weak function in kernel/panic.c */ -+void crash_smp_send_stop(void) -+{ -+ static int cpus_stopped; -+ -+ /* -+ * This function can be called twice in panic path, but obviously -+ * we execute this only once. -+ */ -+ if (cpus_stopped) -+ return; -+ -+ smp_send_stop(); -+ cpus_stopped = 1; -+} -+ -+static void machine_kexec_mask_interrupts(void) -+{ -+ unsigned int i; -+ struct irq_desc *desc; -+ -+ for_each_irq_desc(i, desc) { -+ struct irq_chip *chip; -+ int ret; -+ -+ chip = irq_desc_get_chip(desc); -+ if (!chip) -+ continue; -+ -+ /* -+ * First try to remove the active state. If this -+ * fails, try to EOI the interrupt. 
-+ */ -+ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false); -+ -+ if (ret && irqd_irq_inprogress(&desc->irq_data) && -+ chip->irq_eoi) -+ chip->irq_eoi(&desc->irq_data); -+ -+ if (chip->irq_mask) -+ chip->irq_mask(&desc->irq_data); -+ -+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) -+ chip->irq_disable(&desc->irq_data); -+ } -+} -+ - /* - * machine_crash_shutdown - Prepare to kexec after a kernel crash - * - * This function is called by crash_kexec just before machine_kexec -- * below and its goal is similar to machine_shutdown, but in case of -- * a kernel crash. Since we don't handle such cases yet, this function -- * is empty. -+ * and its goal is to shutdown non-crashing cpus and save registers. - */ - void - machine_crash_shutdown(struct pt_regs *regs) - { -+ local_irq_disable(); -+ -+ /* shutdown non-crashing cpus */ -+ crash_smp_send_stop(); -+ - crash_save_cpu(regs, smp_processor_id()); -- machine_shutdown(); -+ machine_kexec_mask_interrupts(); -+ - pr_info("Starting crashdump kernel...\n"); - } - -@@ -169,7 +224,8 @@ machine_kexec(struct kimage *image) - struct kimage_arch *internal = &image->arch; - unsigned long jump_addr = (unsigned long) image->start; - unsigned long first_ind_entry = (unsigned long) &image->head; -- unsigned long this_hart_id = raw_smp_processor_id(); -+ unsigned long this_cpu_id = __smp_processor_id(); -+ unsigned long this_hart_id = cpuid_to_hartid_map(this_cpu_id); - unsigned long fdt_addr = internal->fdt_addr; - void *control_code_buffer = page_address(image->control_code_page); - riscv_kexec_method kexec_method = NULL; -diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S -index d171eca623b6f..125de818d1bab 100644 ---- a/arch/riscv/kernel/mcount-dyn.S -+++ b/arch/riscv/kernel/mcount-dyn.S -@@ -13,8 +13,8 @@ - - .text - --#define FENTRY_RA_OFFSET 12 --#define ABI_SIZE_ON_STACK 72 -+#define FENTRY_RA_OFFSET 8 -+#define ABI_SIZE_ON_STACK 80 - #define ABI_A0 0 - #define ABI_A1 8 - #define ABI_A2 16 -@@ -23,10 +23,10 @@ - #define ABI_A5 40 - #define ABI_A6 48 - #define ABI_A7 56 --#define ABI_RA 64 -+#define ABI_T0 64 -+#define ABI_RA 72 - - .macro SAVE_ABI -- addi sp, sp, -SZREG - addi sp, sp, -ABI_SIZE_ON_STACK - - REG_S a0, ABI_A0(sp) -@@ -37,6 +37,7 @@ - REG_S a5, ABI_A5(sp) - REG_S a6, ABI_A6(sp) - REG_S a7, ABI_A7(sp) -+ REG_S t0, ABI_T0(sp) - REG_S ra, ABI_RA(sp) - .endm - -@@ -49,24 +50,18 @@ - REG_L a5, ABI_A5(sp) - REG_L a6, ABI_A6(sp) - REG_L a7, ABI_A7(sp) -+ REG_L t0, ABI_T0(sp) - REG_L ra, ABI_RA(sp) - - addi sp, sp, ABI_SIZE_ON_STACK -- addi sp, sp, SZREG - .endm - - #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - .macro SAVE_ALL -- addi sp, sp, -SZREG - addi sp, sp, -PT_SIZE_ON_STACK - -- REG_S x1, PT_EPC(sp) -- addi sp, sp, PT_SIZE_ON_STACK -- REG_L x1, (sp) -- addi sp, sp, -PT_SIZE_ON_STACK -+ REG_S t0, PT_EPC(sp) - REG_S x1, PT_RA(sp) -- REG_L x1, PT_EPC(sp) -- - REG_S x2, PT_SP(sp) - REG_S x3, PT_GP(sp) - REG_S x4, PT_TP(sp) -@@ -100,15 +95,11 @@ - .endm - - .macro RESTORE_ALL -+ REG_L t0, PT_EPC(sp) - REG_L x1, PT_RA(sp) -- addi sp, sp, PT_SIZE_ON_STACK -- REG_S x1, (sp) -- addi sp, sp, -PT_SIZE_ON_STACK -- REG_L x1, PT_EPC(sp) - REG_L x2, PT_SP(sp) - REG_L x3, PT_GP(sp) - REG_L x4, PT_TP(sp) -- REG_L x5, PT_T0(sp) - REG_L x6, PT_T1(sp) - REG_L x7, PT_T2(sp) - REG_L x8, PT_S0(sp) -@@ -137,17 +128,16 @@ - REG_L x31, PT_T6(sp) - - addi sp, sp, PT_SIZE_ON_STACK -- addi sp, sp, SZREG - .endm - #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ - - ENTRY(ftrace_caller) - SAVE_ABI - -- addi a0, ra, 
-FENTRY_RA_OFFSET -+ addi a0, t0, -FENTRY_RA_OFFSET - la a1, function_trace_op - REG_L a2, 0(a1) -- REG_L a1, ABI_SIZE_ON_STACK(sp) -+ mv a1, ra - mv a3, sp - - ftrace_call: -@@ -155,8 +145,8 @@ ftrace_call: - call ftrace_stub - - #ifdef CONFIG_FUNCTION_GRAPH_TRACER -- addi a0, sp, ABI_SIZE_ON_STACK -- REG_L a1, ABI_RA(sp) -+ addi a0, sp, ABI_RA -+ REG_L a1, ABI_T0(sp) - addi a1, a1, -FENTRY_RA_OFFSET - #ifdef HAVE_FUNCTION_GRAPH_FP_TEST - mv a2, s0 -@@ -166,17 +156,17 @@ ftrace_graph_call: - call ftrace_stub - #endif - RESTORE_ABI -- ret -+ jr t0 - ENDPROC(ftrace_caller) - - #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - ENTRY(ftrace_regs_caller) - SAVE_ALL - -- addi a0, ra, -FENTRY_RA_OFFSET -+ addi a0, t0, -FENTRY_RA_OFFSET - la a1, function_trace_op - REG_L a2, 0(a1) -- REG_L a1, PT_SIZE_ON_STACK(sp) -+ mv a1, ra - mv a3, sp - - ftrace_regs_call: -@@ -196,6 +186,6 @@ ftrace_graph_regs_call: - #endif - - RESTORE_ALL -- ret -+ jr t0 - ENDPROC(ftrace_regs_caller) - #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ -diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c -index 68a9e3d1fe16a..4a48287513c37 100644 ---- a/arch/riscv/kernel/module.c -+++ b/arch/riscv/kernel/module.c -@@ -13,6 +13,19 @@ - #include - #include - -+/* -+ * The auipc+jalr instruction pair can reach any PC-relative offset -+ * in the range [-2^31 - 2^11, 2^31 - 2^11) -+ */ -+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) -+{ -+#ifdef CONFIG_32BIT -+ return true; -+#else -+ return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11)); -+#endif -+} -+ - static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) - { - if (v != (u32)v) { -@@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, - ptrdiff_t offset = (void *)v - (void *)location; - s32 hi20; - -- if (offset != (s32)offset) { -+ if (!riscv_insn_valid_32bit_offset(offset)) { - pr_err( - "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, (long long)v, location); -@@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, - Elf_Addr v) - { - ptrdiff_t offset = (void *)v - (void *)location; -- s32 fill_v = offset; - u32 hi20, lo12; - -- if (offset != fill_v) { -+ if (!riscv_insn_valid_32bit_offset(offset)) { - /* Only emit the plt entry if offset over 32-bit range */ - if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { - offset = module_emit_plt_entry(me, v); -@@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, - Elf_Addr v) - { - ptrdiff_t offset = (void *)v - (void *)location; -- s32 fill_v = offset; - u32 hi20, lo12; - -- if (offset != fill_v) { -+ if (!riscv_insn_valid_32bit_offset(offset)) { - pr_err( - "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", - me->name, (long long)v, location); -diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c -index 0b552873a5778..e099961453cca 100644 ---- a/arch/riscv/kernel/patch.c -+++ b/arch/riscv/kernel/patch.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - struct patch_insn { -@@ -19,6 +20,8 @@ struct patch_insn { - atomic_t cpu_count; - }; - -+int riscv_patch_in_stop_machine = false; -+ - #ifdef CONFIG_MMU - /* - * The fix_to_virt(, idx) needs a const value (not a dynamic variable of -@@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) - * Before reaching here, it was expected to lock the text_mutex - * already, so we 
don't need to give another lock here and could - * ensure that it was safe between each cores. -+ * -+ * We're currently using stop_machine() for ftrace & kprobes, and while -+ * that ensures text_mutex is held before installing the mappings it -+ * does not ensure text_mutex is held by the calling thread. That's -+ * safe but triggers a lockdep failure, so just elide it for that -+ * specific case. - */ -- lockdep_assert_held(&text_mutex); -+ if (!riscv_patch_in_stop_machine) -+ lockdep_assert_held(&text_mutex); - - if (across_pages) - patch_map(addr + len, FIX_TEXT_POKE1); -@@ -104,7 +114,7 @@ static int patch_text_cb(void *data) - struct patch_insn *patch = data; - int ret = 0; - -- if (atomic_inc_return(&patch->cpu_count) == 1) { -+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { - ret = - patch_text_nosync(patch->addr, &patch->insn, - GET_INSN_LENGTH(patch->insn)); -@@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb); - - int patch_text(void *addr, u32 insn) - { -+ int ret; - struct patch_insn patch = { - .addr = addr, - .insn = insn, - .cpu_count = ATOMIC_INIT(0), - }; - -- return stop_machine_cpuslocked(patch_text_cb, -- &patch, cpu_online_mask); -+ /* -+ * kprobes takes text_mutex, before calling patch_text(), but as we call -+ * calls stop_machine(), the lockdep assertion in patch_insn_write() -+ * gets confused by the context in which the lock is taken. -+ * Instead, ensure the lock is held before calling stop_machine(), and -+ * set riscv_patch_in_stop_machine to skip the check in -+ * patch_insn_write(). -+ */ -+ lockdep_assert_held(&text_mutex); -+ riscv_patch_in_stop_machine = true; -+ ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask); -+ riscv_patch_in_stop_machine = false; -+ return ret; - } - NOKPROBE_SYMBOL(patch_text); -diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c -index 0bb1854dce833..357f985041cb9 100644 ---- a/arch/riscv/kernel/perf_callchain.c -+++ b/arch/riscv/kernel/perf_callchain.c -@@ -15,8 +15,8 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, - { - struct stackframe buftail; - unsigned long ra = 0; -- unsigned long *user_frame_tail = -- (unsigned long *)(fp - sizeof(struct stackframe)); -+ unsigned long __user *user_frame_tail = -+ (unsigned long __user *)(fp - sizeof(struct stackframe)); - - /* Check accessibility of one struct frame_tail beyond */ - if (!access_ok(user_frame_tail, sizeof(buftail))) -@@ -56,10 +56,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, - void perf_callchain_user(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - unsigned long fp = 0; - - /* RISC-V does not support perf in guest mode. */ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) -+ if (guest_cbs && guest_cbs->is_in_guest()) - return; - - fp = regs->s0; -@@ -72,14 +73,16 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, - - static bool fill_callchain(void *entry, unsigned long pc) - { -- return perf_callchain_store(entry, pc); -+ return perf_callchain_store(entry, pc) == 0; - } - - void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ - /* RISC-V does not support perf in guest mode. 
*/ -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - pr_warn("RISC-V does not support perf in guest mode!"); - return; - } -diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c -index 00088dc6da4b6..7548b1d62509c 100644 ---- a/arch/riscv/kernel/probes/kprobes.c -+++ b/arch/riscv/kernel/probes/kprobes.c -@@ -1,5 +1,7 @@ - // SPDX-License-Identifier: GPL-2.0+ - -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -46,18 +48,35 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) - post_kprobe_handler(p, kcb, regs); - } - --int __kprobes arch_prepare_kprobe(struct kprobe *p) -+static bool __kprobes arch_check_kprobe(struct kprobe *p) - { -- unsigned long probe_addr = (unsigned long)p->addr; -+ unsigned long tmp = (unsigned long)p->addr - p->offset; -+ unsigned long addr = (unsigned long)p->addr; - -- if (probe_addr & 0x1) { -- pr_warn("Address not aligned.\n"); -+ while (tmp <= addr) { -+ if (tmp == addr) -+ return true; - -- return -EINVAL; -+ tmp += GET_INSN_LENGTH(*(u16 *)tmp); - } - -+ return false; -+} -+ -+int __kprobes arch_prepare_kprobe(struct kprobe *p) -+{ -+ u16 *insn = (u16 *)p->addr; -+ -+ if ((unsigned long)insn & 0x1) -+ return -EILSEQ; -+ -+ if (!arch_check_kprobe(p)) -+ return -EILSEQ; -+ - /* copy instruction */ -- p->opcode = *p->addr; -+ p->opcode = (kprobe_opcode_t)(*insn++); -+ if (GET_INSN_LENGTH(p->opcode) == 4) -+ p->opcode |= (kprobe_opcode_t)(*insn) << 16; - - /* decode instruction */ - switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { -@@ -191,7 +210,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, - break; - case KPROBE_HIT_SS: - case KPROBE_REENTER: -- pr_warn("Unrecoverable kprobe detected.\n"); -+ pr_warn("Failed to recover from reentered kprobes.\n"); - dump_kprobe(p); - BUG(); - break; -diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c -index d73e96f6ed7c5..a20568bd1f1a8 100644 ---- a/arch/riscv/kernel/probes/simulate-insn.c -+++ b/arch/riscv/kernel/probes/simulate-insn.c -@@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg - u32 rd_index = (opcode >> 7) & 0x1f; - u32 rs1_index = (opcode >> 15) & 0x1f; - -- ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); -+ ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); - if (!ret) - return ret; - -- ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); -+ ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); - if (!ret) - return ret; - -diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h -index cb6ff7dccb92e..de8474146a9b6 100644 ---- a/arch/riscv/kernel/probes/simulate-insn.h -+++ b/arch/riscv/kernel/probes/simulate-insn.h -@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f); - } while (0) - - __RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001); --__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002); -+__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002); - __RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001); --__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002); -+__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002); - __RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001); - __RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001); - __RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002); -diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c -index 7a057b5f0adc7..194f166b2cc40 100644 ---- a/arch/riscv/kernel/probes/uprobes.c -+++ b/arch/riscv/kernel/probes/uprobes.c -@@ 
-59,8 +59,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) - - instruction_pointer_set(regs, utask->xol_vaddr); - -- regs->status &= ~SR_SPIE; -- - return 0; - } - -@@ -69,11 +67,10 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) - struct uprobe_task *utask = current->utask; - - WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR); -+ current->thread.bad_cause = utask->autask.saved_cause; - - instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); - -- regs->status |= SR_SPIE; -- - return 0; - } - -@@ -106,13 +103,12 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) - { - struct uprobe_task *utask = current->utask; - -+ current->thread.bad_cause = utask->autask.saved_cause; - /* - * Task has received a fatal signal, so reset back to probbed - * address. - */ - instruction_pointer_set(regs, utask->vaddr); -- -- regs->status &= ~SR_SPIE; - } - - bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, -diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c -index 03ac3aa611f59..bda3bc2947186 100644 ---- a/arch/riscv/kernel/process.c -+++ b/arch/riscv/kernel/process.c -@@ -124,6 +124,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, - { - struct pt_regs *childregs = task_pt_regs(p); - -+ memset(&p->thread.s, 0, sizeof(p->thread.s)); -+ - /* p->thread holds context to be restored by __switch_to() */ - if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { - /* Kernel thread */ -diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c -index ee5878d968cc1..9c842c41684ac 100644 ---- a/arch/riscv/kernel/reset.c -+++ b/arch/riscv/kernel/reset.c -@@ -12,7 +12,7 @@ static void default_power_off(void) - wait_for_interrupt(); - } - --void (*pm_power_off)(void) = default_power_off; -+void (*pm_power_off)(void) = NULL; - EXPORT_SYMBOL(pm_power_off); - - void machine_restart(char *cmd) -@@ -23,10 +23,16 @@ void machine_restart(char *cmd) - - void machine_halt(void) - { -- pm_power_off(); -+ if (pm_power_off != NULL) -+ pm_power_off(); -+ else -+ default_power_off(); - } - - void machine_power_off(void) - { -- pm_power_off(); -+ if (pm_power_off != NULL) -+ pm_power_off(); -+ else -+ default_power_off(); - } -diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c -index b9620e5f00baf..8cc147491c675 100644 ---- a/arch/riscv/kernel/setup.c -+++ b/arch/riscv/kernel/setup.c -@@ -59,6 +59,16 @@ atomic_t hart_lottery __section(".sdata") - unsigned long boot_cpu_hartid; - static DEFINE_PER_CPU(struct cpu, cpu_devices); - -+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) -+{ -+ int cpu; -+ -+ cpumask_clear(out); -+ for_each_cpu(cpu, in) -+ cpumask_set_cpu(cpuid_to_hartid_map(cpu), out); -+} -+EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask); -+ - /* - * Place kernel memory regions on the resource tree so that - * kexec-tools can retrieve them from /proc/iomem. 
While there -@@ -189,7 +199,7 @@ static void __init init_resources(void) - res = &mem_res[res_idx--]; - - res->name = "Reserved"; -- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; -+ res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE; - res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); - res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; - -@@ -214,7 +224,7 @@ static void __init init_resources(void) - - if (unlikely(memblock_is_nomap(region))) { - res->name = "Reserved"; -- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; -+ res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE; - } else { - res->name = "System RAM"; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; -@@ -250,10 +260,10 @@ static void __init parse_dtb(void) - pr_info("Machine model: %s\n", name); - dump_stack_set_arch_desc("%s (DT)", name); - } -- return; -+ } else { -+ pr_err("No DTB passed to the kernel\n"); - } - -- pr_err("No DTB passed to the kernel\n"); - #ifdef CONFIG_CMDLINE_FORCE - strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); - pr_info("Forcing kernel command line to: %s\n", boot_command_line); -@@ -276,10 +286,7 @@ void __init setup_arch(char **cmdline_p) - #if IS_ENABLED(CONFIG_BUILTIN_DTB) - unflatten_and_copy_device_tree(); - #else -- if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa)))) -- unflatten_device_tree(); -- else -- pr_err("No DTB found in kernel mappings\n"); -+ unflatten_device_tree(); - #endif - misc_mem_init(); - -@@ -320,10 +327,11 @@ subsys_initcall(topology_init); - - void free_initmem(void) - { -- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) -- set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), -- IS_ENABLED(CONFIG_64BIT) ? -- set_memory_rw : set_memory_rw_nx); -+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { -+ set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx); -+ if (IS_ENABLED(CONFIG_64BIT)) -+ set_kernel_memory(__init_begin, __init_end, set_memory_nx); -+ } - - free_initmem_default(POISON_FREE_INITMEM); - } -diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c -index c2d5ecbe55264..8892569aad23b 100644 ---- a/arch/riscv/kernel/signal.c -+++ b/arch/riscv/kernel/signal.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - extern u32 __user_rt_sigreturn[2]; - -@@ -121,6 +122,8 @@ SYSCALL_DEFINE0(rt_sigreturn) - if (restore_altstack(&frame->uc.uc_stack)) - goto badframe; - -+ regs->cause = -1UL; -+ - return regs->a0; - - badframe: -@@ -176,6 +179,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, - { - struct rt_sigframe __user *frame; - long err = 0; -+ unsigned long __maybe_unused addr; - - frame = get_sigframe(ksig, regs, sizeof(*frame)); - if (!access_ok(frame, sizeof(*frame))) -@@ -204,7 +208,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, - if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn, - sizeof(frame->sigreturn_code))) - return -EFAULT; -- regs->ra = (unsigned long)&frame->sigreturn_code; -+ -+ addr = (unsigned long)&frame->sigreturn_code; -+ /* Make sure the two instructions are pushed to icache. 
*/ -+ flush_icache_range(addr, addr + sizeof(frame->sigreturn_code)); -+ -+ regs->ra = addr; - #endif /* CONFIG_MMU */ - - /* -diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c -index 921d9d7df4001..d0147294691d9 100644 ---- a/arch/riscv/kernel/smp.c -+++ b/arch/riscv/kernel/smp.c -@@ -59,16 +59,6 @@ int riscv_hartid_to_cpuid(int hartid) - return -ENOENT; - } - --void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) --{ -- int cpu; -- -- cpumask_clear(out); -- for_each_cpu(cpu, in) -- cpumask_set_cpu(cpuid_to_hartid_map(cpu), out); --} --EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask); -- - bool arch_match_cpu_phys_id(int cpu, u64 phys_id) - { - return phys_id == cpuid_to_hartid_map(cpu); -diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c -index bd82375db51a6..0f323e935dd89 100644 ---- a/arch/riscv/kernel/smpboot.c -+++ b/arch/riscv/kernel/smpboot.c -@@ -53,6 +53,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) - unsigned int curr_cpuid; - - curr_cpuid = smp_processor_id(); -+ store_cpu_topology(curr_cpuid); - numa_store_cpu_info(curr_cpuid); - numa_add_cpu(curr_cpuid); - -@@ -165,9 +166,9 @@ asmlinkage __visible void smp_callin(void) - mmgrab(mm); - current->active_mm = mm; - -+ store_cpu_topology(curr_cpuid); - notify_cpu_starting(curr_cpuid); - numa_add_cpu(curr_cpuid); -- update_siblings_masks(curr_cpuid); - set_cpu_online(curr_cpuid, 1); - - /* -diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c -index 315db3d0229bf..894ae66421a76 100644 ---- a/arch/riscv/kernel/stacktrace.c -+++ b/arch/riscv/kernel/stacktrace.c -@@ -22,15 +22,17 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, - bool (*fn)(void *, unsigned long), void *arg) - { - unsigned long fp, sp, pc; -+ int level = 0; - - if (regs) { - fp = frame_pointer(regs); - sp = user_stack_pointer(regs); - pc = instruction_pointer(regs); - } else if (task == NULL || task == current) { -- fp = (unsigned long)__builtin_frame_address(1); -- sp = (unsigned long)__builtin_frame_address(0); -- pc = (unsigned long)__builtin_return_address(0); -+ fp = (unsigned long)__builtin_frame_address(0); -+ sp = sp_in_global; -+ pc = (unsigned long)walk_stackframe; -+ level = -1; - } else { - /* task blocked in __switch_to */ - fp = task->thread.s[0]; -@@ -42,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, - unsigned long low, high; - struct stackframe *frame; - -- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc))) -+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc)))) - break; - - /* Validate frame pointer */ -@@ -59,7 +61,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, - } else { - fp = frame->fp; - pc = ftrace_graph_ret_addr(current, NULL, frame->ra, -- (unsigned long *)(fp - 8)); -+ &frame->ra); - } - - } -@@ -92,7 +94,7 @@ void notrace walk_stackframe(struct task_struct *task, - while (!kstack_end(ksp)) { - if (__kernel_text_address(pc) && unlikely(!fn(arg, pc))) - break; -- pc = (*ksp++) - 0x4; -+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4; - } - } - -diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c -index 12f8a7fce78b1..bb402685057a2 100644 ---- a/arch/riscv/kernel/sys_riscv.c -+++ b/arch/riscv/kernel/sys_riscv.c -@@ -18,10 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, - if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) - return -EINVAL; - -- if ((prot & PROT_WRITE) && 
(prot & PROT_EXEC)) -- if (unlikely(!(prot & PROT_READ))) -- return -EINVAL; -- - return ksys_mmap_pgoff(addr, len, prot, flags, fd, - offset >> (PAGE_SHIFT - page_shift_offset)); - } -diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c -index 8217b0f67c6cb..1cf21db4fcc77 100644 ---- a/arch/riscv/kernel/time.c -+++ b/arch/riscv/kernel/time.c -@@ -5,6 +5,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -29,6 +30,8 @@ void __init time_init(void) - - of_clk_init(NULL); - timer_probe(); -+ -+ tick_setup_hrtimer_broadcast(); - } - - void clocksource_arch_init(struct clocksource *cs) -diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c -new file mode 100644 -index 0000000000000..095ac976d7da1 ---- /dev/null -+++ b/arch/riscv/kernel/trace_irq.c -@@ -0,0 +1,27 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2022 Changbin Du -+ */ -+ -+#include -+#include -+#include "trace_irq.h" -+ -+/* -+ * trace_hardirqs_on/off require the caller to setup frame pointer properly. -+ * Otherwise, CALLER_ADDR1 might trigger an pagging exception in kernel. -+ * Here we add one extra level so they can be safely called by low -+ * level entry code which $fp is used for other purpose. -+ */ -+ -+void __trace_hardirqs_on(void) -+{ -+ trace_hardirqs_on(); -+} -+NOKPROBE_SYMBOL(__trace_hardirqs_on); -+ -+void __trace_hardirqs_off(void) -+{ -+ trace_hardirqs_off(); -+} -+NOKPROBE_SYMBOL(__trace_hardirqs_off); -diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h -new file mode 100644 -index 0000000000000..99fe67377e5ed ---- /dev/null -+++ b/arch/riscv/kernel/trace_irq.h -@@ -0,0 +1,11 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2022 Changbin Du -+ */ -+#ifndef __TRACE_IRQ_H -+#define __TRACE_IRQ_H -+ -+void __trace_hardirqs_on(void); -+void __trace_hardirqs_off(void); -+ -+#endif /* __TRACE_IRQ_H */ -diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c -index 0daaa3e4630d4..4f38b3c47e6d5 100644 ---- a/arch/riscv/kernel/traps.c -+++ b/arch/riscv/kernel/traps.c -@@ -16,12 +16,14 @@ - #include - #include - #include -+#include - - #include - #include -+#include - #include - #include --#include -+#include - - int show_unhandled_signals = 1; - -@@ -31,22 +33,29 @@ void die(struct pt_regs *regs, const char *str) - { - static int die_counter; - int ret; -+ long cause; -+ unsigned long flags; - - oops_enter(); - -- spin_lock_irq(&die_lock); -+ spin_lock_irqsave(&die_lock, flags); - console_verbose(); - bust_spinlocks(1); - - pr_emerg("%s [#%d]\n", str, ++die_counter); - print_modules(); -- show_regs(regs); -+ if (regs) -+ show_regs(regs); - -- ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV); -+ cause = regs ? 
regs->cause : -1; -+ ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV); -+ -+ if (kexec_should_crash(current)) -+ crash_kexec(regs); - - bust_spinlocks(0); - add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); -- spin_unlock_irq(&die_lock); -+ spin_unlock_irqrestore(&die_lock, flags); - oops_exit(); - - if (in_interrupt()) -@@ -54,7 +63,7 @@ void die(struct pt_regs *regs, const char *str) - if (panic_on_oops) - panic("Fatal exception"); - if (ret != NOTIFY_STOP) -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) -@@ -206,18 +215,36 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], - * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used - * to get per-cpu overflow stack(get_overflow_stack). - */ --long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)]; -+long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); - asmlinkage unsigned long get_overflow_stack(void) - { - return (unsigned long)this_cpu_ptr(overflow_stack) + - OVERFLOW_STACK_SIZE; - } - -+/* -+ * A pseudo spinlock to protect the shadow stack from being used by multiple -+ * harts concurrently. This isn't a real spinlock because the lock side must -+ * be taken without a valid stack and only a single register, it's only taken -+ * while in the process of panicing anyway so the performance and error -+ * checking a proper spinlock gives us doesn't matter. -+ */ -+unsigned long spin_shadow_stack; -+ - asmlinkage void handle_bad_stack(struct pt_regs *regs) - { - unsigned long tsk_stk = (unsigned long)current->stack; - unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); - -+ /* -+ * We're done with the shadow stack by this point, as we're on the -+ * overflow stack. Tell any other concurrent overflowing harts that -+ * they can proceed with panicing by releasing the pseudo-spinlock. -+ * -+ * This pairs with an amoswap.aq in handle_kernel_stack_overflow. 
-+ */ -+ smp_store_release(&spin_shadow_stack, 0); -+ - console_verbose(); - - pr_emerg("Insufficient stack space to handle exception!\n"); -diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile -index f2e065671e4d5..06e6b27f3bcc9 100644 ---- a/arch/riscv/kernel/vdso/Makefile -+++ b/arch/riscv/kernel/vdso/Makefile -@@ -17,6 +17,7 @@ vdso-syms += flush_icache - obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o - - ccflags-y := -fno-stack-protector -+ccflags-y += -DDISABLE_BRANCH_PROFILING - - ifneq ($(c-gettimeofday-y),) - CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) -@@ -28,9 +29,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) - - obj-y += vdso.o - CPPFLAGS_vdso.lds += -P -C -U$(ARCH) -+ifneq ($(filter vgettimeofday, $(vdso-syms)),) -+CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY -+endif - - # Disable -pg to prevent insert call site --CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os -+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) - - # Disable profiling and instrumentation for VDSO code - GCOV_PROFILE := n -diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S -index e9111f700af08..3729cb28aac8d 100644 ---- a/arch/riscv/kernel/vdso/vdso.lds.S -+++ b/arch/riscv/kernel/vdso/vdso.lds.S -@@ -65,9 +65,11 @@ VERSION - LINUX_4.15 { - global: - __vdso_rt_sigreturn; -+#ifdef HAS_VGETTIMEOFDAY - __vdso_gettimeofday; - __vdso_clock_gettime; - __vdso_clock_getres; -+#endif - __vdso_getcpu; - __vdso_flush_icache; - local: *; -diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S -index 07d1d2152ba5c..e0609e1f0864d 100644 ---- a/arch/riscv/lib/memmove.S -+++ b/arch/riscv/lib/memmove.S -@@ -1,64 +1,316 @@ --/* SPDX-License-Identifier: GPL-2.0 */ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (C) 2022 Michael T. Kloos -+ */ - - #include - #include - --ENTRY(__memmove) --WEAK(memmove) -- move t0, a0 -- move t1, a1 -- -- beq a0, a1, exit_memcpy -- beqz a2, exit_memcpy -- srli t2, a2, 0x2 -- -- slt t3, a0, a1 -- beqz t3, do_reverse -- -- andi a2, a2, 0x3 -- li t4, 1 -- beqz t2, byte_copy -- --word_copy: -- lw t3, 0(a1) -- addi t2, t2, -1 -- addi a1, a1, 4 -- sw t3, 0(a0) -- addi a0, a0, 4 -- bnez t2, word_copy -- beqz a2, exit_memcpy -- j byte_copy -- --do_reverse: -- add a0, a0, a2 -- add a1, a1, a2 -- andi a2, a2, 0x3 -- li t4, -1 -- beqz t2, reverse_byte_copy -- --reverse_word_copy: -- addi a1, a1, -4 -- addi t2, t2, -1 -- lw t3, 0(a1) -- addi a0, a0, -4 -- sw t3, 0(a0) -- bnez t2, reverse_word_copy -- beqz a2, exit_memcpy -- --reverse_byte_copy: -- addi a0, a0, -1 -- addi a1, a1, -1 -+SYM_FUNC_START(__memmove) -+SYM_FUNC_START_WEAK(memmove) -+ /* -+ * Returns -+ * a0 - dest -+ * -+ * Parameters -+ * a0 - Inclusive first byte of dest -+ * a1 - Inclusive first byte of src -+ * a2 - Length of copy n -+ * -+ * Because the return matches the parameter register a0, -+ * we will not clobber or modify that register. -+ * -+ * Note: This currently only works on little-endian. -+ * To port to big-endian, reverse the direction of shifts -+ * in the 2 misaligned fixup copy loops. 
-+ */ - -+ /* Return if nothing to do */ -+ beq a0, a1, return_from_memmove -+ beqz a2, return_from_memmove -+ -+ /* -+ * Register Uses -+ * Forward Copy: a1 - Index counter of src -+ * Reverse Copy: a4 - Index counter of src -+ * Forward Copy: t3 - Index counter of dest -+ * Reverse Copy: t4 - Index counter of dest -+ * Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest -+ * Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest -+ * Both Copy Modes: t0 - Link / Temporary for load-store -+ * Both Copy Modes: t1 - Temporary for load-store -+ * Both Copy Modes: t2 - Temporary for load-store -+ * Both Copy Modes: a5 - dest to src alignment offset -+ * Both Copy Modes: a6 - Shift ammount -+ * Both Copy Modes: a7 - Inverse Shift ammount -+ * Both Copy Modes: a2 - Alternate breakpoint for unrolled loops -+ */ -+ -+ /* -+ * Solve for some register values now. -+ * Byte copy does not need t5 or t6. -+ */ -+ mv t3, a0 -+ add t4, a0, a2 -+ add a4, a1, a2 -+ -+ /* -+ * Byte copy if copying less than (2 * SZREG) bytes. This can -+ * cause problems with the bulk copy implementation and is -+ * small enough not to bother. -+ */ -+ andi t0, a2, -(2 * SZREG) -+ beqz t0, byte_copy -+ -+ /* -+ * Now solve for t5 and t6. -+ */ -+ andi t5, t3, -SZREG -+ andi t6, t4, -SZREG -+ /* -+ * If dest(Register t3) rounded down to the nearest naturally -+ * aligned SZREG address, does not equal dest, then add SZREG -+ * to find the low-bound of SZREG alignment in the dest memory -+ * region. Note that this could overshoot the dest memory -+ * region if n is less than SZREG. This is one reason why -+ * we always byte copy if n is less than SZREG. -+ * Otherwise, dest is already naturally aligned to SZREG. -+ */ -+ beq t5, t3, 1f -+ addi t5, t5, SZREG -+ 1: -+ -+ /* -+ * If the dest and src are co-aligned to SZREG, then there is -+ * no need for the full rigmarole of a full misaligned fixup copy. -+ * Instead, do a simpler co-aligned copy. -+ */ -+ xor t0, a0, a1 -+ andi t1, t0, (SZREG - 1) -+ beqz t1, coaligned_copy -+ /* Fall through to misaligned fixup copy */ -+ -+misaligned_fixup_copy: -+ bltu a1, a0, misaligned_fixup_copy_reverse -+ -+misaligned_fixup_copy_forward: -+ jal t0, byte_copy_until_aligned_forward -+ -+ andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */ -+ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ -+ sub a5, a1, t3 /* Find the difference between src and dest */ -+ andi a1, a1, -SZREG /* Align the src pointer */ -+ addi a2, t6, SZREG /* The other breakpoint for the unrolled loop*/ -+ -+ /* -+ * Compute The Inverse Shift -+ * a7 = XLEN - a6 = XLEN + -a6 -+ * 2s complement negation to find the negative: -a6 = ~a6 + 1 -+ * Add that to XLEN. XLEN = SZREG * 8. 
-+ */ -+ not a7, a6 -+ addi a7, a7, (SZREG * 8 + 1) -+ -+ /* -+ * Fix Misalignment Copy Loop - Forward -+ * load_val0 = load_ptr[0]; -+ * do { -+ * load_val1 = load_ptr[1]; -+ * store_ptr += 2; -+ * store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7}); -+ * -+ * if (store_ptr == {a2}) -+ * break; -+ * -+ * load_val0 = load_ptr[2]; -+ * load_ptr += 2; -+ * store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7}); -+ * -+ * } while (store_ptr != store_ptr_end); -+ * store_ptr = store_ptr_end; -+ */ -+ -+ REG_L t0, (0 * SZREG)(a1) -+ 1: -+ REG_L t1, (1 * SZREG)(a1) -+ addi t3, t3, (2 * SZREG) -+ srl t0, t0, a6 -+ sll t2, t1, a7 -+ or t2, t0, t2 -+ REG_S t2, ((0 * SZREG) - (2 * SZREG))(t3) -+ -+ beq t3, a2, 2f -+ -+ REG_L t0, (2 * SZREG)(a1) -+ addi a1, a1, (2 * SZREG) -+ srl t1, t1, a6 -+ sll t2, t0, a7 -+ or t2, t1, t2 -+ REG_S t2, ((1 * SZREG) - (2 * SZREG))(t3) -+ -+ bne t3, t6, 1b -+ 2: -+ mv t3, t6 /* Fix the dest pointer in case the loop was broken */ -+ -+ add a1, t3, a5 /* Restore the src pointer */ -+ j byte_copy_forward /* Copy any remaining bytes */ -+ -+misaligned_fixup_copy_reverse: -+ jal t0, byte_copy_until_aligned_reverse -+ -+ andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */ -+ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ -+ sub a5, a4, t4 /* Find the difference between src and dest */ -+ andi a4, a4, -SZREG /* Align the src pointer */ -+ addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop*/ -+ -+ /* -+ * Compute The Inverse Shift -+ * a7 = XLEN - a6 = XLEN + -a6 -+ * 2s complement negation to find the negative: -a6 = ~a6 + 1 -+ * Add that to XLEN. XLEN = SZREG * 8. -+ */ -+ not a7, a6 -+ addi a7, a7, (SZREG * 8 + 1) -+ -+ /* -+ * Fix Misalignment Copy Loop - Reverse -+ * load_val1 = load_ptr[0]; -+ * do { -+ * load_val0 = load_ptr[-1]; -+ * store_ptr -= 2; -+ * store_ptr[1] = (load_val0 >> {a6}) | (load_val1 << {a7}); -+ * -+ * if (store_ptr == {a2}) -+ * break; -+ * -+ * load_val1 = load_ptr[-2]; -+ * load_ptr -= 2; -+ * store_ptr[0] = (load_val1 >> {a6}) | (load_val0 << {a7}); -+ * -+ * } while (store_ptr != store_ptr_end); -+ * store_ptr = store_ptr_end; -+ */ -+ -+ REG_L t1, ( 0 * SZREG)(a4) -+ 1: -+ REG_L t0, (-1 * SZREG)(a4) -+ addi t4, t4, (-2 * SZREG) -+ sll t1, t1, a7 -+ srl t2, t0, a6 -+ or t2, t1, t2 -+ REG_S t2, ( 1 * SZREG)(t4) -+ -+ beq t4, a2, 2f -+ -+ REG_L t1, (-2 * SZREG)(a4) -+ addi a4, a4, (-2 * SZREG) -+ sll t0, t0, a7 -+ srl t2, t1, a6 -+ or t2, t0, t2 -+ REG_S t2, ( 0 * SZREG)(t4) -+ -+ bne t4, t5, 1b -+ 2: -+ mv t4, t5 /* Fix the dest pointer in case the loop was broken */ -+ -+ add a4, t4, a5 /* Restore the src pointer */ -+ j byte_copy_reverse /* Copy any remaining bytes */ -+ -+/* -+ * Simple copy loops for SZREG co-aligned memory locations. -+ * These also make calls to do byte copies for any unaligned -+ * data at their terminations. -+ */ -+coaligned_copy: -+ bltu a1, a0, coaligned_copy_reverse -+ -+coaligned_copy_forward: -+ jal t0, byte_copy_until_aligned_forward -+ -+ 1: -+ REG_L t1, ( 0 * SZREG)(a1) -+ addi a1, a1, SZREG -+ addi t3, t3, SZREG -+ REG_S t1, (-1 * SZREG)(t3) -+ bne t3, t6, 1b -+ -+ j byte_copy_forward /* Copy any remaining bytes */ -+ -+coaligned_copy_reverse: -+ jal t0, byte_copy_until_aligned_reverse -+ -+ 1: -+ REG_L t1, (-1 * SZREG)(a4) -+ addi a4, a4, -SZREG -+ addi t4, t4, -SZREG -+ REG_S t1, ( 0 * SZREG)(t4) -+ bne t4, t5, 1b -+ -+ j byte_copy_reverse /* Copy any remaining bytes */ -+ -+/* -+ * These are basically sub-functions within the function. 
They -+ * are used to byte copy until the dest pointer is in alignment. -+ * At which point, a bulk copy method can be used by the -+ * calling code. These work on the same registers as the bulk -+ * copy loops. Therefore, the register values can be picked -+ * up from where they were left and we avoid code duplication -+ * without any overhead except the call in and return jumps. -+ */ -+byte_copy_until_aligned_forward: -+ beq t3, t5, 2f -+ 1: -+ lb t1, 0(a1) -+ addi a1, a1, 1 -+ addi t3, t3, 1 -+ sb t1, -1(t3) -+ bne t3, t5, 1b -+ 2: -+ jalr zero, 0x0(t0) /* Return to multibyte copy loop */ -+ -+byte_copy_until_aligned_reverse: -+ beq t4, t6, 2f -+ 1: -+ lb t1, -1(a4) -+ addi a4, a4, -1 -+ addi t4, t4, -1 -+ sb t1, 0(t4) -+ bne t4, t6, 1b -+ 2: -+ jalr zero, 0x0(t0) /* Return to multibyte copy loop */ -+ -+/* -+ * Simple byte copy loops. -+ * These will byte copy until they reach the end of data to copy. -+ * At that point, they will call to return from memmove. -+ */ - byte_copy: -- lb t3, 0(a1) -- addi a2, a2, -1 -- sb t3, 0(a0) -- add a1, a1, t4 -- add a0, a0, t4 -- bnez a2, byte_copy -- --exit_memcpy: -- move a0, t0 -- move a1, t1 -- ret --END(__memmove) -+ bltu a1, a0, byte_copy_reverse -+ -+byte_copy_forward: -+ beq t3, t4, 2f -+ 1: -+ lb t1, 0(a1) -+ addi a1, a1, 1 -+ addi t3, t3, 1 -+ sb t1, -1(t3) -+ bne t3, t4, 1b -+ 2: -+ ret -+ -+byte_copy_reverse: -+ beq t4, t3, 2f -+ 1: -+ lb t1, -1(a4) -+ addi a4, a4, -1 -+ addi t4, t4, -1 -+ sb t1, 0(t4) -+ bne t4, t3, 1b -+ 2: -+ -+return_from_memmove: -+ ret -+ -+SYM_FUNC_END(memmove) -+SYM_FUNC_END(__memmove) -diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S -index 63bc691cff91b..4fe436a0eec2c 100644 ---- a/arch/riscv/lib/uaccess.S -+++ b/arch/riscv/lib/uaccess.S -@@ -19,8 +19,11 @@ ENTRY(__asm_copy_from_user) - li t6, SR_SUM - csrs CSR_STATUS, t6 - -- /* Save for return value */ -- mv t5, a2 -+ /* -+ * Save the terminal address which will be used to compute the number -+ * of bytes copied in case of a fixup exception. 
-+ */ -+ add t5, a0, a2 - - /* - * Register allocation for code below: -@@ -173,6 +176,13 @@ ENTRY(__asm_copy_from_user) - csrc CSR_STATUS, t6 - li a0, 0 - ret -+ -+ /* Exception fixup code */ -+10: -+ /* Disable access to user memory */ -+ csrc CSR_STATUS, t6 -+ sub a0, t5, a0 -+ ret - ENDPROC(__asm_copy_to_user) - ENDPROC(__asm_copy_from_user) - EXPORT_SYMBOL(__asm_copy_to_user) -@@ -218,19 +228,12 @@ ENTRY(__clear_user) - addi a0, a0, 1 - bltu a0, a3, 5b - j 3b --ENDPROC(__clear_user) --EXPORT_SYMBOL(__clear_user) - -- .section .fixup,"ax" -- .balign 4 -- /* Fixup code for __copy_user(10) and __clear_user(11) */ --10: -- /* Disable access to user memory */ -- csrs CSR_STATUS, t6 -- mv a0, t5 -- ret -+ /* Exception fixup code */ - 11: -- csrs CSR_STATUS, t6 -- mv a0, a1 -+ /* Disable access to user memory */ -+ csrc CSR_STATUS, t6 -+ sub a0, a3, a0 - ret -- .previous -+ENDPROC(__clear_user) -+EXPORT_SYMBOL(__clear_user) -diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile -index 7ebaef10ea1b6..ac7a25298a04a 100644 ---- a/arch/riscv/mm/Makefile -+++ b/arch/riscv/mm/Makefile -@@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN) += kasan_init.o - ifdef CONFIG_KASAN - KASAN_SANITIZE_kasan_init.o := n - KASAN_SANITIZE_init.o := n -+ifdef CONFIG_DEBUG_VIRTUAL -+KASAN_SANITIZE_physaddr.o := n -+endif - endif - - obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o -diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c -index 89f81067e09ed..2ae1201cff886 100644 ---- a/arch/riscv/mm/cacheflush.c -+++ b/arch/riscv/mm/cacheflush.c -@@ -85,7 +85,9 @@ void flush_icache_pte(pte_t pte) - { - struct page *page = pte_page(pte); - -- if (!test_and_set_bit(PG_dcache_clean, &page->flags)) -+ if (!test_bit(PG_dcache_clean, &page->flags)) { - flush_icache_all(); -+ set_bit(PG_dcache_clean, &page->flags); -+ } - } - #endif /* CONFIG_MMU */ -diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c -index ee3459cb6750b..64bfb4575f3e6 100644 ---- a/arch/riscv/mm/context.c -+++ b/arch/riscv/mm/context.c -@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator); - - static unsigned long asid_bits; - static unsigned long num_asids; --static unsigned long asid_mask; -+unsigned long asid_mask; - - static atomic_long_t current_version; - -@@ -205,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm) - local_flush_tlb_all(); - } - --static inline void set_mm(struct mm_struct *mm, unsigned int cpu) -+static inline void set_mm(struct mm_struct *prev, -+ struct mm_struct *next, unsigned int cpu) - { -- if (static_branch_unlikely(&use_asid_allocator)) -- set_mm_asid(mm, cpu); -- else -- set_mm_noasid(mm); -+ /* -+ * The mm_cpumask indicates which harts' TLBs contain the virtual -+ * address mapping of the mm. Compared to noasid, using asid -+ * can't guarantee that stale TLB entries are invalidated because -+ * the asid mechanism wouldn't flush TLB for every switch_mm for -+ * performance. So when using asid, keep all CPUs footmarks in -+ * cpumask() until mm reset. 
-+ */ -+ cpumask_set_cpu(cpu, mm_cpumask(next)); -+ if (static_branch_unlikely(&use_asid_allocator)) { -+ set_mm_asid(next, cpu); -+ } else { -+ cpumask_clear_cpu(cpu, mm_cpumask(prev)); -+ set_mm_noasid(next); -+ } - } - - static int __init asids_init(void) -@@ -262,7 +274,8 @@ static int __init asids_init(void) - } - early_initcall(asids_init); - #else --static inline void set_mm(struct mm_struct *mm, unsigned int cpu) -+static inline void set_mm(struct mm_struct *prev, -+ struct mm_struct *next, unsigned int cpu) - { - /* Nothing to do here when there is no MMU */ - } -@@ -315,10 +328,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, - */ - cpu = smp_processor_id(); - -- cpumask_clear_cpu(cpu, mm_cpumask(prev)); -- cpumask_set_cpu(cpu, mm_cpumask(next)); -- -- set_mm(next, cpu); -+ set_mm(prev, next, cpu); - - flush_icache_deferred(next, cpu); - } -diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c -index aa08dd2f8faec..884a3c76573cf 100644 ---- a/arch/riscv/mm/fault.c -+++ b/arch/riscv/mm/fault.c -@@ -31,7 +31,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr, - - bust_spinlocks(0); - die(regs, "Oops"); -- do_exit(SIGKILL); -+ make_task_dead(SIGKILL); - } - - static inline void no_context(struct pt_regs *regs, unsigned long addr) -@@ -188,7 +188,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma) - } - break; - case EXC_LOAD_PAGE_FAULT: -- if (!(vma->vm_flags & VM_READ)) { -+ /* Write implies read */ -+ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) { - return true; - } - break; -@@ -270,10 +271,12 @@ asmlinkage void do_page_fault(struct pt_regs *regs) - if (user_mode(regs)) - flags |= FAULT_FLAG_USER; - -- if (!user_mode(regs) && addr < TASK_SIZE && -- unlikely(!(regs->status & SR_SUM))) -- die_kernel_fault("access to user memory without uaccess routines", -- addr, regs); -+ if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) { -+ if (fixup_exception(regs)) -+ return; -+ -+ die_kernel_fault("access to user memory without uaccess routines", addr, regs); -+ } - - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - -diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c -index c0cddf0fc22db..d7115acab3501 100644 ---- a/arch/riscv/mm/init.c -+++ b/arch/riscv/mm/init.c -@@ -49,7 +49,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] - EXPORT_SYMBOL(empty_zero_page); - - extern char _start[]; --#define DTB_EARLY_BASE_VA PGDIR_SIZE - void *_dtb_early_va __initdata; - uintptr_t _dtb_early_pa __initdata; - -@@ -100,6 +99,10 @@ static void __init print_vm_layout(void) - (unsigned long)VMEMMAP_END); - print_mlm("vmalloc", (unsigned long)VMALLOC_START, - (unsigned long)VMALLOC_END); -+#ifdef CONFIG_64BIT -+ print_mlm("modules", (unsigned long)MODULES_VADDR, -+ (unsigned long)MODULES_END); -+#endif - print_mlm("lowmem", (unsigned long)PAGE_OFFSET, - (unsigned long)high_memory); - #ifdef CONFIG_64BIT -@@ -187,10 +190,10 @@ static void __init setup_bootmem(void) - - - phys_ram_end = memblock_end_of_DRAM(); --#ifndef CONFIG_64BIT - #ifndef CONFIG_XIP_KERNEL - phys_ram_base = memblock_start_of_DRAM(); - #endif -+#ifndef CONFIG_64BIT - /* - * memblock allocator is not aware of the fact that last 4K bytes of - * the addressable memory can not be mapped because of IS_ERR_VALUE -@@ -212,6 +215,14 @@ static void __init setup_bootmem(void) - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); - - reserve_initrd_mem(); -+ -+ /* -+ * No allocation should be done before reserving the 
memory as defined -+ * in the device tree, otherwise the allocation could end up in a -+ * reserved region. -+ */ -+ early_init_fdt_scan_reserved_mem(); -+ - /* - * If DTB is built in, no need to reserve its memblock. - * Otherwise, do reserve it but avoid using -@@ -221,11 +232,9 @@ static void __init setup_bootmem(void) - if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) - memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); - -- early_init_fdt_scan_reserved_mem(); - dma_contiguous_reserve(dma32_phys_limit); - if (IS_ENABLED(CONFIG_64BIT)) - hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); -- memblock_allow_resize(); - } - - #ifdef CONFIG_MMU -@@ -245,9 +254,9 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; - static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; - - pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); --static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); - - #ifdef CONFIG_XIP_KERNEL -+#define riscv_pfn_base (*(unsigned long *)XIP_FIXUP(&riscv_pfn_base)) - #define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir)) - #define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte)) - #define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) -@@ -451,6 +460,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) - } - - #ifdef CONFIG_XIP_KERNEL -+#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base)) - /* called from head.S with MMU off */ - asmlinkage void __init __copy_data(void) - { -@@ -558,24 +568,27 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early) - * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR - * entry. - */ --static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa) -+static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, -+ uintptr_t dtb_pa) - { - #ifndef CONFIG_BUILTIN_DTB - uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); - -- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, -- IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa, -- PGDIR_SIZE, -- IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL); -+ /* Make sure the fdt fixmap address is always aligned on PMD size */ -+ BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); - -- if (IS_ENABLED(CONFIG_64BIT)) { -- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA, -+ /* In 32-bit only, the fdt lies in its own PGD */ -+ if (!IS_ENABLED(CONFIG_64BIT)) { -+ create_pgd_mapping(early_pg_dir, fix_fdt_va, -+ pa, MAX_FDT_SIZE, PAGE_KERNEL); -+ } else { -+ create_pmd_mapping(fixmap_pmd, fix_fdt_va, - pa, PMD_SIZE, PAGE_KERNEL); -- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE, -+ create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE, - pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); - } - -- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); -+ dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); - #else - /* - * For 64-bit kernel, __va can't be used since it would return a linear -@@ -663,7 +676,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) - create_kernel_page_table(early_pg_dir, true); - - /* Setup early mapping for FDT early scan */ -- create_fdt_early_page_table(early_pg_dir, dtb_pa); -+ create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa); - - /* - * Bootime fixmap only can handle PMD_SIZE mapping. 
Thus, boot-ioremap -@@ -700,6 +713,7 @@ static void __init setup_vm_final(void) - { - uintptr_t va, map_size; - phys_addr_t pa, start, end; -+ unsigned long idx __maybe_unused; - u64 i; - - /** -@@ -713,6 +727,16 @@ static void __init setup_vm_final(void) - pt_ops.get_pmd_virt = get_pmd_virt_fixmap; - #endif - /* Setup swapper PGD for fixmap */ -+#if !defined(CONFIG_64BIT) -+ /* -+ * In 32-bit, the device tree lies in a pgd entry, so it must be copied -+ * directly in swapper_pg_dir in addition to the pgd entry that points -+ * to fixmap_pte. -+ */ -+ idx = pgd_index(__fix_to_virt(FIX_FDT)); -+ -+ set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]); -+#endif - create_pgd_mapping(swapper_pg_dir, FIXADDR_START, - __pa_symbol(fixmap_pgd_next), - PGDIR_SIZE, PAGE_TABLE); -@@ -813,13 +837,22 @@ static void __init reserve_crashkernel(void) - /* - * Current riscv boot protocol requires 2MB alignment for - * RV64 and 4MB alignment for RV32 (hugepage size) -+ * -+ * Try to alloc from 32bit addressible physical memory so that -+ * swiotlb can work on the crash kernel. - */ - crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE, -- search_start, search_end); -+ search_start, -+ min(search_end, (unsigned long)(SZ_4G - 1))); - if (crash_base == 0) { -- pr_warn("crashkernel: couldn't allocate %lldKB\n", -- crash_size >> 10); -- return; -+ /* Try again without restricting region to 32bit addressible memory */ -+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE, -+ search_start, search_end); -+ if (crash_base == 0) { -+ pr_warn("crashkernel: couldn't allocate %lldKB\n", -+ crash_size >> 10); -+ return; -+ } - } - - pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n", -@@ -834,6 +867,9 @@ void __init paging_init(void) - { - setup_bootmem(); - setup_vm_final(); -+ -+ /* Depend on that Linear Mapping is ready */ -+ memblock_allow_resize(); - } - - void __init misc_mem_init(void) -diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c -index 54294f83513d1..e26e367a3d9ef 100644 ---- a/arch/riscv/mm/kasan_init.c -+++ b/arch/riscv/mm/kasan_init.c -@@ -22,8 +22,7 @@ asmlinkage void __init kasan_early_init(void) - - for (i = 0; i < PTRS_PER_PTE; ++i) - set_pte(kasan_early_shadow_pte + i, -- mk_pte(virt_to_page(kasan_early_shadow_page), -- PAGE_KERNEL)); -+ pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL)); - - for (i = 0; i < PTRS_PER_PMD; ++i) - set_pmd(kasan_early_shadow_pmd + i, -diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c -index 5e49e4b4a4ccc..ea3d61de065b3 100644 ---- a/arch/riscv/mm/pageattr.c -+++ b/arch/riscv/mm/pageattr.c -@@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, - if (!numpages) - return 0; - -- mmap_read_lock(&init_mm); -+ mmap_write_lock(&init_mm); - ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, - &masks); -- mmap_read_unlock(&init_mm); -+ mmap_write_unlock(&init_mm); - - flush_tlb_kernel_range(start, end); - -@@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page) - pgd = pgd_offset_k(addr); - if (!pgd_present(*pgd)) - return false; -+ if (pgd_leaf(*pgd)) -+ return true; - - p4d = p4d_offset(pgd, addr); - if (!p4d_present(*p4d)) - return false; -+ if (p4d_leaf(*p4d)) -+ return true; - - pud = pud_offset(p4d, addr); - if (!pud_present(*pud)) - return false; -+ if (pud_leaf(*pud)) -+ return true; - - pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) - return false; -+ if (pmd_leaf(*pmd)) -+ return true; - - pte = 
pte_offset_kernel(pmd, addr); - return pte_present(*pte); -diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c -index 64f8201237c24..39d18fc07b9c6 100644 ---- a/arch/riscv/mm/tlbflush.c -+++ b/arch/riscv/mm/tlbflush.c -@@ -43,7 +43,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, - /* check if the tlbflush needs to be sent to other CPUs */ - broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; - if (static_branch_unlikely(&use_asid_allocator)) { -- unsigned long asid = atomic_long_read(&mm->context.id); -+ unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask; - - if (broadcast) { - riscv_cpuid_to_hartid_mask(cmask, &hmask); -diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h -index 75c1e99968675..ef336fe160044 100644 ---- a/arch/riscv/net/bpf_jit.h -+++ b/arch/riscv/net/bpf_jit.h -@@ -69,6 +69,7 @@ struct rv_jit_context { - struct bpf_prog *prog; - u16 *insns; /* RV insns */ - int ninsns; -+ int prologue_len; - int epilogue_offset; - int *offset; /* BPF to RV */ - unsigned long flags; -@@ -214,8 +215,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx) - int from, to; - - off++; /* BPF branch is from PC+1, RV is from PC */ -- from = (insn > 0) ? ctx->offset[insn - 1] : 0; -- to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0; -+ from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len; -+ to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len; - return ninsns_rvoff(to - from); - } - -diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c -index 3af4131c22c7a..2e3f1a626a3af 100644 ---- a/arch/riscv/net/bpf_jit_comp64.c -+++ b/arch/riscv/net/bpf_jit_comp64.c -@@ -120,6 +120,25 @@ static bool in_auipc_jalr_range(s64 val) - val < ((1L << 31) - (1L << 11)); - } - -+/* Emit fixed-length instructions for address */ -+static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) -+{ -+ u64 ip = (u64)(ctx->insns + ctx->ninsns); -+ s64 off = addr - ip; -+ s64 upper = (off + (1 << 11)) >> 12; -+ s64 lower = off & 0xfff; -+ -+ if (extra_pass && !in_auipc_jalr_range(off)) { -+ pr_err("bpf-jit: target offset 0x%llx is out of range\n", off); -+ return -ERANGE; -+ } -+ -+ emit(rv_auipc(rd, upper), ctx); -+ emit(rv_addi(rd, rd, lower), ctx); -+ return 0; -+} -+ -+/* Emit variable-length instructions for 32-bit and 64-bit imm */ - static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) - { - /* Note that the immediate from the add is sign-extended, -@@ -887,7 +906,15 @@ out_be: - u64 imm64; - - imm64 = (u64)insn1.imm << 32 | (u32)imm; -- emit_imm(rd, imm64, ctx); -+ if (bpf_pseudo_func(insn)) { -+ /* fixed-length insns for extra jit pass */ -+ ret = emit_addr(rd, imm64, extra_pass, ctx); -+ if (ret) -+ return ret; -+ } else { -+ emit_imm(rd, imm64, ctx); -+ } -+ - return 1; - } - -diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c -index 753d85bdfad07..b95c60f663d44 100644 ---- a/arch/riscv/net/bpf_jit_core.c -+++ b/arch/riscv/net/bpf_jit_core.c -@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) - prog = orig_prog; - goto out_offset; - } -+ -+ if (build_body(ctx, extra_pass, NULL)) { -+ prog = orig_prog; -+ goto out_offset; -+ } -+ - for (i = 0; i < prog->len; i++) { - prev_ninsns += 32; - ctx->offset[i] = prev_ninsns; -@@ -91,11 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) - for (i = 0; i < NR_JIT_ITERATIONS; i++) { - pass++; - ctx->ninsns = 0; -+ 
-+ bpf_jit_build_prologue(ctx); -+ ctx->prologue_len = ctx->ninsns; -+ - if (build_body(ctx, extra_pass, ctx->offset)) { - prog = orig_prog; - goto out_offset; - } -- bpf_jit_build_prologue(ctx); -+ - ctx->epilogue_offset = ctx->ninsns; - bpf_jit_build_epilogue(ctx); - -@@ -154,6 +164,9 @@ skip_init_ctx: - - if (!prog->is_func || extra_pass) { - bpf_jit_binary_lock_ro(jit_data->header); -+ for (i = 0; i < prog->len; i++) -+ ctx->offset[i] = ninsns_rvoff(ctx->offset[i]); -+ bpf_prog_fill_jited_linfo(prog, ctx->offset); - out_offset: - kfree(ctx->offset); - kfree(jit_data); -diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig -index b86de61b8caa2..e402fa964f235 100644 ---- a/arch/s390/Kconfig -+++ b/arch/s390/Kconfig -@@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES - config KASAN_SHADOW_OFFSET - hex - depends on KASAN -- default 0x18000000000000 -+ default 0x1C000000000000 - - config S390 - def_bool y -@@ -516,7 +516,6 @@ config KEXEC - config KEXEC_FILE - bool "kexec file based system call" - select KEXEC_CORE -- select BUILD_BIN2C - depends on CRYPTO - depends on CRYPTO_SHA256 - depends on CRYPTO_SHA256_S390 -diff --git a/arch/s390/Makefile b/arch/s390/Makefile -index 450b351dfa8ef..dc840ba0b016a 100644 ---- a/arch/s390/Makefile -+++ b/arch/s390/Makefile -@@ -29,9 +29,20 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbac - KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables - KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding - KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector -+KBUILD_CFLAGS_DECOMPRESSOR += -fPIE - KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) - KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) - KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) -+ -+ifdef CONFIG_CC_IS_GCC -+ ifeq ($(call cc-ifversion, -ge, 1200, y), y) -+ ifeq ($(call cc-ifversion, -lt, 1300, y), y) -+ KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) -+ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds) -+ endif -+ endif -+endif -+ - UTS_MACHINE := s390x - STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384) - CHECKFLAGS += -D__s390__ -D__s390x__ -@@ -79,10 +90,12 @@ KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y) - KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y) - - ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),) --cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) --ifeq ($(call cc-option,-mstack-size=8192),) --cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) --endif -+ CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE) -+ ifeq ($(call cc-option,-mstack-size=8192),) -+ CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD) -+ endif -+ export CC_FLAGS_CHECK_STACK -+ cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK) - endif - - ifdef CONFIG_EXPOLINE -diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c -index e27c2140d6206..623f6775d01d7 100644 ---- a/arch/s390/boot/compressed/decompressor.c -+++ b/arch/s390/boot/compressed/decompressor.c -@@ -80,6 +80,6 @@ void *decompress_kernel(void) - void *output = (void *)decompress_offset; - - __decompress(_compressed_start, _compressed_end - _compressed_start, -- NULL, NULL, output, 0, NULL, error); -+ NULL, NULL, output, vmlinux.image_size, NULL, error); - return output; - } -diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h -index a59f75c5b0490..f75cc31a77dd9 100644 ---- 
a/arch/s390/boot/compressed/decompressor.h -+++ b/arch/s390/boot/compressed/decompressor.h -@@ -24,6 +24,7 @@ struct vmlinux_info { - unsigned long dynsym_start; - unsigned long rela_dyn_start; - unsigned long rela_dyn_end; -+ unsigned long amode31_size; - }; - - /* Symbols defined by linker scripts */ -diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S -index 918e05137d4c6..1686a852534fc 100644 ---- a/arch/s390/boot/compressed/vmlinux.lds.S -+++ b/arch/s390/boot/compressed/vmlinux.lds.S -@@ -93,8 +93,17 @@ SECTIONS - _compressed_start = .; - *(.vmlinux.bin.compressed) - _compressed_end = .; -- FILL(0xff); -- . = ALIGN(4096); -+ } -+ -+#define SB_TRAILER_SIZE 32 -+ /* Trailer needed for Secure Boot */ -+ . += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */ -+ . = ALIGN(4096) - SB_TRAILER_SIZE; -+ .sb.trailer : { -+ QUAD(0) -+ QUAD(0) -+ QUAD(0) -+ QUAD(0x000000207a49504c) - } - _end = .; - -diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c -index 9b14045065b6e..74b5cd2648622 100644 ---- a/arch/s390/boot/ipl_report.c -+++ b/arch/s390/boot/ipl_report.c -@@ -57,11 +57,19 @@ repeat: - if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size && - intersects(initrd_data.start, initrd_data.size, safe_addr, size)) - safe_addr = initrd_data.start + initrd_data.size; -+ if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) { -+ safe_addr = (unsigned long)comps + comps->len; -+ goto repeat; -+ } - for_each_rb_entry(comp, comps) - if (intersects(safe_addr, size, comp->addr, comp->len)) { - safe_addr = comp->addr + comp->len; - goto repeat; - } -+ if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) { -+ safe_addr = (unsigned long)certs + certs->len; -+ goto repeat; -+ } - for_each_rb_entry(cert, certs) - if (intersects(safe_addr, size, cert->addr, cert->len)) { - safe_addr = cert->addr + cert->len; -diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c -index 2f949cd9076b8..17a32707d17e0 100644 ---- a/arch/s390/boot/mem_detect.c -+++ b/arch/s390/boot/mem_detect.c -@@ -165,7 +165,7 @@ static void search_mem_end(void) - - unsigned long detect_memory(void) - { -- unsigned long max_physmem_end; -+ unsigned long max_physmem_end = 0; - - sclp_early_get_memsize(&max_physmem_end); - -diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c -index 6dc8d0a538640..1aa11a8f57dd8 100644 ---- a/arch/s390/boot/startup.c -+++ b/arch/s390/boot/startup.c -@@ -15,6 +15,7 @@ - #include "uv.h" - - unsigned long __bootdata_preserved(__kaslr_offset); -+unsigned long __bootdata(__amode31_base); - unsigned long __bootdata_preserved(VMALLOC_START); - unsigned long __bootdata_preserved(VMALLOC_END); - struct page *__bootdata_preserved(vmemmap); -@@ -148,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end) - - static void setup_kernel_memory_layout(void) - { -- bool vmalloc_size_verified = false; -- unsigned long vmemmap_off; -- unsigned long vspace_left; -+ unsigned long vmemmap_start; - unsigned long rte_size; - unsigned long pages; -- unsigned long vmax; - - pages = ident_map_size / PAGE_SIZE; - /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ - vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page); - - /* choose kernel address space layout: 4 or 3 levels. 
*/ -- vmemmap_off = round_up(ident_map_size, _REGION3_SIZE); -+ vmemmap_start = round_up(ident_map_size, _REGION3_SIZE); - if (IS_ENABLED(CONFIG_KASAN) || - vmalloc_size > _REGION2_SIZE || -- vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE) -- vmax = _REGION1_SIZE; -- else -- vmax = _REGION2_SIZE; -- -- /* keep vmemmap_off aligned to a top level region table entry */ -- rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE; -- MODULES_END = vmax; -- if (is_prot_virt_host()) { -- /* -- * forcing modules and vmalloc area under the ultravisor -- * secure storage limit, so that any vmalloc allocation -- * we do could be used to back secure guest storage. -- */ -- adjust_to_uv_max(&MODULES_END); -- } -- --#ifdef CONFIG_KASAN -- if (MODULES_END < vmax) { -- /* force vmalloc and modules below kasan shadow */ -- MODULES_END = min(MODULES_END, KASAN_SHADOW_START); -+ vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN > -+ _REGION2_SIZE) { -+ MODULES_END = _REGION1_SIZE; -+ rte_size = _REGION2_SIZE; - } else { -- /* -- * leave vmalloc and modules above kasan shadow but make -- * sure they don't overlap with it -- */ -- vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN); -- vmalloc_size_verified = true; -- vspace_left = KASAN_SHADOW_START; -+ MODULES_END = _REGION2_SIZE; -+ rte_size = _REGION3_SIZE; - } -+ /* -+ * forcing modules and vmalloc area under the ultravisor -+ * secure storage limit, so that any vmalloc allocation -+ * we do could be used to back secure guest storage. -+ */ -+ adjust_to_uv_max(&MODULES_END); -+#ifdef CONFIG_KASAN -+ /* force vmalloc and modules below kasan shadow */ -+ MODULES_END = min(MODULES_END, KASAN_SHADOW_START); - #endif - MODULES_VADDR = MODULES_END - MODULES_LEN; - VMALLOC_END = MODULES_VADDR; - -- if (vmalloc_size_verified) { -- VMALLOC_START = VMALLOC_END - vmalloc_size; -- } else { -- vmemmap_off = round_up(ident_map_size, rte_size); -+ /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */ -+ vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE)); -+ VMALLOC_START = VMALLOC_END - vmalloc_size; - -- if (vmemmap_off + vmemmap_size > VMALLOC_END || -- vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) { -- /* -- * allow vmalloc area to occupy up to 1/2 of -- * the rest virtual space left. 
-- */ -- vmalloc_size = min(vmalloc_size, VMALLOC_END / 2); -- } -- VMALLOC_START = VMALLOC_END - vmalloc_size; -- vspace_left = VMALLOC_START; -- } -- -- pages = vspace_left / (PAGE_SIZE + sizeof(struct page)); -+ /* split remaining virtual space between 1:1 mapping & vmemmap array */ -+ pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); - pages = SECTION_ALIGN_UP(pages); -- vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size); -- /* keep vmemmap left most starting from a fresh region table entry */ -- vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size)); -- /* take care that identity map is lower then vmemmap */ -- ident_map_size = min(ident_map_size, vmemmap_off); -+ /* keep vmemmap_start aligned to a top level region table entry */ -+ vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size); -+ /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */ -+ vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS); -+ /* make sure identity map doesn't overlay with vmemmap */ -+ ident_map_size = min(ident_map_size, vmemmap_start); - vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page); -- VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START); -- vmemmap = (struct page *)vmemmap_off; -+ /* make sure vmemmap doesn't overlay with vmalloc area */ -+ VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START); -+ vmemmap = (struct page *)vmemmap_start; - } - - /* -@@ -259,6 +234,12 @@ static void offset_vmlinux_info(unsigned long offset) - vmlinux.dynsym_start += offset; - } - -+static unsigned long reserve_amode31(unsigned long safe_addr) -+{ -+ __amode31_base = PAGE_ALIGN(safe_addr); -+ return safe_addr + vmlinux.amode31_size; -+} -+ - void startup_kernel(void) - { - unsigned long random_lma; -@@ -273,6 +254,7 @@ void startup_kernel(void) - setup_lpp(); - store_ipl_parmblock(); - safe_addr = mem_safe_offset(); -+ safe_addr = reserve_amode31(safe_addr); - safe_addr = read_ipl_report(safe_addr); - uv_query_info(); - rescue_initrd(safe_addr); -diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c -index 54c7536f2482d..1023e9d43d443 100644 ---- a/arch/s390/crypto/aes_s390.c -+++ b/arch/s390/crypto/aes_s390.c -@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, - unsigned int nbytes) - { - gw->walk_bytes_remain -= nbytes; -- scatterwalk_unmap(&gw->walk); -+ scatterwalk_unmap(gw->walk_ptr); - scatterwalk_advance(&gw->walk, nbytes); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - gw->walk_ptr = NULL; -@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) - goto out; - } - -- scatterwalk_unmap(&gw->walk); -+ scatterwalk_unmap(gw->walk_ptr); - gw->walk_ptr = NULL; - - gw->ptr = gw->buf; -diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c -index 56007c763902a..1f2d40993c4d2 100644 ---- a/arch/s390/crypto/arch_random.c -+++ b/arch/s390/crypto/arch_random.c -@@ -4,232 +4,15 @@ - * - * Copyright IBM Corp. 2017, 2020 - * Author(s): Harald Freudenberger -- * -- * The s390_arch_random_generate() function may be called from random.c -- * in interrupt context. So this implementation does the best to be very -- * fast. There is a buffer of random data which is asynchronously checked -- * and filled by a workqueue thread. -- * If there are enough bytes in the buffer the s390_arch_random_generate() -- * just delivers these bytes. 
Otherwise false is returned until the -- * worker thread refills the buffer. -- * The worker fills the rng buffer by pulling fresh entropy from the -- * high quality (but slow) true hardware random generator. This entropy -- * is then spread over the buffer with an pseudo random generator PRNG. -- * As the arch_get_random_seed_long() fetches 8 bytes and the calling -- * function add_interrupt_randomness() counts this as 1 bit entropy the -- * distribution needs to make sure there is in fact 1 bit entropy contained -- * in 8 bytes of the buffer. The current values pull 32 byte entropy -- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer -- * will contain 1 bit of entropy. -- * The worker thread is rescheduled based on the charge level of the -- * buffer but at least with 500 ms delay to avoid too much CPU consumption. -- * So the max. amount of rng data delivered via arch_get_random_seed is -- * limited to 4k bytes per second. - */ - - #include - #include - #include --#include - #include --#include --#include - #include - - DEFINE_STATIC_KEY_FALSE(s390_arch_random_available); - - atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0); - EXPORT_SYMBOL(s390_arch_random_counter); -- --#define ARCH_REFILL_TICKS (HZ/2) --#define ARCH_PRNG_SEED_SIZE 32 --#define ARCH_RNG_BUF_SIZE 2048 -- --static DEFINE_SPINLOCK(arch_rng_lock); --static u8 *arch_rng_buf; --static unsigned int arch_rng_buf_idx; -- --static void arch_rng_refill_buffer(struct work_struct *); --static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); -- --bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) --{ -- /* max hunk is ARCH_RNG_BUF_SIZE */ -- if (nbytes > ARCH_RNG_BUF_SIZE) -- return false; -- -- /* lock rng buffer */ -- if (!spin_trylock(&arch_rng_lock)) -- return false; -- -- /* try to resolve the requested amount of bytes from the buffer */ -- arch_rng_buf_idx -= nbytes; -- if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) { -- memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes); -- atomic64_add(nbytes, &s390_arch_random_counter); -- spin_unlock(&arch_rng_lock); -- return true; -- } -- -- /* not enough bytes in rng buffer, refill is done asynchronously */ -- spin_unlock(&arch_rng_lock); -- -- return false; --} --EXPORT_SYMBOL(s390_arch_random_generate); -- --static void arch_rng_refill_buffer(struct work_struct *unused) --{ -- unsigned int delay = ARCH_REFILL_TICKS; -- -- spin_lock(&arch_rng_lock); -- if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) { -- /* buffer is exhausted and needs refill */ -- u8 seed[ARCH_PRNG_SEED_SIZE]; -- u8 prng_wa[240]; -- /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */ -- cpacf_trng(NULL, 0, seed, sizeof(seed)); -- /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */ -- memset(prng_wa, 0, sizeof(prng_wa)); -- cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, -- &prng_wa, NULL, 0, seed, sizeof(seed)); -- cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, -- &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0); -- arch_rng_buf_idx = ARCH_RNG_BUF_SIZE; -- } -- delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE; -- spin_unlock(&arch_rng_lock); -- -- /* kick next check */ -- queue_delayed_work(system_long_wq, &arch_rng_work, delay); --} -- --/* -- * Here follows the implementation of s390_arch_get_random_long(). -- * -- * The random longs to be pulled by arch_get_random_long() are -- * prepared in an 4K buffer which is filled from the NIST 800-90 -- * compliant s390 drbg. By default the random long buffer is refilled -- * 256 times before the drbg itself needs a reseed. 
The reseed of the -- * drbg is done with 32 bytes fetched from the high quality (but slow) -- * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256 -- * bits of entropy are spread over 256 * 4KB = 1MB serving 131072 -- * arch_get_random_long() invocations before reseeded. -- * -- * How often the 4K random long buffer is refilled with the drbg -- * before the drbg is reseeded can be adjusted. There is a module -- * parameter 's390_arch_rnd_long_drbg_reseed' accessible via -- * /sys/module/arch_random/parameters/rndlong_drbg_reseed -- * or as kernel command line parameter -- * arch_random.rndlong_drbg_reseed= -- * This parameter tells how often the drbg fills the 4K buffer before -- * it is re-seeded by fresh entropy from the trng. -- * A value of 16 results in reseeding the drbg at every 16 * 4 KB = 64 -- * KB with 32 bytes of fresh entropy pulled from the trng. So a value -- * of 16 would result in 256 bits entropy per 64 KB. -- * A value of 256 results in 1MB of drbg output before a reseed of the -- * drbg is done. So this would spread the 256 bits of entropy among 1MB. -- * Setting this parameter to 0 forces the reseed to take place every -- * time the 4K buffer is depleted, so the entropy rises to 256 bits -- * entropy per 4K or 0.5 bit entropy per arch_get_random_long(). With -- * setting this parameter to negative values all this effort is -- * disabled, arch_get_random long() returns false and thus indicating -- * that the arch_get_random_long() feature is disabled at all. -- */ -- --static unsigned long rndlong_buf[512]; --static DEFINE_SPINLOCK(rndlong_lock); --static int rndlong_buf_index; -- --static int rndlong_drbg_reseed = 256; --module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600); --MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed"); -- --static inline void refill_rndlong_buf(void) --{ -- static u8 prng_ws[240]; -- static int drbg_counter; -- -- if (--drbg_counter < 0) { -- /* need to re-seed the drbg */ -- u8 seed[32]; -- -- /* fetch seed from trng */ -- cpacf_trng(NULL, 0, seed, sizeof(seed)); -- /* seed drbg */ -- memset(prng_ws, 0, sizeof(prng_ws)); -- cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, -- &prng_ws, NULL, 0, seed, sizeof(seed)); -- /* re-init counter for drbg */ -- drbg_counter = rndlong_drbg_reseed; -- } -- -- /* fill the arch_get_random_long buffer from drbg */ -- cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws, -- (u8 *) rndlong_buf, sizeof(rndlong_buf), -- NULL, 0); --} -- --bool s390_arch_get_random_long(unsigned long *v) --{ -- bool rc = false; -- unsigned long flags; -- -- /* arch_get_random_long() disabled ? 
*/ -- if (rndlong_drbg_reseed < 0) -- return false; -- -- /* try to lock the random long lock */ -- if (!spin_trylock_irqsave(&rndlong_lock, flags)) -- return false; -- -- if (--rndlong_buf_index >= 0) { -- /* deliver next long value from the buffer */ -- *v = rndlong_buf[rndlong_buf_index]; -- rc = true; -- goto out; -- } -- -- /* buffer is depleted and needs refill */ -- if (in_interrupt()) { -- /* delay refill in interrupt context to next caller */ -- rndlong_buf_index = 0; -- goto out; -- } -- -- /* refill random long buffer */ -- refill_rndlong_buf(); -- rndlong_buf_index = ARRAY_SIZE(rndlong_buf); -- -- /* and provide one random long */ -- *v = rndlong_buf[--rndlong_buf_index]; -- rc = true; -- --out: -- spin_unlock_irqrestore(&rndlong_lock, flags); -- return rc; --} --EXPORT_SYMBOL(s390_arch_get_random_long); -- --static int __init s390_arch_random_init(void) --{ -- /* all the needed PRNO subfunctions available ? */ -- if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) && -- cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) { -- -- /* alloc arch random working buffer */ -- arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL); -- if (!arch_rng_buf) -- return -ENOMEM; -- -- /* kick worker queue job to fill the random buffer */ -- queue_delayed_work(system_long_wq, -- &arch_rng_work, ARCH_REFILL_TICKS); -- -- /* enable arch random to the outside world */ -- static_branch_enable(&s390_arch_random_available); -- } -- -- return 0; --} --arch_initcall(s390_arch_random_init); -diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c -index a279b7d23a5e2..621322eb0e681 100644 ---- a/arch/s390/crypto/paes_s390.c -+++ b/arch/s390/crypto/paes_s390.c -@@ -35,7 +35,7 @@ - * and padding is also possible, the limits need to be generous. 
- */ - #define PAES_MIN_KEYSIZE 16 --#define PAES_MAX_KEYSIZE 320 -+#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE - - static u8 *ctrblk; - static DEFINE_MUTEX(ctrblk_lock); -diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c -index f0bc4dc3e9bf0..6511d15ace45e 100644 ---- a/arch/s390/hypfs/hypfs_diag.c -+++ b/arch/s390/hypfs/hypfs_diag.c -@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void) - int rc; - - if (diag204_probe()) { -- pr_err("The hardware system does not support hypfs\n"); -+ pr_info("The hardware system does not support hypfs\n"); - return -ENODATA; - } - -diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c -index 33f973ff97442..e8f15dbb89d02 100644 ---- a/arch/s390/hypfs/hypfs_vm.c -+++ b/arch/s390/hypfs/hypfs_vm.c -@@ -20,6 +20,7 @@ - - static char local_guest[] = " "; - static char all_guests[] = "* "; -+static char *all_groups = all_guests; - static char *guest_query; - - struct diag2fc_data { -@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr) - - memcpy(parm_list.userid, query, NAME_LEN); - ASCEBC(parm_list.userid, NAME_LEN); -- parm_list.addr = (unsigned long) addr ; -+ memcpy(parm_list.aci_grp, all_groups, NAME_LEN); -+ ASCEBC(parm_list.aci_grp, NAME_LEN); -+ parm_list.addr = (unsigned long)addr; - parm_list.size = size; - parm_list.fmt = 0x02; -- memset(parm_list.aci_grp, 0x40, NAME_LEN); - rc = -1; - - diag_stat_inc(DIAG_STAT_X2FC); -diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c -index 5c97f48cea91d..ee919bfc81867 100644 ---- a/arch/s390/hypfs/inode.c -+++ b/arch/s390/hypfs/inode.c -@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit: - hypfs_vm_exit(); - fail_hypfs_diag_exit: - hypfs_diag_exit(); -+ pr_err("Initialization of hypfs failed with rc=%i\n", rc); - fail_dbfs_exit: - hypfs_dbfs_exit(); -- pr_err("Initialization of hypfs failed with rc=%i\n", rc); - return rc; - } - device_initcall(hypfs_init) -diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h -index 3afbee21dc1f1..859e6d87b108b 100644 ---- a/arch/s390/include/asm/ap.h -+++ b/arch/s390/include/asm/ap.h -@@ -236,7 +236,10 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, - union { - unsigned long value; - struct ap_qirq_ctrl qirqctrl; -- struct ap_queue_status status; -+ struct { -+ u32 _pad; -+ struct ap_queue_status status; -+ }; - } reg1; - void *reg2 = ind; - -@@ -250,7 +253,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid, - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ - : [reg1] "+&d" (reg1) - : [reg0] "d" (reg0), [reg2] "d" (reg2) -- : "cc", "0", "1", "2"); -+ : "cc", "memory", "0", "1", "2"); - - return reg1.status; - } -@@ -287,7 +290,10 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, - unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22); - union { - unsigned long value; -- struct ap_queue_status status; -+ struct { -+ u32 _pad; -+ struct ap_queue_status status; -+ }; - } reg1; - unsigned long reg2; - -diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h -index 5dc712fde3c7f..4120c428dc378 100644 ---- a/arch/s390/include/asm/archrandom.h -+++ b/arch/s390/include/asm/archrandom.h -@@ -2,7 +2,7 @@ - /* - * Kernel interface for the s390 arch_random_* functions - * -- * Copyright IBM Corp. 2017, 2020 -+ * Copyright IBM Corp. 
2017, 2022 - * - * Author: Harald Freudenberger - * -@@ -14,18 +14,15 @@ - #ifdef CONFIG_ARCH_RANDOM - - #include -+#include - #include -+#include - - DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); - extern atomic64_t s390_arch_random_counter; - --bool s390_arch_get_random_long(unsigned long *v); --bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); -- - static inline bool __must_check arch_get_random_long(unsigned long *v) - { -- if (static_branch_likely(&s390_arch_random_available)) -- return s390_arch_get_random_long(v); - return false; - } - -@@ -36,16 +33,22 @@ static inline bool __must_check arch_get_random_int(unsigned int *v) - - static inline bool __must_check arch_get_random_seed_long(unsigned long *v) - { -- if (static_branch_likely(&s390_arch_random_available)) { -- return s390_arch_random_generate((u8 *)v, sizeof(*v)); -+ if (static_branch_likely(&s390_arch_random_available) && -+ in_task()) { -+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); -+ atomic64_add(sizeof(*v), &s390_arch_random_counter); -+ return true; - } - return false; - } - - static inline bool __must_check arch_get_random_seed_int(unsigned int *v) - { -- if (static_branch_likely(&s390_arch_random_available)) { -- return s390_arch_random_generate((u8 *)v, sizeof(*v)); -+ if (static_branch_likely(&s390_arch_random_available) && -+ in_task()) { -+ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); -+ atomic64_add(sizeof(*v), &s390_arch_random_counter); -+ return true; - } - return false; - } -diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h -index 1effac6a01520..1c4f585dd39b6 100644 ---- a/arch/s390/include/asm/cio.h -+++ b/arch/s390/include/asm/cio.h -@@ -369,7 +369,7 @@ void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); - struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); - - /* Function from drivers/s390/cio/chsc.c */ --int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); -+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta); - int chsc_sstpi(void *page, void *result, size_t size); - int chsc_stzi(void *page, void *result, size_t size); - int chsc_sgib(u32 origin); -diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h -index 0d90cbeb89b43..a0914bc6c9bdd 100644 ---- a/arch/s390/include/asm/cpu_mf.h -+++ b/arch/s390/include/asm/cpu_mf.h -@@ -128,19 +128,21 @@ struct hws_combined_entry { - struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ - } __packed; - --struct hws_trailer_entry { -- union { -- struct { -- unsigned int f:1; /* 0 - Block Full Indicator */ -- unsigned int a:1; /* 1 - Alert request control */ -- unsigned int t:1; /* 2 - Timestamp format */ -- unsigned int :29; /* 3 - 31: Reserved */ -- unsigned int bsdes:16; /* 32-47: size of basic SDE */ -- unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ -- }; -- unsigned long long flags; /* 0 - 63: All indicators */ -+union hws_trailer_header { -+ struct { -+ unsigned int f:1; /* 0 - Block Full Indicator */ -+ unsigned int a:1; /* 1 - Alert request control */ -+ unsigned int t:1; /* 2 - Timestamp format */ -+ unsigned int :29; /* 3 - 31: Reserved */ -+ unsigned int bsdes:16; /* 32-47: size of basic SDE */ -+ unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ -+ unsigned long long overflow; /* 64 - Overflow Count */ - }; -- unsigned long long overflow; /* 64 - sample Overflow count */ -+ __uint128_t val; -+}; -+ -+struct hws_trailer_entry { -+ union hws_trailer_header header; /* 0 - 15 Flags + 
Overflow Count */ - unsigned char timestamp[16]; /* 16 - 31 timestamp */ - unsigned long long reserved1; /* 32 -Reserved */ - unsigned long long reserved2; /* */ -@@ -287,14 +289,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, - return USEC_PER_SEC * qsi->cpu_speed / rate; - } - --#define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL --#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL -- - /* Return TOD timestamp contained in an trailer entry */ - static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) - { - /* TOD in STCKE format */ -- if (te->t) -+ if (te->header.t) - return *((unsigned long long *) &te->timestamp[1]); - - /* TOD in STCK format */ -diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h -index 04dc65f8901dc..80b93c06a2bbe 100644 ---- a/arch/s390/include/asm/ctl_reg.h -+++ b/arch/s390/include/asm/ctl_reg.h -@@ -72,8 +72,17 @@ static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) - __ctl_load(reg, cr, cr); - } - --void smp_ctl_set_bit(int cr, int bit); --void smp_ctl_clear_bit(int cr, int bit); -+void smp_ctl_set_clear_bit(int cr, int bit, bool set); -+ -+static inline void ctl_set_bit(int cr, int bit) -+{ -+ smp_ctl_set_clear_bit(cr, bit, true); -+} -+ -+static inline void ctl_clear_bit(int cr, int bit) -+{ -+ smp_ctl_set_clear_bit(cr, bit, false); -+} - - union ctlreg0 { - unsigned long val; -@@ -128,8 +137,5 @@ union ctlreg15 { - }; - }; - --#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) --#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) -- - #endif /* __ASSEMBLY__ */ - #endif /* __ASM_CTL_REG_H */ -diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h -index 19a55e1e3a0c5..5fc91a90657e7 100644 ---- a/arch/s390/include/asm/debug.h -+++ b/arch/s390/include/asm/debug.h -@@ -4,8 +4,8 @@ - * - * Copyright IBM Corp. 
1999, 2020 - */ --#ifndef DEBUG_H --#define DEBUG_H -+#ifndef _ASM_S390_DEBUG_H -+#define _ASM_S390_DEBUG_H - - #include - #include -@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas); - - #endif /* MODULE */ - --#endif /* DEBUG_H */ -+#endif /* _ASM_S390_DEBUG_H */ -diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h -index 16dc57dd90b30..8511f0e59290f 100644 ---- a/arch/s390/include/asm/extable.h -+++ b/arch/s390/include/asm/extable.h -@@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a, - { - a->fixup = b->fixup + delta; - b->fixup = tmp.fixup - delta; -- a->handler = b->handler + delta; -- b->handler = tmp.handler - delta; -+ a->handler = b->handler; -+ if (a->handler) -+ a->handler += delta; -+ b->handler = tmp.handler; -+ if (b->handler) -+ b->handler -= delta; - } -+#define swap_ex_entry_fixup swap_ex_entry_fixup - - #endif -diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h -index c22debfcebf12..bf15767b729f9 100644 ---- a/arch/s390/include/asm/futex.h -+++ b/arch/s390/include/asm/futex.h -@@ -16,7 +16,8 @@ - "3: jl 1b\n" \ - " lhi %0,0\n" \ - "4: sacf 768\n" \ -- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ -+ EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ -+ EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ - : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ - "=m" (*uaddr) \ - : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ -diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h -index 40264f60b0da9..f4073106e1f39 100644 ---- a/arch/s390/include/asm/gmap.h -+++ b/arch/s390/include/asm/gmap.h -@@ -148,4 +148,6 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4], - unsigned long gaddr, unsigned long vmaddr); - int gmap_mark_unmergeable(void); - void s390_reset_acc(struct mm_struct *mm); -+void s390_unlist_old_asce(struct gmap *gmap); -+int s390_replace_asce(struct gmap *gmap); - #endif /* _ASM_S390_GMAP_H */ -diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h -index 60f9241e5e4a6..d3642fb634bd9 100644 ---- a/arch/s390/include/asm/hugetlb.h -+++ b/arch/s390/include/asm/hugetlb.h -@@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) - { -- if (len & ~HPAGE_MASK) -+ struct hstate *h = hstate_file(file); -+ -+ if (len & ~huge_page_mask(h)) - return -EINVAL; -- if (addr & ~HPAGE_MASK) -+ if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; - } -diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h -index ea398a05f6432..63098df81c9f2 100644 ---- a/arch/s390/include/asm/kexec.h -+++ b/arch/s390/include/asm/kexec.h -@@ -9,6 +9,8 @@ - #ifndef _S390_KEXEC_H - #define _S390_KEXEC_H - -+#include -+ - #include - #include - #include -@@ -74,7 +76,21 @@ void *kexec_file_add_components(struct kimage *image, - int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, - unsigned long addr); - -+#define ARCH_HAS_KIMAGE_ARCH -+ -+struct kimage_arch { -+ void *ipl_buf; -+}; -+ - extern const struct kexec_file_ops s390_kexec_image_ops; - extern const struct kexec_file_ops s390_kexec_elf_ops; - -+#ifdef CONFIG_KEXEC_FILE -+struct purgatory_info; -+int arch_kexec_apply_relocations_add(struct purgatory_info *pi, -+ Elf_Shdr *section, -+ const Elf_Shdr *relsec, -+ const Elf_Shdr *symtab); -+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add 
-+#endif - #endif /*_S390_KEXEC_H */ -diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h -index 3c89279d2a4b1..147a8d547ef9e 100644 ---- a/arch/s390/include/asm/os_info.h -+++ b/arch/s390/include/asm/os_info.h -@@ -39,7 +39,7 @@ u32 os_info_csum(struct os_info *os_info); - - #ifdef CONFIG_CRASH_DUMP - void *os_info_old_entry(int nr, unsigned long *size); --int copy_oldmem_kernel(void *dst, void *src, size_t count); -+int copy_oldmem_kernel(void *dst, unsigned long src, size_t count); - #else - static inline void *os_info_old_entry(int nr, unsigned long *size) - { -diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h -index e4dc64cc9c555..287bb88f76986 100644 ---- a/arch/s390/include/asm/pci_io.h -+++ b/arch/s390/include/asm/pci_io.h -@@ -14,12 +14,13 @@ - - /* I/O Map */ - #define ZPCI_IOMAP_SHIFT 48 --#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL -+#define ZPCI_IOMAP_ADDR_SHIFT 62 -+#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT) - #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1) - #define ZPCI_IOMAP_MAX_ENTRIES \ -- ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT)) -+ (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT)) - #define ZPCI_IOMAP_ADDR_IDX_MASK \ -- (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE) -+ ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK) - - struct zpci_iomap_entry { - u32 fh; -diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h -index cb5fc06904354..081837b391e35 100644 ---- a/arch/s390/include/asm/percpu.h -+++ b/arch/s390/include/asm/percpu.h -@@ -31,7 +31,7 @@ - pcp_op_T__ *ptr__; \ - preempt_disable_notrace(); \ - ptr__ = raw_cpu_ptr(&(pcp)); \ -- prev__ = *ptr__; \ -+ prev__ = READ_ONCE(*ptr__); \ - do { \ - old__ = prev__; \ - new__ = old__ op (val); \ -diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h -index d9d5350cc3ec3..bf15da0fedbca 100644 ---- a/arch/s390/include/asm/preempt.h -+++ b/arch/s390/include/asm/preempt.h -@@ -46,10 +46,17 @@ static inline bool test_preempt_need_resched(void) - - static inline void __preempt_count_add(int val) - { -- if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) -- __atomic_add_const(val, &S390_lowcore.preempt_count); -- else -- __atomic_add(val, &S390_lowcore.preempt_count); -+ /* -+ * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES -+ * enabled, gcc 12 fails to handle __builtin_constant_p(). 
-+ */ -+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { -+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { -+ __atomic_add_const(val, &S390_lowcore.preempt_count); -+ return; -+ } -+ } -+ __atomic_add(val, &S390_lowcore.preempt_count); - } - - static inline void __preempt_count_sub(int val) -diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h -index 879b8e3f609cd..d7ca76bb2720f 100644 ---- a/arch/s390/include/asm/processor.h -+++ b/arch/s390/include/asm/processor.h -@@ -318,14 +318,21 @@ extern void (*s390_base_pgm_handler_fn)(void); - - #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL - --extern int memcpy_real(void *, void *, size_t); -+extern int memcpy_real(void *, unsigned long, size_t); - extern void memcpy_absolute(void *, void *, size_t); - --#define mem_assign_absolute(dest, val) do { \ -- __typeof__(dest) __tmp = (val); \ -- \ -- BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \ -- memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ -+#define put_abs_lowcore(member, x) do { \ -+ unsigned long __abs_address = offsetof(struct lowcore, member); \ -+ __typeof__(((struct lowcore *)0)->member) __tmp = (x); \ -+ \ -+ memcpy_absolute(__va(__abs_address), &__tmp, sizeof(__tmp)); \ -+} while (0) -+ -+#define get_abs_lowcore(x, member) do { \ -+ unsigned long __abs_address = offsetof(struct lowcore, member); \ -+ __typeof__(((struct lowcore *)0)->member) *__ptr = &(x); \ -+ \ -+ memcpy_absolute(__ptr, __va(__abs_address), sizeof(*__ptr)); \ - } while (0) - - extern int s390_isolate_bp(void); -diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h -index 50d9b04ecbd14..bc50ee0e91ff1 100644 ---- a/arch/s390/include/asm/timex.h -+++ b/arch/s390/include/asm/timex.h -@@ -201,6 +201,7 @@ static inline cycles_t get_cycles(void) - { - return (cycles_t) get_tod_clock() >> 2; - } -+#define get_cycles get_cycles - - int get_phys_clock(unsigned long *clock); - void init_cpu_timer(void); -diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h -index ce550d06abc36..3379694e9a42f 100644 ---- a/arch/s390/include/asm/uaccess.h -+++ b/arch/s390/include/asm/uaccess.h -@@ -245,7 +245,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo - return __clear_user(to, n); - } - --int copy_to_user_real(void __user *dest, void *src, unsigned long count); -+int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count); - void *s390_kernel_write(void *dst, const void *src, size_t size); - - #define HAVE_GET_KERNEL_NOFAULT -diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c -index b57da93385888..9242d7ad71e79 100644 ---- a/arch/s390/kernel/asm-offsets.c -+++ b/arch/s390/kernel/asm-offsets.c -@@ -128,6 +128,8 @@ int main(void) - OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); - /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ - OFFSET(__LC_DUMP_REIPL, lowcore, ipib); -+ OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); -+ OFFSET(__LC_OS_INFO, lowcore, os_info); - /* hardware defined lowcore locations 0x1000 - 0x18ff */ - OFFSET(__LC_MCESAD, lowcore, mcesad); - OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2); -diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c -index d72a6df058d79..8722bd07c6079 100644 ---- a/arch/s390/kernel/crash_dump.c -+++ b/arch/s390/kernel/crash_dump.c -@@ -44,7 +44,7 @@ struct save_area { - u64 fprs[16]; - u32 fpc; - u32 prefix; -- u64 todpreg; -+ u32 todpreg; - u64 timer; - u64 todcmp; - u64 
vxrs_low[16]; -@@ -132,28 +132,27 @@ static inline void *load_real_addr(void *addr) - /* - * Copy memory of the old, dumped system to a kernel space virtual address - */ --int copy_oldmem_kernel(void *dst, void *src, size_t count) -+int copy_oldmem_kernel(void *dst, unsigned long src, size_t count) - { -- unsigned long from, len; -+ unsigned long len; - void *ra; - int rc; - - while (count) { -- from = __pa(src); -- if (!oldmem_data.start && from < sclp.hsa_size) { -+ if (!oldmem_data.start && src < sclp.hsa_size) { - /* Copy from zfcp/nvme dump HSA area */ -- len = min(count, sclp.hsa_size - from); -- rc = memcpy_hsa_kernel(dst, from, len); -+ len = min(count, sclp.hsa_size - src); -+ rc = memcpy_hsa_kernel(dst, src, len); - if (rc) - return rc; - } else { - /* Check for swapped kdump oldmem areas */ -- if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) { -- from -= oldmem_data.start; -- len = min(count, oldmem_data.size - from); -- } else if (oldmem_data.start && from < oldmem_data.size) { -- len = min(count, oldmem_data.size - from); -- from += oldmem_data.start; -+ if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) { -+ src -= oldmem_data.start; -+ len = min(count, oldmem_data.size - src); -+ } else if (oldmem_data.start && src < oldmem_data.size) { -+ len = min(count, oldmem_data.size - src); -+ src += oldmem_data.start; - } else { - len = count; - } -@@ -163,7 +162,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count) - } else { - ra = dst; - } -- if (memcpy_real(ra, (void *) from, len)) -+ if (memcpy_real(ra, src, len)) - return -EFAULT; - } - dst += len; -@@ -176,31 +175,30 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count) - /* - * Copy memory of the old, dumped system to a user space virtual address - */ --static int copy_oldmem_user(void __user *dst, void *src, size_t count) -+static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count) - { -- unsigned long from, len; -+ unsigned long len; - int rc; - - while (count) { -- from = __pa(src); -- if (!oldmem_data.start && from < sclp.hsa_size) { -+ if (!oldmem_data.start && src < sclp.hsa_size) { - /* Copy from zfcp/nvme dump HSA area */ -- len = min(count, sclp.hsa_size - from); -- rc = memcpy_hsa_user(dst, from, len); -+ len = min(count, sclp.hsa_size - src); -+ rc = memcpy_hsa_user(dst, src, len); - if (rc) - return rc; - } else { - /* Check for swapped kdump oldmem areas */ -- if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) { -- from -= oldmem_data.size; -- len = min(count, oldmem_data.size - from); -- } else if (oldmem_data.start && from < oldmem_data.size) { -- len = min(count, oldmem_data.size - from); -- from += oldmem_data.start; -+ if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) { -+ src -= oldmem_data.start; -+ len = min(count, oldmem_data.size - src); -+ } else if (oldmem_data.start && src < oldmem_data.size) { -+ len = min(count, oldmem_data.size - src); -+ src += oldmem_data.start; - } else { - len = count; - } -- rc = copy_to_user_real(dst, (void *) from, count); -+ rc = copy_to_user_real(dst, src, len); - if (rc) - return rc; - } -@@ -217,12 +215,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count) - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, - unsigned long offset, int userbuf) - { -- void *src; -+ unsigned long src; - int rc; - - if (!csize) - return 0; -- src = (void *) (pfn << PAGE_SHIFT) + offset; -+ src = pfn_to_phys(pfn) + offset; - if (userbuf) 
- rc = copy_oldmem_user((void __force __user *) buf, src, csize); - else -@@ -429,10 +427,10 @@ static void *nt_prpsinfo(void *ptr) - static void *get_vmcoreinfo_old(unsigned long *size) - { - char nt_name[11], *vmcoreinfo; -+ unsigned long addr; - Elf64_Nhdr note; -- void *addr; - -- if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) -+ if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr))) - return NULL; - memset(nt_name, 0, sizeof(nt_name)); - if (copy_oldmem_kernel(¬e, addr, sizeof(note))) -diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c -index db1bc00229caf..272ef8597e208 100644 ---- a/arch/s390/kernel/dumpstack.c -+++ b/arch/s390/kernel/dumpstack.c -@@ -224,5 +224,5 @@ void die(struct pt_regs *regs, const char *str) - if (panic_on_oops) - panic("Fatal exception: panic_on_oops"); - oops_exit(); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } -diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S -index 4c9b967290ae0..d530eb4dc413f 100644 ---- a/arch/s390/kernel/entry.S -+++ b/arch/s390/kernel/entry.S -@@ -248,6 +248,10 @@ ENTRY(sie64a) - BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) - .Lsie_entry: - sie 0(%r14) -+# Let the next instruction be NOP to avoid triggering a machine check -+# and handling it in a guest as result of the instruction execution. -+ nopr 7 -+.Lsie_leave: - BPOFF - BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) - .Lsie_skip: -@@ -536,7 +540,7 @@ ENTRY(mcck_int_handler) - jno .Lmcck_panic - #if IS_ENABLED(CONFIG_KVM) - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f -- OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f -+ OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f - oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST - j 5f - 4: CHKSTG .Lmcck_panic -diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h -index 7f2696e8d511e..6083090be1f46 100644 ---- a/arch/s390/kernel/entry.h -+++ b/arch/s390/kernel/entry.h -@@ -70,5 +70,6 @@ extern struct exception_table_entry _stop_amode31_ex_table[]; - #define __amode31_data __section(".amode31.data") - #define __amode31_ref __section(".amode31.refs") - extern long _start_amode31_refs[], _end_amode31_refs[]; -+extern unsigned long __amode31_base; - - #endif /* _ENTRY_H */ -diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c -index 1d94ffdf347bb..5d0c45c13b5fa 100644 ---- a/arch/s390/kernel/ftrace.c -+++ b/arch/s390/kernel/ftrace.c -@@ -80,17 +80,6 @@ asm( - - #ifdef CONFIG_MODULES - static char *ftrace_plt; -- --asm( -- " .data\n" -- "ftrace_plt_template:\n" -- " basr %r1,%r0\n" -- " lg %r1,0f-.(%r1)\n" -- " br %r1\n" -- "0: .quad ftrace_caller\n" -- "ftrace_plt_template_end:\n" -- " .previous\n" --); - #endif /* CONFIG_MODULES */ - - static const char *ftrace_shared_hotpatch_trampoline(const char **end) -@@ -116,7 +105,7 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end) - - bool ftrace_need_init_nop(void) - { -- return ftrace_shared_hotpatch_trampoline(NULL); -+ return true; - } - - int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) -@@ -175,28 +164,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, - return 0; - } - --static void ftrace_generate_nop_insn(struct ftrace_insn *insn) --{ -- /* brcl 0,0 */ -- insn->opc = 0xc004; -- insn->disp = 0; --} -- --static void ftrace_generate_call_insn(struct ftrace_insn *insn, -- unsigned long ip) --{ -- unsigned long target; -- -- /* brasl r0,ftrace_caller */ -- target = FTRACE_ADDR; --#ifdef CONFIG_MODULES -- if 
(is_module_addr((void *)ip)) -- target = (unsigned long)ftrace_plt; --#endif /* CONFIG_MODULES */ -- insn->opc = 0xc005; -- insn->disp = (target - ip) / 2; --} -- - static void brcl_disable(void *brcl) - { - u8 op = 0x04; /* set mask field to zero */ -@@ -207,23 +174,7 @@ static void brcl_disable(void *brcl) - int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, - unsigned long addr) - { -- struct ftrace_insn orig, new, old; -- -- if (ftrace_shared_hotpatch_trampoline(NULL)) { -- brcl_disable((void *)rec->ip); -- return 0; -- } -- -- if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old))) -- return -EFAULT; -- /* Replace ftrace call with a nop. */ -- ftrace_generate_call_insn(&orig, rec->ip); -- ftrace_generate_nop_insn(&new); -- -- /* Verify that the to be replaced code matches what we expect. */ -- if (memcmp(&orig, &old, sizeof(old))) -- return -EINVAL; -- s390_kernel_write((void *) rec->ip, &new, sizeof(new)); -+ brcl_disable((void *)rec->ip); - return 0; - } - -@@ -236,23 +187,7 @@ static void brcl_enable(void *brcl) - - int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) - { -- struct ftrace_insn orig, new, old; -- -- if (ftrace_shared_hotpatch_trampoline(NULL)) { -- brcl_enable((void *)rec->ip); -- return 0; -- } -- -- if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old))) -- return -EFAULT; -- /* Replace nop with an ftrace call. */ -- ftrace_generate_nop_insn(&orig); -- ftrace_generate_call_insn(&new, rec->ip); -- -- /* Verify that the to be replaced code matches what we expect. */ -- if (memcmp(&orig, &old, sizeof(old))) -- return -EINVAL; -- s390_kernel_write((void *) rec->ip, &new, sizeof(new)); -+ brcl_enable((void *)rec->ip); - return 0; - } - -@@ -269,10 +204,7 @@ int __init ftrace_dyn_arch_init(void) - - void arch_ftrace_update_code(int command) - { -- if (ftrace_shared_hotpatch_trampoline(NULL)) -- ftrace_modify_all_code(command); -- else -- ftrace_run_stop_machine(command); -+ ftrace_modify_all_code(command); - } - - static void __ftrace_sync(void *dummy) -@@ -281,10 +213,8 @@ static void __ftrace_sync(void *dummy) - - int ftrace_arch_code_modify_post_process(void) - { -- if (ftrace_shared_hotpatch_trampoline(NULL)) { -- /* Send SIGP to the other CPUs, so they see the new code. */ -- smp_call_function(__ftrace_sync, NULL, 1); -- } -+ /* Send SIGP to the other CPUs, so they see the new code. 
*/ -+ smp_call_function(__ftrace_sync, NULL, 1); - return 0; - } - -@@ -299,10 +229,6 @@ static int __init ftrace_plt_init(void) - panic("cannot allocate ftrace plt\n"); - - start = ftrace_shared_hotpatch_trampoline(&end); -- if (!start) { -- start = ftrace_plt_template; -- end = ftrace_plt_template_end; -- } - memcpy(ftrace_plt, start, end - start); - set_memory_ro((unsigned long)ftrace_plt, 1); - return 0; -diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c -index 4bf1ee293f2b3..a0da049e73609 100644 ---- a/arch/s390/kernel/idle.c -+++ b/arch/s390/kernel/idle.c -@@ -44,7 +44,7 @@ void account_idle_time_irq(void) - S390_lowcore.last_update_timer = idle->timer_idle_exit; - } - --void arch_cpu_idle(void) -+void noinstr arch_cpu_idle(void) - { - struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); - unsigned long idle_time; -diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c -index e2cc35775b996..834b1ec5dd7a0 100644 ---- a/arch/s390/kernel/ipl.c -+++ b/arch/s390/kernel/ipl.c -@@ -502,6 +502,8 @@ static struct attribute_group ipl_ccw_attr_group_lpar = { - - static struct attribute *ipl_unknown_attrs[] = { - &sys_ipl_type_attr.attr, -+ &sys_ipl_secure_attr.attr, -+ &sys_ipl_has_secure_attr.attr, - NULL, - }; - -@@ -1646,8 +1648,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) - - csum = (__force unsigned int) - csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); -- mem_assign_absolute(S390_lowcore.ipib, ipib); -- mem_assign_absolute(S390_lowcore.ipib_checksum, csum); -+ put_abs_lowcore(ipib, ipib); -+ put_abs_lowcore(ipib_checksum, csum); - dump_run(trigger); - } - -@@ -2156,7 +2158,7 @@ void *ipl_report_finish(struct ipl_report *report) - - buf = vzalloc(report->size); - if (!buf) -- return ERR_PTR(-ENOMEM); -+ goto out; - ptr = buf; - - memcpy(ptr, report->ipib, report->ipib->hdr.len); -@@ -2195,6 +2197,7 @@ void *ipl_report_finish(struct ipl_report *report) - } - - BUG_ON(ptr > buf + report->size); -+out: - return buf; - } - -diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c -index 3a3145c4a3ba4..be5d432b902e0 100644 ---- a/arch/s390/kernel/irq.c -+++ b/arch/s390/kernel/irq.c -@@ -138,7 +138,7 @@ void noinstr do_io_irq(struct pt_regs *regs) - struct pt_regs *old_regs = set_irq_regs(regs); - int from_idle; - -- irq_enter(); -+ irq_enter_rcu(); - - if (user_mode(regs)) - update_timer_sys(); -@@ -155,7 +155,8 @@ void noinstr do_io_irq(struct pt_regs *regs) - do_irq_async(regs, IO_INTERRUPT); - } while (MACHINE_IS_LPAR && irq_pending(regs)); - -- irq_exit(); -+ irq_exit_rcu(); -+ - set_irq_regs(old_regs); - irqentry_exit(regs, state); - -@@ -169,7 +170,7 @@ void noinstr do_ext_irq(struct pt_regs *regs) - struct pt_regs *old_regs = set_irq_regs(regs); - int from_idle; - -- irq_enter(); -+ irq_enter_rcu(); - - if (user_mode(regs)) - update_timer_sys(); -@@ -184,7 +185,7 @@ void noinstr do_ext_irq(struct pt_regs *regs) - - do_irq_async(regs, EXT_INTERRUPT); - -- irq_exit(); -+ irq_exit_rcu(); - set_irq_regs(old_regs); - irqentry_exit(regs, state); - -diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c -index 52d056a5f89fc..fbc0bf417ec66 100644 ---- a/arch/s390/kernel/kprobes.c -+++ b/arch/s390/kernel/kprobes.c -@@ -7,6 +7,8 @@ - * s390 port, used ppc64 as template. 
Mike Grundy - */ - -+#define pr_fmt(fmt) "kprobes: " fmt -+ - #include - #include - #include -@@ -231,6 +233,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb) - { - __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); - kcb->kprobe_status = kcb->prev_kprobe.status; -+ kcb->prev_kprobe.kp = NULL; - } - NOKPROBE_SYMBOL(pop_kprobe); - -@@ -259,7 +262,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) - * is a BUG. The code path resides in the .kprobes.text - * section and is executed with interrupts disabled. - */ -- pr_err("Invalid kprobe detected.\n"); -+ pr_err("Failed to recover from reentered kprobes.\n"); - dump_kprobe(p); - BUG(); - } -@@ -392,12 +395,11 @@ static int post_kprobe_handler(struct pt_regs *regs) - if (!p) - return 0; - -+ resume_execution(p, regs); - if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) { - kcb->kprobe_status = KPROBE_HIT_SSDONE; - p->post_handler(p, regs, 0); - } -- -- resume_execution(p, regs); - pop_kprobe(kcb); - preempt_enable_no_resched(); - -diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c -index 0505e55a62979..4b95684fbe46e 100644 ---- a/arch/s390/kernel/machine_kexec.c -+++ b/arch/s390/kernel/machine_kexec.c -@@ -227,7 +227,7 @@ void arch_crash_save_vmcoreinfo(void) - vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); - vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); - vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); -- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); -+ put_abs_lowcore(vmcore_info, paddr_vmcoreinfo_note()); - } - - void machine_shutdown(void) -diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c -index f9e4baa64b675..c7fd818512890 100644 ---- a/arch/s390/kernel/machine_kexec_file.c -+++ b/arch/s390/kernel/machine_kexec_file.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -28,6 +29,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) - const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1; - struct module_signature *ms; - unsigned long sig_len; -+ int ret; - - /* Skip signature verification when not secure IPLed. 
*/ - if (!ipl_secure_flag) -@@ -62,11 +64,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) - return -EBADMSG; - } - -- return verify_pkcs7_signature(kernel, kernel_len, -- kernel + kernel_len, sig_len, -- VERIFY_USE_PLATFORM_KEYRING, -- VERIFYING_MODULE_SIGNATURE, -- NULL, NULL); -+ ret = verify_pkcs7_signature(kernel, kernel_len, -+ kernel + kernel_len, sig_len, -+ VERIFY_USE_SECONDARY_KEYRING, -+ VERIFYING_MODULE_SIGNATURE, -+ NULL, NULL); -+ if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) -+ ret = verify_pkcs7_signature(kernel, kernel_len, -+ kernel + kernel_len, sig_len, -+ VERIFY_USE_PLATFORM_KEYRING, -+ VERIFYING_MODULE_SIGNATURE, -+ NULL, NULL); -+ return ret; - } - #endif /* CONFIG_KEXEC_SIG */ - -@@ -170,13 +179,12 @@ static int kexec_file_add_ipl_report(struct kimage *image, - struct kexec_buf buf; - unsigned long addr; - void *ptr, *end; -+ int ret; - - buf.image = image; - - data->memsz = ALIGN(data->memsz, PAGE_SIZE); - buf.mem = data->memsz; -- if (image->type == KEXEC_TYPE_CRASH) -- buf.mem += crashk_res.start; - - ptr = (void *)ipl_cert_list_addr; - end = ptr + ipl_cert_list_size; -@@ -199,9 +207,13 @@ static int kexec_file_add_ipl_report(struct kimage *image, - ptr += len; - } - -+ ret = -ENOMEM; - buf.buffer = ipl_report_finish(data->report); -+ if (!buf.buffer) -+ goto out; - buf.bufsz = data->report->size; - buf.memsz = buf.bufsz; -+ image->arch.ipl_buf = buf.buffer; - - data->memsz += buf.memsz; - -@@ -209,7 +221,12 @@ static int kexec_file_add_ipl_report(struct kimage *image, - data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); - *lc_ipl_parmblock_ptr = (__u32)buf.mem; - -- return kexec_add_buffer(&buf); -+ if (image->type == KEXEC_TYPE_CRASH) -+ buf.mem += crashk_res.start; -+ -+ ret = kexec_add_buffer(&buf); -+out: -+ return ret; - } - - void *kexec_file_add_components(struct kimage *image, -@@ -269,6 +286,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, - { - Elf_Rela *relas; - int i, r_type; -+ int ret; - - relas = (void *)pi->ehdr + relsec->sh_offset; - -@@ -303,7 +321,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, - addr = section->sh_addr + relas[i].r_offset; - - r_type = ELF64_R_TYPE(relas[i].r_info); -- arch_kexec_do_relocs(r_type, loc, val, addr); -+ -+ if (r_type == R_390_PLT32DBL) -+ r_type = R_390_PC32DBL; -+ -+ ret = arch_kexec_do_relocs(r_type, loc, val, addr); -+ if (ret) { -+ pr_err("Unknown rela relocation: %d\n", r_type); -+ return -ENOEXEC; -+ } - } - return 0; - } -@@ -321,3 +347,11 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - - return kexec_image_probe_default(image, buf, buf_len); - } -+ -+int arch_kimage_file_post_load_cleanup(struct kimage *image) -+{ -+ vfree(image->arch.ipl_buf); -+ image->arch.ipl_buf = NULL; -+ -+ return kexec_image_post_load_cleanup_default(image); -+} -diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c -index b01ba460b7cad..b032e556eeb71 100644 ---- a/arch/s390/kernel/module.c -+++ b/arch/s390/kernel/module.c -@@ -33,18 +33,19 @@ - #define DEBUGP(fmt , ...) 
- #endif - --#define PLT_ENTRY_SIZE 20 -+#define PLT_ENTRY_SIZE 22 - - void *module_alloc(unsigned long size) - { -+ gfp_t gfp_mask = GFP_KERNEL; - void *p; - - if (PAGE_ALIGN(size) > MODULES_LEN) - return NULL; - p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, -- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, -+ gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, - __builtin_return_address(0)); -- if (p && (kasan_module_alloc(p, size) < 0)) { -+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { - vfree(p); - return NULL; - } -@@ -340,27 +341,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ - case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ - if (info->plt_initialized == 0) { -- unsigned int insn[5]; -- unsigned int *ip = me->core_layout.base + -- me->arch.plt_offset + -- info->plt_offset; -- -- insn[0] = 0x0d10e310; /* basr 1,0 */ -- insn[1] = 0x100a0004; /* lg 1,10(1) */ -+ unsigned char insn[PLT_ENTRY_SIZE]; -+ char *plt_base; -+ char *ip; -+ -+ plt_base = me->core_layout.base + me->arch.plt_offset; -+ ip = plt_base + info->plt_offset; -+ *(int *)insn = 0x0d10e310; /* basr 1,0 */ -+ *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */ - if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { -- unsigned int *ij; -- ij = me->core_layout.base + -- me->arch.plt_offset + -- me->arch.plt_size - PLT_ENTRY_SIZE; -- insn[2] = 0xa7f40000 + /* j __jump_r1 */ -- (unsigned int)(u16) -- (((unsigned long) ij - 8 - -- (unsigned long) ip) / 2); -+ char *jump_r1; -+ -+ jump_r1 = plt_base + me->arch.plt_size - -+ PLT_ENTRY_SIZE; -+ /* brcl 0xf,__jump_r1 */ -+ *(short *)&insn[8] = 0xc0f4; -+ *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2; - } else { -- insn[2] = 0x07f10000; /* br %r1 */ -+ *(int *)&insn[8] = 0x07f10000; /* br %r1 */ - } -- insn[3] = (unsigned int) (val >> 32); -- insn[4] = (unsigned int) val; -+ *(long *)&insn[14] = val; - - write(ip, insn, sizeof(insn)); - info->plt_initialized = 1; -diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c -index 20f8e1868853f..d4f071e73a0a6 100644 ---- a/arch/s390/kernel/nmi.c -+++ b/arch/s390/kernel/nmi.c -@@ -62,7 +62,7 @@ static inline unsigned long nmi_get_mcesa_size(void) - * The structure is required for machine check happening early in - * the boot process. - */ --static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE); -+static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE); - - void __init nmi_alloc_boot_cpu(struct lowcore *lc) - { -@@ -175,7 +175,7 @@ void __s390_handle_mcck(void) - "malfunction (code 0x%016lx).\n", mcck.mcck_code); - printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", - current->comm, current->pid); -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - } - -@@ -273,7 +273,14 @@ static int notrace s390_validate_registers(union mci mci, int umode) - /* Validate vector registers */ - union ctlreg0 cr0; - -- if (!mci.vr) { -+ /* -+ * The vector validity must only be checked if not running a -+ * KVM guest. For KVM guests the machine check is forwarded by -+ * KVM and it is the responsibility of the guest to take -+ * appropriate actions. The host vector or FPU values have been -+ * saved by KVM and will be restored by KVM. -+ */ -+ if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) { - /* - * Vector registers can't be restored. 
If the kernel - * currently uses vector registers the system is -@@ -316,11 +323,21 @@ static int notrace s390_validate_registers(union mci mci, int umode) - if (cr2.gse) { - if (!mci.gs) { - /* -- * Guarded storage register can't be restored and -- * the current processes uses guarded storage. -- * It has to be terminated. -+ * 2 cases: -+ * - machine check in kernel or userspace -+ * - machine check while running SIE (KVM guest) -+ * For kernel or userspace the userspace values of -+ * guarded storage control can not be recreated, the -+ * process must be terminated. -+ * For SIE the guest values of guarded storage can not -+ * be recreated. This is either due to a bug or due to -+ * GS being disabled in the guest. The guest will be -+ * notified by KVM code and the guests machine check -+ * handling must take care of this. The host values -+ * are saved by KVM and are not affected. - */ -- kill_task = 1; -+ if (!test_cpu_flag(CIF_MCCK_GUEST)) -+ kill_task = 1; - } else { - load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area); - } -diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c -index 4bef35b79b938..1acc2e05d70f0 100644 ---- a/arch/s390/kernel/os_info.c -+++ b/arch/s390/kernel/os_info.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - /* - * OS info structure has to be page aligned -@@ -45,7 +46,7 @@ void os_info_crashkernel_add(unsigned long base, unsigned long size) - */ - void os_info_entry_add(int nr, void *ptr, u64 size) - { -- os_info.entry[nr].addr = (u64)(unsigned long)ptr; -+ os_info.entry[nr].addr = __pa(ptr); - os_info.entry[nr].size = size; - os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0); - os_info.csum = os_info_csum(&os_info); -@@ -62,7 +63,7 @@ void __init os_info_init(void) - os_info.version_minor = OS_INFO_VERSION_MINOR; - os_info.magic = OS_INFO_MAGIC; - os_info.csum = os_info_csum(&os_info); -- mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr); -+ put_abs_lowcore(os_info, __pa(ptr)); - } - - #ifdef CONFIG_CRASH_DUMP -@@ -90,7 +91,7 @@ static void os_info_old_alloc(int nr, int align) - goto fail; - } - buf_align = PTR_ALIGN(buf, align); -- if (copy_oldmem_kernel(buf_align, (void *) addr, size)) { -+ if (copy_oldmem_kernel(buf_align, addr, size)) { - msg = "copy failed"; - goto fail_free; - } -@@ -123,15 +124,14 @@ static void os_info_old_init(void) - return; - if (!oldmem_data.start) - goto fail; -- if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr))) -+ if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr))) - goto fail; - if (addr == 0 || addr % PAGE_SIZE) - goto fail; - os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL); - if (!os_info_old) - goto fail; -- if (copy_oldmem_kernel(os_info_old, (void *) addr, -- sizeof(*os_info_old))) -+ if (copy_oldmem_kernel(os_info_old, addr, sizeof(*os_info_old))) - goto fail_free; - if (os_info_old->magic != OS_INFO_MAGIC) - goto fail_free; -diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c -index 4a99154fe6514..d2a2a18b55808 100644 ---- a/arch/s390/kernel/perf_cpum_cf.c -+++ b/arch/s390/kernel/perf_cpum_cf.c -@@ -516,6 +516,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) - return err; - } - -+/* Events CPU_CYLCES and INSTRUCTIONS can be submitted with two different -+ * attribute::type values: -+ * - PERF_TYPE_HARDWARE: -+ * - pmu->type: -+ * Handle both type of invocations identical. They address the same hardware. 
-+ * The result is different when event modifiers exclude_kernel and/or -+ * exclude_user are also set. -+ */ -+static int cpumf_pmu_event_type(struct perf_event *event) -+{ -+ u64 ev = event->attr.config; -+ -+ if (cpumf_generic_events_basic[PERF_COUNT_HW_CPU_CYCLES] == ev || -+ cpumf_generic_events_basic[PERF_COUNT_HW_INSTRUCTIONS] == ev || -+ cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev || -+ cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev) -+ return PERF_TYPE_HARDWARE; -+ return PERF_TYPE_RAW; -+} -+ - static int cpumf_pmu_event_init(struct perf_event *event) - { - unsigned int type = event->attr.type; -@@ -525,7 +545,7 @@ static int cpumf_pmu_event_init(struct perf_event *event) - err = __hw_perf_event_init(event, type); - else if (event->pmu->type == type) - /* Registered as unknown PMU */ -- err = __hw_perf_event_init(event, PERF_TYPE_RAW); -+ err = __hw_perf_event_init(event, cpumf_pmu_event_type(event)); - else - return -ENOENT; - -@@ -687,8 +707,10 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) - false); - if (cfdiag_diffctr(cpuhw, event->hw.config_base)) - cfdiag_push_sample(event, cpuhw); -- } else -+ } else if (cpuhw->flags & PMU_F_RESERVED) { -+ /* Only update when PMU not hotplugged off */ - hw_perf_event_update(event); -+ } - hwc->state |= PERF_HES_UPTODATE; - } - } -diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c -index db62def4ef28e..4e6fadaeaa1a6 100644 ---- a/arch/s390/kernel/perf_cpum_sf.c -+++ b/arch/s390/kernel/perf_cpum_sf.c -@@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb) - - static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) - { -- unsigned long sdb, *trailer; -+ struct hws_trailer_entry *te; -+ unsigned long sdb; - - /* Allocate and initialize sample-data-block */ - sdb = get_zeroed_page(gfp_flags); - if (!sdb) - return -ENOMEM; -- trailer = trailer_entry_ptr(sdb); -- *trailer = SDB_TE_ALERT_REQ_MASK; -+ te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb); -+ te->header.a = 1; - - /* Link SDB into the sample-data-block-table */ - *sdbt = sdb; -@@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, - "%s: Found unknown" - " sampling data entry: te->f %i" - " basic.def %#4x (%p)\n", __func__, -- te->f, sample->def, sample); -+ te->header.f, sample->def, sample); - /* Sample slot is not yet written or other record. - * - * This condition can occur if the buffer was reused -@@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, - * that are not full. Stop processing if the first - * invalid format was detected. 
- */ -- if (!te->f) -+ if (!te->header.f) - break; - } - -@@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, - } - } - -+static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new) -+{ -+ asm volatile( -+ " cdsg %[old],%[new],%[ptr]\n" -+ : [old] "+d" (old), [ptr] "+QS" (*ptr) -+ : [new] "d" (new) -+ : "memory", "cc"); -+ return old; -+} -+ - /* hw_perf_event_update() - Process sampling buffer - * @event: The perf event - * @flush_all: Flag to also flush partially filled sample-data-blocks -@@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, - */ - static void hw_perf_event_update(struct perf_event *event, int flush_all) - { -+ unsigned long long event_overflow, sampl_overflow, num_sdb; -+ union hws_trailer_header old, prev, new; - struct hw_perf_event *hwc = &event->hw; - struct hws_trailer_entry *te; - unsigned long *sdbt; -- unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; - int done; - - /* -@@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) - te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); - - /* Leave loop if no more work to do (block full indicator) */ -- if (!te->f) { -+ if (!te->header.f) { - done = 1; - if (!flush_all) - break; - } - - /* Check the sample overflow count */ -- if (te->overflow) -+ if (te->header.overflow) - /* Account sample overflows and, if a particular limit - * is reached, extend the sampling buffer. - * For details, see sfb_account_overflows(). - */ -- sampl_overflow += te->overflow; -+ sampl_overflow += te->header.overflow; - - /* Timestamps are valid for full sample-data-blocks only */ - debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " - "overflow %llu timestamp %#llx\n", -- __func__, (unsigned long)sdbt, te->overflow, -- (te->f) ? trailer_timestamp(te) : 0ULL); -+ __func__, (unsigned long)sdbt, te->header.overflow, -+ (te->header.f) ? trailer_timestamp(te) : 0ULL); - - /* Collect all samples from a single sample-data-block and - * flag if an (perf) event overflow happened. 
If so, the PMU -@@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all) - num_sdb++; - - /* Reset trailer (using compare-double-and-swap) */ -+ /* READ_ONCE() 16 byte header */ -+ prev.val = __cdsg(&te->header.val, 0, 0); - do { -- te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; -- te_flags |= SDB_TE_ALERT_REQ_MASK; -- } while (!cmpxchg_double(&te->flags, &te->overflow, -- te->flags, te->overflow, -- te_flags, 0ULL)); -+ old.val = prev.val; -+ new.val = prev.val; -+ new.f = 0; -+ new.a = 1; -+ new.overflow = 0; -+ prev.val = __cdsg(&te->header.val, old.val, new.val); -+ } while (prev.val != old.val); - - /* Advance to next sample-data-block */ - sdbt++; -@@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle) - range_scan = AUX_SDB_NUM_ALERT(aux); - for (i = 0, idx = aux->head; i < range_scan; i++, idx++) { - te = aux_sdb_trailer(aux, idx); -- if (!(te->flags & SDB_TE_BUFFER_FULL_MASK)) -+ if (!te->header.f) - break; - } - /* i is num of SDBs which are full */ -@@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle) - - /* Remove alert indicators in the buffer */ - te = aux_sdb_trailer(aux, aux->alert_mark); -- te->flags &= ~SDB_TE_ALERT_REQ_MASK; -+ te->header.a = 0; - - debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n", - __func__, i, range_scan, aux->head); -@@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle, - idx = aux->empty_mark + 1; - for (i = 0; i < range_scan; i++, idx++) { - te = aux_sdb_trailer(aux, idx); -- te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | -- SDB_TE_ALERT_REQ_MASK); -- te->overflow = 0; -+ te->header.f = 0; -+ te->header.a = 0; -+ te->header.overflow = 0; - } - /* Save the position of empty SDBs */ - aux->empty_mark = aux->head + range - 1; -@@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle, - /* Set alert indicator */ - aux->alert_mark = aux->head + range/2 - 1; - te = aux_sdb_trailer(aux, aux->alert_mark); -- te->flags = te->flags | SDB_TE_ALERT_REQ_MASK; -+ te->header.a = 1; - - /* Reset hardware buffer head */ - head = AUX_SDB_INDEX(aux, aux->head); -@@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle, - static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, - unsigned long long *overflow) - { -- unsigned long long orig_overflow, orig_flags, new_flags; -+ union hws_trailer_header old, prev, new; - struct hws_trailer_entry *te; - - te = aux_sdb_trailer(aux, alert_index); -+ /* READ_ONCE() 16 byte header */ -+ prev.val = __cdsg(&te->header.val, 0, 0); - do { -- orig_flags = te->flags; -- *overflow = orig_overflow = te->overflow; -- if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { -+ old.val = prev.val; -+ new.val = prev.val; -+ *overflow = old.overflow; -+ if (old.f) { - /* - * SDB is already set by hardware. 
- * Abort and try to set somewhere -@@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, - */ - return false; - } -- new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK; -- } while (!cmpxchg_double(&te->flags, &te->overflow, -- orig_flags, orig_overflow, -- new_flags, 0ULL)); -+ new.a = 1; -+ new.overflow = 0; -+ prev.val = __cdsg(&te->header.val, old.val, new.val); -+ } while (prev.val != old.val); - return true; - } - -@@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, - static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, - unsigned long long *overflow) - { -- unsigned long long orig_overflow, orig_flags, new_flags; - unsigned long i, range_scan, idx, idx_old; -+ union hws_trailer_header old, prev, new; -+ unsigned long long orig_overflow; - struct hws_trailer_entry *te; - - debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld " -@@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, - idx_old = idx = aux->empty_mark + 1; - for (i = 0; i < range_scan; i++, idx++) { - te = aux_sdb_trailer(aux, idx); -+ /* READ_ONCE() 16 byte header */ -+ prev.val = __cdsg(&te->header.val, 0, 0); - do { -- orig_flags = te->flags; -- orig_overflow = te->overflow; -- new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK; -+ old.val = prev.val; -+ new.val = prev.val; -+ orig_overflow = old.overflow; -+ new.f = 0; -+ new.overflow = 0; - if (idx == aux->alert_mark) -- new_flags |= SDB_TE_ALERT_REQ_MASK; -+ new.a = 1; - else -- new_flags &= ~SDB_TE_ALERT_REQ_MASK; -- } while (!cmpxchg_double(&te->flags, &te->overflow, -- orig_flags, orig_overflow, -- new_flags, 0ULL)); -+ new.a = 0; -+ prev.val = __cdsg(&te->header.val, old.val, new.val); -+ } while (prev.val != old.val); - *overflow += orig_overflow; - } - -diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c -index ea7729bebaa07..a7f8db73984b0 100644 ---- a/arch/s390/kernel/perf_event.c -+++ b/arch/s390/kernel/perf_event.c -@@ -30,7 +30,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) - if (!stack) - return NULL; - -- return (struct kvm_s390_sie_block *) stack->empty1[0]; -+ return (struct kvm_s390_sie_block *)stack->empty1[1]; - } - - static bool is_in_guest(struct pt_regs *regs) -diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c -index 350e94d0cac23..d015cb1027fa1 100644 ---- a/arch/s390/kernel/process.c -+++ b/arch/s390/kernel/process.c -@@ -91,6 +91,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) - - memcpy(dst, src, arch_task_struct_size); - dst->thread.fpu.regs = dst->thread.fpu.fprs; -+ -+ /* -+ * Don't transfer over the runtime instrumentation or the guarded -+ * storage control block pointers. These fields are cleared here instead -+ * of in copy_thread() to avoid premature freeing of associated memory -+ * on fork() failure. Wait to clear the RI flag because ->stack still -+ * refers to the source thread. -+ */ -+ dst->thread.ri_cb = NULL; -+ dst->thread.gs_cb = NULL; -+ dst->thread.gs_bc_cb = NULL; -+ - return 0; - } - -@@ -149,13 +161,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, - frame->childregs.flags = 0; - if (new_stackp) - frame->childregs.gprs[15] = new_stackp; -- -- /* Don't copy runtime instrumentation info */ -- p->thread.ri_cb = NULL; -+ /* -+ * Clear the runtime instrumentation flag after the above childregs -+ * copy. 
The CB pointer was already cleared in arch_dup_task_struct(). -+ */ - frame->childregs.psw.mask &= ~PSW_MASK_RI; -- /* Don't copy guarded storage control block */ -- p->thread.gs_cb = NULL; -- p->thread.gs_bc_cb = NULL; - - /* Set a new TLS ? */ - if (clone_flags & CLONE_SETTLS) { -diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c -index 0ea3d02b378de..516c21baf3ad3 100644 ---- a/arch/s390/kernel/ptrace.c -+++ b/arch/s390/kernel/ptrace.c -@@ -481,9 +481,7 @@ long arch_ptrace(struct task_struct *child, long request, - } - return 0; - case PTRACE_GET_LAST_BREAK: -- put_user(child->thread.last_break, -- (unsigned long __user *) data); -- return 0; -+ return put_user(child->thread.last_break, (unsigned long __user *)data); - case PTRACE_ENABLE_TE: - if (!MACHINE_HAS_TE) - return -EIO; -@@ -837,9 +835,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, - } - return 0; - case PTRACE_GET_LAST_BREAK: -- put_user(child->thread.last_break, -- (unsigned int __user *) data); -- return 0; -+ return put_user(child->thread.last_break, (unsigned int __user *)data); - } - return compat_ptrace_request(child, request, addr, data); - } -diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c -index 67e5fff96ee06..4dfe37b068898 100644 ---- a/arch/s390/kernel/setup.c -+++ b/arch/s390/kernel/setup.c -@@ -95,10 +95,10 @@ EXPORT_SYMBOL(console_irq); - * relocated above 2 GB, because it has to use 31 bit addresses. - * Such code and data is part of the .amode31 section. - */ --unsigned long __amode31_ref __samode31 = __pa(&_samode31); --unsigned long __amode31_ref __eamode31 = __pa(&_eamode31); --unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31); --unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31); -+unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31; -+unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31; -+unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31; -+unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31; - struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table; - struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table; - -@@ -149,6 +149,7 @@ struct mem_detect_info __bootdata(mem_detect); - struct initrd_data __bootdata(initrd_data); - - unsigned long __bootdata_preserved(__kaslr_offset); -+unsigned long __bootdata(__amode31_base); - unsigned int __bootdata_preserved(zlib_dfltcc_support); - EXPORT_SYMBOL(zlib_dfltcc_support); - u64 __bootdata_preserved(stfle_fac_list[16]); -@@ -478,11 +479,12 @@ static void __init setup_lowcore_dat_off(void) - lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET; - - /* Setup absolute zero lowcore */ -- mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack); -- mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn); -- mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data); -- mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); -- mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); -+ put_abs_lowcore(restart_stack, lc->restart_stack); -+ put_abs_lowcore(restart_fn, lc->restart_fn); -+ put_abs_lowcore(restart_data, lc->restart_data); -+ put_abs_lowcore(restart_source, lc->restart_source); -+ put_abs_lowcore(restart_psw, lc->restart_psw); -+ put_abs_lowcore(mcesad, lc->mcesad); - - lc->spinlock_lockval = arch_spin_lockval(0); - lc->spinlock_index = 0; -@@ -499,6 +501,7 @@ static 
void __init setup_lowcore_dat_off(void) - static void __init setup_lowcore_dat_on(void) - { - struct lowcore *lc = lowcore_ptr[0]; -+ int cr; - - __ctl_clear_bit(0, 28); - S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; -@@ -507,10 +510,10 @@ static void __init setup_lowcore_dat_on(void) - S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; - __ctl_store(S390_lowcore.cregs_save_area, 0, 15); - __ctl_set_bit(0, 28); -- mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS); -- mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw); -- memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area, -- sizeof(S390_lowcore.cregs_save_area)); -+ put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS); -+ put_abs_lowcore(program_new_psw, lc->program_new_psw); -+ for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++) -+ put_abs_lowcore(cregs_save_area[cr], lc->cregs_save_area[cr]); - } - - static struct resource code_resource = { -@@ -633,14 +636,6 @@ static struct notifier_block kdump_mem_nb = { - - #endif - --/* -- * Make sure that the area above identity mapping is protected -- */ --static void __init reserve_above_ident_map(void) --{ -- memblock_reserve(ident_map_size, ULONG_MAX); --} -- - /* - * Reserve memory for kdump kernel to be loaded with kexec - */ -@@ -804,12 +799,12 @@ static void __init check_initrd(void) - */ - static void __init reserve_kernel(void) - { -- unsigned long start_pfn = PFN_UP(__pa(_end)); -- - memblock_reserve(0, STARTUP_NORMAL_OFFSET); -- memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP); -- memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) -- - (unsigned long)_stext); -+ memblock_reserve(OLDMEM_BASE, sizeof(unsigned long)); -+ memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long)); -+ memblock_reserve(__amode31_base, __eamode31 - __samode31); -+ memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP); -+ memblock_reserve(__pa(_stext), _end - _stext); - } - - static void __init setup_memory(void) -@@ -824,27 +819,18 @@ static void __init setup_memory(void) - storage_key_init_range(start, end); - - psw_set_key(PAGE_DEFAULT_KEY); -- -- /* Only cosmetics */ -- memblock_enforce_memory_limit(memblock_end_of_DRAM()); - } - - static void __init relocate_amode31_section(void) - { -- unsigned long amode31_addr, amode31_size; -- long amode31_offset; -+ unsigned long amode31_size = __eamode31 - __samode31; -+ long amode31_offset = __amode31_base - __samode31; - long *ptr; - -- /* Allocate a new AMODE31 capable memory region */ -- amode31_size = __eamode31 - __samode31; - pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); -- amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE); -- if (!amode31_addr) -- panic("Failed to allocate memory for AMODE31 section\n"); -- amode31_offset = amode31_addr - __samode31; - - /* Move original AMODE31 section to the new one */ -- memmove((void *)amode31_addr, (void *)__samode31, amode31_size); -+ memmove((void *)__amode31_base, (void *)__samode31, amode31_size); - /* Zero out the old AMODE31 section to catch invalid accesses within it */ - memset((void *)__samode31, 0, amode31_size); - -@@ -891,6 +877,11 @@ static void __init setup_randomness(void) - if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) - add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); - memblock_free((unsigned long) vmms, PAGE_SIZE); -+ -+#ifdef CONFIG_ARCH_RANDOM -+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) -+ 
static_branch_enable(&s390_arch_random_available); -+#endif - } - - /* -@@ -1005,11 +996,11 @@ void __init setup_arch(char **cmdline_p) - setup_control_program_code(); - - /* Do some memory reservations *before* memory is added to memblock */ -- reserve_above_ident_map(); - reserve_kernel(); - reserve_initrd(); - reserve_certificate_list(); - reserve_mem_detect_info(); -+ memblock_set_current_limit(ident_map_size); - memblock_allow_resize(); - - /* Get information about *all* installed memory */ -diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c -index 1a04e5bdf6555..35af70ed58fc7 100644 ---- a/arch/s390/kernel/smp.c -+++ b/arch/s390/kernel/smp.c -@@ -328,10 +328,17 @@ static void pcpu_delegate(struct pcpu *pcpu, - /* Stop target cpu (if func returns this stops the current cpu). */ - pcpu_sigp_retry(pcpu, SIGP_STOP, 0); - /* Restart func on the target cpu and stop the current cpu. */ -- mem_assign_absolute(lc->restart_stack, stack); -- mem_assign_absolute(lc->restart_fn, (unsigned long) func); -- mem_assign_absolute(lc->restart_data, (unsigned long) data); -- mem_assign_absolute(lc->restart_source, source_cpu); -+ if (lc) { -+ lc->restart_stack = stack; -+ lc->restart_fn = (unsigned long)func; -+ lc->restart_data = (unsigned long)data; -+ lc->restart_source = source_cpu; -+ } else { -+ put_abs_lowcore(restart_stack, stack); -+ put_abs_lowcore(restart_fn, (unsigned long)func); -+ put_abs_lowcore(restart_data, (unsigned long)data); -+ put_abs_lowcore(restart_source, source_cpu); -+ } - __bpon(); - asm volatile( - "0: sigp 0,%0,%2 # sigp restart to target cpu\n" -@@ -572,39 +579,27 @@ static void smp_ctl_bit_callback(void *info) - } - - static DEFINE_SPINLOCK(ctl_lock); --static unsigned long ctlreg; - --/* -- * Set a bit in a control register of all cpus -- */ --void smp_ctl_set_bit(int cr, int bit) -+void smp_ctl_set_clear_bit(int cr, int bit, bool set) - { -- struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; -- -- spin_lock(&ctl_lock); -- memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg)); -- __set_bit(bit, &ctlreg); -- memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg)); -- spin_unlock(&ctl_lock); -- on_each_cpu(smp_ctl_bit_callback, &parms, 1); --} --EXPORT_SYMBOL(smp_ctl_set_bit); -- --/* -- * Clear a bit in a control register of all cpus -- */ --void smp_ctl_clear_bit(int cr, int bit) --{ -- struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; -+ struct ec_creg_mask_parms parms = { .cr = cr, }; -+ u64 ctlreg; - -+ if (set) { -+ parms.orval = 1UL << bit; -+ parms.andval = -1UL; -+ } else { -+ parms.orval = 0; -+ parms.andval = ~(1UL << bit); -+ } - spin_lock(&ctl_lock); -- memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg)); -- __clear_bit(bit, &ctlreg); -- memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg)); -+ get_abs_lowcore(ctlreg, cregs_save_area[cr]); -+ ctlreg = (ctlreg & parms.andval) | parms.orval; -+ put_abs_lowcore(cregs_save_area[cr], ctlreg); - spin_unlock(&ctl_lock); - on_each_cpu(smp_ctl_bit_callback, &parms, 1); - } --EXPORT_SYMBOL(smp_ctl_clear_bit); -+EXPORT_SYMBOL(smp_ctl_set_clear_bit); - - #ifdef CONFIG_CRASH_DUMP - -@@ -675,7 +670,7 @@ static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr, - void *regs = (void *) page; - - if (is_boot_cpu) -- copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512); -+ copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512); - else - __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page); 
- save_area_add_regs(sa, regs); -diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c -index 4d141e2c132e5..2ea7f208f0e73 100644 ---- a/arch/s390/kernel/sthyi.c -+++ b/arch/s390/kernel/sthyi.c -@@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc) - * - * Fills the destination with system information returned by the STHYI - * instruction. The data is generated by emulation or execution of STHYI, -- * if available. The return value is the condition code that would be -- * returned, the rc parameter is the return code which is passed in -- * register R2 + 1. -+ * if available. The return value is either a negative error value or -+ * the condition code that would be returned, the rc parameter is the -+ * return code which is passed in register R2 + 1. - */ - int sthyi_fill(void *dst, u64 *rc) - { -diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c -index 326cb8f75f58e..f0a1484ee00b0 100644 ---- a/arch/s390/kernel/time.c -+++ b/arch/s390/kernel/time.c -@@ -364,7 +364,7 @@ static inline int check_sync_clock(void) - * Apply clock delta to the global data structures. - * This is called once on the CPU that performed the clock sync. - */ --static void clock_sync_global(unsigned long delta) -+static void clock_sync_global(long delta) - { - unsigned long now, adj; - struct ptff_qto qto; -@@ -400,7 +400,7 @@ static void clock_sync_global(unsigned long delta) - * Apply clock delta to the per-CPU data structures of this CPU. - * This is called for each online CPU after the call to clock_sync_global. - */ --static void clock_sync_local(unsigned long delta) -+static void clock_sync_local(long delta) - { - /* Add the delta to the clock comparator. */ - if (S390_lowcore.clock_comparator != clock_comparator_max) { -@@ -424,7 +424,7 @@ static void __init time_init_wq(void) - struct clock_sync_data { - atomic_t cpus; - int in_sync; -- unsigned long clock_delta; -+ long clock_delta; - }; - - /* -@@ -544,7 +544,7 @@ static int stpinfo_valid(void) - static int stp_sync_clock(void *data) - { - struct clock_sync_data *sync = data; -- u64 clock_delta, flags; -+ long clock_delta, flags; - static int first; - int rc; - -diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c -index 58f8291950cbf..1f4f37a26c26d 100644 ---- a/arch/s390/kernel/topology.c -+++ b/arch/s390/kernel/topology.c -@@ -96,7 +96,7 @@ out: - static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) - { - static cpumask_t mask; -- int i; -+ unsigned int max_cpu; - - cpumask_clear(&mask); - if (!cpumask_test_cpu(cpu, &cpu_setup_mask)) -@@ -105,9 +105,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) - if (topology_mode != TOPOLOGY_MODE_HW) - goto out; - cpu -= cpu % (smp_cpu_mtid + 1); -- for (i = 0; i <= smp_cpu_mtid; i++) { -- if (cpumask_test_cpu(cpu + i, &cpu_setup_mask)) -- cpumask_set_cpu(cpu + i, &mask); -+ max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); -+ for (; cpu <= max_cpu; cpu++) { -+ if (cpumask_test_cpu(cpu, &cpu_setup_mask)) -+ cpumask_set_cpu(cpu, &mask); - } - out: - cpumask_copy(dst, &mask); -@@ -124,25 +125,26 @@ static void add_cpus_to_mask(struct topology_core *tl_core, - unsigned int core; - - for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) { -- unsigned int rcore; -- int lcpu, i; -+ unsigned int max_cpu, rcore; -+ int cpu; - - rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin; -- lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); -- if (lcpu < 0) -+ cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); -+ if (cpu < 0) - continue; -- 
for (i = 0; i <= smp_cpu_mtid; i++) { -- topo = &cpu_topology[lcpu + i]; -+ max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); -+ for (; cpu <= max_cpu; cpu++) { -+ topo = &cpu_topology[cpu]; - topo->drawer_id = drawer->id; - topo->book_id = book->id; - topo->socket_id = socket->id; - topo->core_id = rcore; -- topo->thread_id = lcpu + i; -+ topo->thread_id = cpu; - topo->dedicated = tl_core->d; -- cpumask_set_cpu(lcpu + i, &drawer->mask); -- cpumask_set_cpu(lcpu + i, &book->mask); -- cpumask_set_cpu(lcpu + i, &socket->mask); -- smp_cpu_set_polarization(lcpu + i, tl_core->pp); -+ cpumask_set_cpu(cpu, &drawer->mask); -+ cpumask_set_cpu(cpu, &book->mask); -+ cpumask_set_cpu(cpu, &socket->mask); -+ smp_cpu_set_polarization(cpu, tl_core->pp); - } - } - } -diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c -index bcefc2173de45..4044826d72ae5 100644 ---- a/arch/s390/kernel/traps.c -+++ b/arch/s390/kernel/traps.c -@@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs) - { - if (user_mode(regs)) { - report_user_fault(regs, SIGSEGV, 0); -- do_exit(SIGSEGV); -+ force_exit_sig(SIGSEGV); - } else - die(regs, "Unknown program exception"); - } -@@ -142,10 +142,10 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) - do_trap(regs, SIGFPE, si_code, "floating point exception"); - } - --static void translation_exception(struct pt_regs *regs) -+static void translation_specification_exception(struct pt_regs *regs) - { - /* May never happen. */ -- panic("Translation exception"); -+ panic("Translation-Specification Exception"); - } - - static void illegal_op(struct pt_regs *regs) -@@ -374,7 +374,7 @@ static void (*pgm_check_table[128])(struct pt_regs *regs) = { - [0x0f] = hfp_divide_exception, - [0x10] = do_dat_exception, - [0x11] = do_dat_exception, -- [0x12] = translation_exception, -+ [0x12] = translation_specification_exception, - [0x13] = special_op_exception, - [0x14] = default_trap_handler, - [0x15] = operand_exception, -diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c -index 5a656c7b7a67a..f95ccbd396925 100644 ---- a/arch/s390/kernel/uv.c -+++ b/arch/s390/kernel/uv.c -@@ -212,7 +212,7 @@ again: - uaddr = __gmap_translate(gmap, gaddr); - if (IS_ERR_VALUE(uaddr)) - goto out; -- vma = find_vma(gmap->mm, uaddr); -+ vma = vma_lookup(gmap->mm, uaddr); - if (!vma) - goto out; - /* -diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile -index e3e6ac5686df5..245bddfe9bc0e 100644 ---- a/arch/s390/kernel/vdso32/Makefile -+++ b/arch/s390/kernel/vdso32/Makefile -@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s - KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) - KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin - --LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \ -+LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \ - --hash-style=both --build-id=sha1 -melf_s390 -T - - $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) -diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile -index 6568de2367010..1605ba45ac4c0 100644 ---- a/arch/s390/kernel/vdso64/Makefile -+++ b/arch/s390/kernel/vdso64/Makefile -@@ -8,8 +8,9 @@ ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT - include $(srctree)/lib/vdso/Makefile - obj-vdso64 = vdso_user_wrapper.o note.o - obj-cvdso64 = vdso64_generic.o getcpu.o --CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) --CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) 
-+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK) -+CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE) -+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) - - # Build rules - -@@ -24,8 +25,8 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) - KBUILD_AFLAGS_64 += -m64 -s - - KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) --KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin --ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ -+KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -+ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \ - --hash-style=both --build-id=sha1 -T - - $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) -diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S -index 63bdb9e1bfc13..853b80770c6df 100644 ---- a/arch/s390/kernel/vmlinux.lds.S -+++ b/arch/s390/kernel/vmlinux.lds.S -@@ -17,6 +17,8 @@ - /* Handle ro_after_init data on our own. */ - #define RO_AFTER_INIT_DATA - -+#define RUNTIME_DISCARD_EXIT -+ - #define EMITS_PT_NOTE - - #include -@@ -80,6 +82,7 @@ SECTIONS - _end_amode31_refs = .; - } - -+ . = ALIGN(PAGE_SIZE); - _edata = .; /* End of data section */ - - /* will be freed after init */ -@@ -132,6 +135,7 @@ SECTIONS - /* - * Table with the patch locations to undo expolines - */ -+ . = ALIGN(4); - .nospec_call_table : { - __nospec_call_start = . ; - *(.s390_indirect*) -@@ -193,6 +197,7 @@ SECTIONS - - BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE) - -+ . = ALIGN(PAGE_SIZE); - _end = . ; - - /* -@@ -212,6 +217,7 @@ SECTIONS - QUAD(__dynsym_start) /* dynsym_start */ - QUAD(__rela_dyn_start) /* rela_dyn_start */ - QUAD(__rela_dyn_end) /* rela_dyn_end */ -+ QUAD(_eamode31 - _samode31) /* amode31_size */ - } :NONE - - /* Debugging sections. */ -@@ -223,5 +229,6 @@ SECTIONS - DISCARDS - /DISCARD/ : { - *(.eh_frame) -+ *(.interp) - } - } -diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c -index 807fa9da1e721..3c65b8258ae67 100644 ---- a/arch/s390/kvm/diag.c -+++ b/arch/s390/kvm/diag.c -@@ -166,6 +166,7 @@ static int diag9c_forwarding_overrun(void) - static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) - { - struct kvm_vcpu *tcpu; -+ int tcpu_cpu; - int tid; - - tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; -@@ -181,14 +182,15 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu) - goto no_yield; - - /* target guest VCPU already running */ -- if (READ_ONCE(tcpu->cpu) >= 0) { -+ tcpu_cpu = READ_ONCE(tcpu->cpu); -+ if (tcpu_cpu >= 0) { - if (!diag9c_forwarding_hz || diag9c_forwarding_overrun()) - goto no_yield; - - /* target host CPU already running */ -- if (!vcpu_is_preempted(tcpu->cpu)) -+ if (!vcpu_is_preempted(tcpu_cpu)) - goto no_yield; -- smp_yield_cpu(tcpu->cpu); -+ smp_yield_cpu(tcpu_cpu); - VCPU_EVENT(vcpu, 5, - "diag time slice end directed to %d: yield forwarded", - tid); -diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c -index 2bd8f854f1b41..458b42b50b8cb 100644 ---- a/arch/s390/kvm/intercept.c -+++ b/arch/s390/kvm/intercept.c -@@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu) - * handle_external_interrupt - used for external interruption interceptions - * @vcpu: virtual cpu - * -- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if -- * the new PSW does not have external interrupts disabled. 
In the first case, -- * we've got to deliver the interrupt manually, and in the second case, we -- * drop to userspace to handle the situation there. -+ * This interception occurs if: -+ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt -+ * occurred. In this case, the interrupt needs to be injected manually to -+ * preserve interrupt priority. -+ * - the external new PSW has external interrupts enabled, which will cause an -+ * interruption loop. We drop to userspace in this case. -+ * -+ * The latter case can be detected by inspecting the external mask bit in the -+ * external new psw. -+ * -+ * Under PV, only the latter case can occur, since interrupt priorities are -+ * handled in the ultravisor. - */ - static int handle_external_interrupt(struct kvm_vcpu *vcpu) - { -@@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) - - vcpu->stat.exit_external_interrupt++; - -- rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t)); -- if (rc) -- return rc; -- /* We can not handle clock comparator or timer interrupt with bad PSW */ -+ if (kvm_s390_pv_cpu_is_protected(vcpu)) { -+ newpsw = vcpu->arch.sie_block->gpsw; -+ } else { -+ rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t)); -+ if (rc) -+ return rc; -+ } -+ -+ /* -+ * Clock comparator or timer interrupt with external interrupt enabled -+ * will cause interrupt loop. Drop to userspace. -+ */ - if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) && - (newpsw.mask & PSW_MASK_EXT)) - return -EOPNOTSUPP; -@@ -373,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) - */ - int handle_sthyi(struct kvm_vcpu *vcpu) - { -- int reg1, reg2, r = 0; -- u64 code, addr, cc = 0, rc = 0; -+ int reg1, reg2, cc = 0, r = 0; -+ u64 code, addr, rc = 0; - struct sthyi_sctns *sctns = NULL; - - if (!test_kvm_facility(vcpu->kvm, 74)) -@@ -405,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu) - return -ENOMEM; - - cc = sthyi_fill(sctns, &rc); -- -+ if (cc < 0) { -+ free_page((unsigned long)sctns); -+ return cc; -+ } - out: - if (!cc) { - if (kvm_s390_pv_cpu_is_protected(vcpu)) { -@@ -523,12 +542,27 @@ static int handle_pv_uvc(struct kvm_vcpu *vcpu) - - static int handle_pv_notification(struct kvm_vcpu *vcpu) - { -+ int ret; -+ - if (vcpu->arch.sie_block->ipa == 0xb210) - return handle_pv_spx(vcpu); - if (vcpu->arch.sie_block->ipa == 0xb220) - return handle_pv_sclp(vcpu); - if (vcpu->arch.sie_block->ipa == 0xb9a4) - return handle_pv_uvc(vcpu); -+ if (vcpu->arch.sie_block->ipa >> 8 == 0xae) { -+ /* -+ * Besides external call, other SIGP orders also cause a -+ * 108 (pv notify) intercept. In contrast to external call, -+ * these orders need to be emulated and hence the appropriate -+ * place to handle them is in handle_instruction(). -+ * So first try kvm_s390_handle_sigp_pei() and if that isn't -+ * successful, go on with handle_instruction(). 
-+ */ -+ ret = kvm_s390_handle_sigp_pei(vcpu); -+ if (!ret) -+ return ret; -+ } - - return handle_instruction(vcpu); - } -diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c -index 2245f4b8d3629..ca7d09f098092 100644 ---- a/arch/s390/kvm/interrupt.c -+++ b/arch/s390/kvm/interrupt.c -@@ -81,8 +81,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) - struct esca_block *sca = vcpu->kvm->arch.sca; - union esca_sigp_ctrl *sigp_ctrl = - &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); -- union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; -+ union esca_sigp_ctrl new_val = {0}, old_val; - -+ old_val = READ_ONCE(*sigp_ctrl); - new_val.scn = src_id; - new_val.c = 1; - old_val.c = 0; -@@ -93,8 +94,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) - struct bsca_block *sca = vcpu->kvm->arch.sca; - union bsca_sigp_ctrl *sigp_ctrl = - &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); -- union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; -+ union bsca_sigp_ctrl new_val = {0}, old_val; - -+ old_val = READ_ONCE(*sigp_ctrl); - new_val.scn = src_id; - new_val.c = 1; - old_val.c = 0; -@@ -124,16 +126,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu) - struct esca_block *sca = vcpu->kvm->arch.sca; - union esca_sigp_ctrl *sigp_ctrl = - &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); -- union esca_sigp_ctrl old = *sigp_ctrl; -+ union esca_sigp_ctrl old; - -+ old = READ_ONCE(*sigp_ctrl); - expect = old.value; - rc = cmpxchg(&sigp_ctrl->value, old.value, 0); - } else { - struct bsca_block *sca = vcpu->kvm->arch.sca; - union bsca_sigp_ctrl *sigp_ctrl = - &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); -- union bsca_sigp_ctrl old = *sigp_ctrl; -+ union bsca_sigp_ctrl old; - -+ old = READ_ONCE(*sigp_ctrl); - expect = old.value; - rc = cmpxchg(&sigp_ctrl->value, old.value, 0); - } -@@ -2115,6 +2119,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) - return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); - } - -+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu) -+{ -+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; -+ -+ return test_bit(IRQ_PEND_RESTART, &li->pending_irqs); -+} -+ - void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) - { - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; -diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c -index 1c97493d21e10..eb97db59b2365 100644 ---- a/arch/s390/kvm/kvm-s390.c -+++ b/arch/s390/kvm/kvm-s390.c -@@ -1117,6 +1117,8 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm, - return 0; - } - -+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); -+ - static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) - { - struct kvm_s390_vm_tod_clock gtod; -@@ -1126,7 +1128,7 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) - - if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) - return -EINVAL; -- kvm_s390_set_tod_clock(kvm, >od); -+ __kvm_s390_set_tod_clock(kvm, >od); - - VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", - gtod.epoch_idx, gtod.tod); -@@ -1157,7 +1159,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) - sizeof(gtod.tod))) - return -EFAULT; - -- kvm_s390_set_tod_clock(kvm, >od); -+ __kvm_s390_set_tod_clock(kvm, >od); - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); - return 0; - } -@@ -1169,6 +1171,16 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) - if (attr->flags) - return -EINVAL; - -+ 
mutex_lock(&kvm->lock); -+ /* -+ * For protected guests, the TOD is managed by the ultravisor, so trying -+ * to change it will never bring the expected results. -+ */ -+ if (kvm_s390_pv_is_protected(kvm)) { -+ ret = -EOPNOTSUPP; -+ goto out_unlock; -+ } -+ - switch (attr->attr) { - case KVM_S390_VM_TOD_EXT: - ret = kvm_s390_set_tod_ext(kvm, attr); -@@ -1183,6 +1195,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) - ret = -ENXIO; - break; - } -+ -+out_unlock: -+ mutex_unlock(&kvm->lock); - return ret; - } - -@@ -2015,6 +2030,10 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, - ms = slots->memslots + slotidx; - ofs = 0; - } -+ -+ if (cur_gfn < ms->base_gfn) -+ ofs = 0; -+ - ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); - while ((slotidx > 0) && (ofs >= ms->npages)) { - slotidx--; -@@ -3447,7 +3466,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) - { - /* do not poll with more than halt_poll_max_steal percent of steal time */ - if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= -- halt_poll_max_steal) { -+ READ_ONCE(halt_poll_max_steal)) { - vcpu->stat.halt_no_poll_steal++; - return true; - } -@@ -3913,14 +3932,12 @@ retry: - return 0; - } - --void kvm_s390_set_tod_clock(struct kvm *kvm, -- const struct kvm_s390_vm_tod_clock *gtod) -+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) - { - struct kvm_vcpu *vcpu; - union tod_clock clk; - int i; - -- mutex_lock(&kvm->lock); - preempt_disable(); - - store_tod_clock_ext(&clk); -@@ -3941,7 +3958,15 @@ void kvm_s390_set_tod_clock(struct kvm *kvm, - - kvm_s390_vcpu_unblock_all(kvm); - preempt_enable(); -+} -+ -+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) -+{ -+ if (!mutex_trylock(&kvm->lock)) -+ return 0; -+ __kvm_s390_set_tod_clock(kvm, gtod); - mutex_unlock(&kvm->lock); -+ return 1; - } - - /** -@@ -4642,10 +4667,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) - } - } - -- /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ -+ /* -+ * Set the VCPU to STOPPED and THEN clear the interrupt flag, -+ * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders -+ * have been fully processed. This will ensure that the VCPU -+ * is kept BUSY if another VCPU is inquiring with SIGP SENSE. -+ */ -+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); - kvm_s390_clear_stop_irq(vcpu); - -- kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); - __disable_ibs_on_vcpu(vcpu); - - for (i = 0; i < online_vcpus; i++) { -@@ -4703,6 +4733,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu, - return -EINVAL; - if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) - return -E2BIG; -+ if (!kvm_s390_pv_cpu_is_protected(vcpu)) -+ return -EINVAL; - - switch (mop->op) { - case KVM_S390_MEMOP_SIDA_READ: -@@ -5038,6 +5070,23 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, - /* When we are protected, we should not change the memory slots */ - if (kvm_s390_pv_get_handle(kvm)) - return -EINVAL; -+ -+ if (!kvm->arch.migration_mode) -+ return 0; -+ -+ /* -+ * Turn off migration mode when: -+ * - userspace creates a new memslot with dirty logging off, -+ * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and -+ * dirty logging is turned off. -+ * Migration mode expects dirty page logging being enabled to store -+ * its dirty bitmap. 
-+ */ -+ if (change != KVM_MR_DELETE && -+ !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) -+ WARN(kvm_s390_vm_stop_migration(kvm), -+ "Failed to stop migration mode"); -+ - return 0; - } - -diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h -index 52bc8fbaa60ac..a2fde6d69057b 100644 ---- a/arch/s390/kvm/kvm-s390.h -+++ b/arch/s390/kvm/kvm-s390.h -@@ -326,8 +326,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); - int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); - - /* implemented in kvm-s390.c */ --void kvm_s390_set_tod_clock(struct kvm *kvm, -- const struct kvm_s390_vm_tod_clock *gtod); -+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); - long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); - int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); - int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); -@@ -418,6 +417,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm); - int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); - extern struct kvm_device_ops kvm_flic_ops; - int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); -+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); - void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); - int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, - void __user *buf, int len); -diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c -index 53da4ceb16a3a..6a765fe22eafc 100644 ---- a/arch/s390/kvm/priv.c -+++ b/arch/s390/kvm/priv.c -@@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) - return kvm_s390_inject_prog_cond(vcpu, rc); - - VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); -- kvm_s390_set_tod_clock(vcpu->kvm, >od); -+ /* -+ * To set the TOD clock the kvm lock must be taken, but the vcpu lock -+ * is already held in handle_set_clock. The usual lock order is the -+ * opposite. As SCK is deprecated and should not be used in several -+ * cases, for example when the multiple epoch facility or TOD clock -+ * steering facility is installed (see Principles of Operation), a -+ * slow path can be used. If the lock can not be taken via try_lock, -+ * the instruction will be retried via -EAGAIN at a later point in -+ * time. 
-+ */ -+ if (!kvm_s390_try_set_tod_clock(vcpu->kvm, >od)) { -+ kvm_s390_retry_instr(vcpu); -+ return -EAGAIN; -+ } - - kvm_s390_set_psw_cc(vcpu, 0); - return 0; -@@ -397,6 +410,8 @@ static int handle_sske(struct kvm_vcpu *vcpu) - mmap_read_unlock(current->mm); - if (rc == -EFAULT) - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); -+ if (rc == -EAGAIN) -+ continue; - if (rc < 0) - return rc; - start += PAGE_SIZE; -diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c -index c8841f476e913..b906658ffc2ed 100644 ---- a/arch/s390/kvm/pv.c -+++ b/arch/s390/kvm/pv.c -@@ -16,18 +16,17 @@ - - int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) - { -- int cc = 0; -+ int cc; - -- if (kvm_s390_pv_cpu_get_handle(vcpu)) { -- cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), -- UVC_CMD_DESTROY_SEC_CPU, rc, rrc); -+ if (!kvm_s390_pv_cpu_get_handle(vcpu)) -+ return 0; -+ -+ cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc); -+ -+ KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", -+ vcpu->vcpu_id, *rc, *rrc); -+ WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc); - -- KVM_UV_EVENT(vcpu->kvm, 3, -- "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", -- vcpu->vcpu_id, *rc, *rrc); -- WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", -- *rc, *rrc); -- } - /* Intended memory leak for something that should never happen. */ - if (!cc) - free_pages(vcpu->arch.pv.stor_base, -@@ -169,10 +168,13 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc) - atomic_set(&kvm->mm->context.is_protected, 0); - KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc); - WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc); -- /* Inteded memory leak on "impossible" error */ -- if (!cc) -+ /* Intended memory leak on "impossible" error */ -+ if (!cc) { - kvm_s390_pv_dealloc_vm(kvm); -- return cc ? -EIO : 0; -+ return 0; -+ } -+ s390_replace_asce(kvm->arch.gmap); -+ return -EIO; - } - - int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) -@@ -196,7 +198,7 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc) - uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base; - uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var; - -- cc = uv_call(0, (u64)&uvcb); -+ cc = uv_call_sched(0, (u64)&uvcb); - *rc = uvcb.header.rc; - *rrc = uvcb.header.rrc; - KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x", -diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c -index 683036c1c92a8..52800279686c0 100644 ---- a/arch/s390/kvm/sigp.c -+++ b/arch/s390/kvm/sigp.c -@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, - if (!dst_vcpu) - return SIGP_CC_NOT_OPERATIONAL; - -+ /* -+ * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders -+ * are processed asynchronously. Until the affected VCPU finishes -+ * its work and calls back into KVM to clear the (RESTART or STOP) -+ * interrupt, we need to return any new non-reset orders "busy". -+ * -+ * This is important because a single VCPU could issue: -+ * 1) SIGP STOP $DESTINATION -+ * 2) SIGP SENSE $DESTINATION -+ * -+ * If the SIGP SENSE would not be rejected as "busy", it could -+ * return an incorrect answer as to whether the VCPU is STOPPED -+ * or OPERATING. -+ */ -+ if (order_code != SIGP_INITIAL_CPU_RESET && -+ order_code != SIGP_CPU_RESET) { -+ /* -+ * Lockless check. 
Both SIGP STOP and SIGP (RE)START -+ * properly synchronize everything while processing -+ * their orders, while the guest cannot observe a -+ * difference when issuing other orders from two -+ * different VCPUs. -+ */ -+ if (kvm_s390_is_stop_irq_pending(dst_vcpu) || -+ kvm_s390_is_restart_irq_pending(dst_vcpu)) -+ return SIGP_CC_BUSY; -+ } -+ - switch (order_code) { - case SIGP_SENSE: - vcpu->stat.instruction_sigp_sense++; -@@ -464,9 +492,9 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) - struct kvm_vcpu *dest_vcpu; - u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); - -- trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); -- - if (order_code == SIGP_EXTERNAL_CALL) { -+ trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); -+ - dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); - BUG_ON(dest_vcpu == NULL); - -diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c -index acda4b6fc8518..e07bc0d3df6ff 100644 ---- a/arch/s390/kvm/vsie.c -+++ b/arch/s390/kvm/vsie.c -@@ -169,7 +169,8 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s, - sizeof(struct kvm_s390_apcb0))) - return -EFAULT; - -- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0)); -+ bitmap_and(apcb_s, apcb_s, apcb_h, -+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0)); - - return 0; - } -@@ -191,7 +192,8 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s, - sizeof(struct kvm_s390_apcb1))) - return -EFAULT; - -- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1)); -+ bitmap_and(apcb_s, apcb_s, apcb_h, -+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1)); - - return 0; - } -@@ -538,8 +540,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) - if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) - scb_s->eca |= scb_o->eca & ECA_CEI; - /* Epoch Extension */ -- if (test_kvm_facility(vcpu->kvm, 139)) -+ if (test_kvm_facility(vcpu->kvm, 139)) { - scb_s->ecd |= scb_o->ecd & ECD_MEF; -+ scb_s->epdx = scb_o->epdx; -+ } - - /* etoken */ - if (test_kvm_facility(vcpu->kvm, 156)) -diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c -index ecf327d743a03..c0635cf787e31 100644 ---- a/arch/s390/lib/test_unwind.c -+++ b/arch/s390/lib/test_unwind.c -@@ -171,10 +171,11 @@ static noinline int unwindme_func4(struct unwindme *u) - } - - /* -- * trigger specification exception -+ * Trigger operation exception; use insn notation to bypass -+ * llvm's integrated assembler sanity checks. - */ - asm volatile( -- " mvcl %%r1,%%r1\n" -+ " .insn e,0x0000\n" /* illegal opcode */ - "0: nopr %%r7\n" - EX_TABLE(0b, 0b) - :); -diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c -index a596e69d3c474..25be1424d393b 100644 ---- a/arch/s390/lib/uaccess.c -+++ b/arch/s390/lib/uaccess.c -@@ -212,7 +212,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size - asm volatile( - " llilh 0,%[spec]\n" - "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" -- " jz 4f\n" -+ "6: jz 4f\n" - "1: algr %0,%2\n" - " slgr %1,%2\n" - " j 0b\n" -@@ -222,12 +222,12 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size - " clgr %0,%3\n" /* copy crosses next page boundary? 
*/ - " jnh 5f\n" - "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" -- " slgr %0,%3\n" -+ "7: slgr %0,%3\n" - " j 5f\n" - "4: slgr %0,%0\n" - "5:\n" -- EX_TABLE(0b,2b) EX_TABLE(3b,5b) -- : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) -+ EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b) -+ : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2) - : "a" (empty_zero_page), [spec] "K" (0x81UL) - : "cc", "memory", "0"); - return size; -diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c -index 5060956b8e7d6..1bc42ce265990 100644 ---- a/arch/s390/mm/extmem.c -+++ b/arch/s390/mm/extmem.c -@@ -289,15 +289,17 @@ segment_overlaps_others (struct dcss_segment *seg) - - /* - * real segment loading function, called from segment_load -+ * Must return either an error code < 0, or the segment type code >= 0 - */ - static int - __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) - { - unsigned long start_addr, end_addr, dummy; - struct dcss_segment *seg; -- int rc, diag_cc; -+ int rc, diag_cc, segtype; - - start_addr = end_addr = 0; -+ segtype = -1; - seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); - if (seg == NULL) { - rc = -ENOMEM; -@@ -326,9 +328,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long - seg->res_name[8] = '\0'; - strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); - seg->res->name = seg->res_name; -- rc = seg->vm_segtype; -- if (rc == SEG_TYPE_SC || -- ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) -+ segtype = seg->vm_segtype; -+ if (segtype == SEG_TYPE_SC || -+ ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared)) - seg->res->flags |= IORESOURCE_READONLY; - - /* Check for overlapping resources before adding the mapping. */ -@@ -386,7 +388,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long - out_free: - kfree(seg); - out: -- return rc; -+ return rc < 0 ? rc : segtype; - } - - /* -diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c -index 212632d57db9c..c930dff312df3 100644 ---- a/arch/s390/mm/fault.c -+++ b/arch/s390/mm/fault.c -@@ -397,7 +397,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) - flags = FAULT_FLAG_DEFAULT; - if (user_mode(regs)) - flags |= FAULT_FLAG_USER; -- if (access == VM_WRITE || is_write) -+ if (is_write) -+ access = VM_WRITE; -+ if (access == VM_WRITE) - flags |= FAULT_FLAG_WRITE; - mmap_read_lock(mm); - -diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c -index 4d3b33ce81c62..a2c872de29a66 100644 ---- a/arch/s390/mm/gmap.c -+++ b/arch/s390/mm/gmap.c -@@ -672,6 +672,7 @@ EXPORT_SYMBOL_GPL(gmap_fault); - */ - void __gmap_zap(struct gmap *gmap, unsigned long gaddr) - { -+ struct vm_area_struct *vma; - unsigned long vmaddr; - spinlock_t *ptl; - pte_t *ptep; -@@ -681,11 +682,17 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr) - gaddr >> PMD_SHIFT); - if (vmaddr) { - vmaddr |= gaddr & ~PMD_MASK; -+ -+ vma = vma_lookup(gmap->mm, vmaddr); -+ if (!vma || is_vm_hugetlb_page(vma)) -+ return; -+ - /* Get pointer to the page table entry */ - ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); -- if (likely(ptep)) -+ if (likely(ptep)) { - ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); -- pte_unmap_unlock(ptep, ptl); -+ pte_unmap_unlock(ptep, ptl); -+ } - } - } - EXPORT_SYMBOL_GPL(__gmap_zap); -@@ -2594,6 +2601,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr, - return 0; - } - -+/* -+ * Give a chance to schedule after setting a key to 256 pages. 
-+ * We only hold the mm lock, which is a rwsem and the kvm srcu. -+ * Both can sleep. -+ */ -+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, -+ unsigned long next, struct mm_walk *walk) -+{ -+ cond_resched(); -+ return 0; -+} -+ - static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, - unsigned long hmask, unsigned long next, - struct mm_walk *walk) -@@ -2616,12 +2635,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, - end = start + HPAGE_SIZE - 1; - __storage_key_init_range(start, end); - set_bit(PG_arch_1, &page->flags); -+ cond_resched(); - return 0; - } - - static const struct mm_walk_ops enable_skey_walk_ops = { - .hugetlb_entry = __s390_enable_skey_hugetlb, - .pte_entry = __s390_enable_skey_pte, -+ .pmd_entry = __s390_enable_skey_pmd, - }; - - int s390_enable_skey(void) -@@ -2705,3 +2726,90 @@ void s390_reset_acc(struct mm_struct *mm) - mmput(mm); - } - EXPORT_SYMBOL_GPL(s390_reset_acc); -+ -+/** -+ * s390_unlist_old_asce - Remove the topmost level of page tables from the -+ * list of page tables of the gmap. -+ * @gmap: the gmap whose table is to be removed -+ * -+ * On s390x, KVM keeps a list of all pages containing the page tables of the -+ * gmap (the CRST list). This list is used at tear down time to free all -+ * pages that are now not needed anymore. -+ * -+ * This function removes the topmost page of the tree (the one pointed to by -+ * the ASCE) from the CRST list. -+ * -+ * This means that it will not be freed when the VM is torn down, and needs -+ * to be handled separately by the caller, unless a leak is actually -+ * intended. Notice that this function will only remove the page from the -+ * list, the page will still be used as a top level page table (and ASCE). -+ */ -+void s390_unlist_old_asce(struct gmap *gmap) -+{ -+ struct page *old; -+ -+ old = virt_to_page(gmap->table); -+ spin_lock(&gmap->guest_table_lock); -+ list_del(&old->lru); -+ /* -+ * Sometimes the topmost page might need to be "removed" multiple -+ * times, for example if the VM is rebooted into secure mode several -+ * times concurrently, or if s390_replace_asce fails after calling -+ * s390_remove_old_asce and is attempted again later. In that case -+ * the old asce has been removed from the list, and therefore it -+ * will not be freed when the VM terminates, but the ASCE is still -+ * in use and still pointed to. -+ * A subsequent call to replace_asce will follow the pointer and try -+ * to remove the same page from the list again. -+ * Therefore it's necessary that the page of the ASCE has valid -+ * pointers, so list_del can work (and do nothing) without -+ * dereferencing stale or invalid pointers. -+ */ -+ INIT_LIST_HEAD(&old->lru); -+ spin_unlock(&gmap->guest_table_lock); -+} -+EXPORT_SYMBOL_GPL(s390_unlist_old_asce); -+ -+/** -+ * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy -+ * @gmap: the gmap whose ASCE needs to be replaced -+ * -+ * If the allocation of the new top level page table fails, the ASCE is not -+ * replaced. -+ * In any case, the old ASCE is always removed from the gmap CRST list. -+ * Therefore the caller has to make sure to save a pointer to it -+ * beforehand, unless a leak is actually intended. 
-+ */ -+int s390_replace_asce(struct gmap *gmap) -+{ -+ unsigned long asce; -+ struct page *page; -+ void *table; -+ -+ s390_unlist_old_asce(gmap); -+ -+ page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); -+ if (!page) -+ return -ENOMEM; -+ page->index = 0; -+ table = page_to_virt(page); -+ memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT)); -+ -+ /* -+ * The caller has to deal with the old ASCE, but here we make sure -+ * the new one is properly added to the CRST list, so that -+ * it will be freed when the VM is torn down. -+ */ -+ spin_lock(&gmap->guest_table_lock); -+ list_add(&page->lru, &gmap->crst_list); -+ spin_unlock(&gmap->guest_table_lock); -+ -+ /* Set new table origin while preserving existing ASCE control bits */ -+ asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table); -+ WRITE_ONCE(gmap->asce, asce); -+ WRITE_ONCE(gmap->mm->context.gmap_asce, asce); -+ WRITE_ONCE(gmap->table, table); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(s390_replace_asce); -diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c -index 9663ce3625bcd..2ed198b4f7d02 100644 ---- a/arch/s390/mm/maccess.c -+++ b/arch/s390/mm/maccess.c -@@ -123,7 +123,7 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest, - /* - * Copy memory in real mode (kernel to kernel) - */ --int memcpy_real(void *dest, void *src, size_t count) -+int memcpy_real(void *dest, unsigned long src, size_t count) - { - unsigned long _dest = (unsigned long)dest; - unsigned long _src = (unsigned long)src; -@@ -175,7 +175,7 @@ void memcpy_absolute(void *dest, void *src, size_t count) - /* - * Copy memory from kernel (real) to user (virtual) - */ --int copy_to_user_real(void __user *dest, void *src, unsigned long count) -+int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count) - { - int offs = 0, size, rc; - char *buf; -diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c -index 781965f7210eb..91e478e09b54b 100644 ---- a/arch/s390/mm/pgalloc.c -+++ b/arch/s390/mm/pgalloc.c -@@ -244,13 +244,15 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) - /* Free 2K page table fragment of a 4K page */ - bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); - spin_lock_bh(&mm->context.lock); -- mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); -+ mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); - mask >>= 24; - if (mask & 3) - list_add(&page->lru, &mm->context.pgtable_list); - else - list_del(&page->lru); - spin_unlock_bh(&mm->context.lock); -+ mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24)); -+ mask >>= 24; - if (mask != 0) - return; - } else { -diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c -index 034721a68d8fd..df0adb7e2fe8e 100644 ---- a/arch/s390/mm/pgtable.c -+++ b/arch/s390/mm/pgtable.c -@@ -429,22 +429,36 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, - } - - #ifdef CONFIG_PGSTE --static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) -+static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) - { -+ struct vm_area_struct *vma; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; -- pmd_t *pmd; -+ -+ /* We need a valid VMA, otherwise this is clearly a fault. 
*/ -+ vma = vma_lookup(mm, addr); -+ if (!vma) -+ return -EFAULT; - - pgd = pgd_offset(mm, addr); -- p4d = p4d_alloc(mm, pgd, addr); -- if (!p4d) -- return NULL; -- pud = pud_alloc(mm, p4d, addr); -- if (!pud) -- return NULL; -- pmd = pmd_alloc(mm, pud, addr); -- return pmd; -+ if (!pgd_present(*pgd)) -+ return -ENOENT; -+ -+ p4d = p4d_offset(pgd, addr); -+ if (!p4d_present(*p4d)) -+ return -ENOENT; -+ -+ pud = pud_offset(p4d, addr); -+ if (!pud_present(*pud)) -+ return -ENOENT; -+ -+ /* Large PUDs are not supported yet. */ -+ if (pud_large(*pud)) -+ return -EFAULT; -+ -+ *pmdp = pmd_offset(pud, addr); -+ return 0; - } - #endif - -@@ -734,7 +748,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) - pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT; - ptev = pte_val(*ptep); - if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) -- page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); -+ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); - pgste_set_unlock(ptep, pgste); - preempt_enable(); - } -@@ -778,8 +792,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp; - pte_t *ptep; - -- pmdp = pmd_alloc_map(mm, addr); -- if (unlikely(!pmdp)) -+ if (pmd_lookup(mm, addr, &pmdp)) - return -EFAULT; - - ptl = pmd_lock(mm, pmdp); -@@ -881,8 +894,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) - pte_t *ptep; - int cc = 0; - -- pmdp = pmd_alloc_map(mm, addr); -- if (unlikely(!pmdp)) -+ if (pmd_lookup(mm, addr, &pmdp)) - return -EFAULT; - - ptl = pmd_lock(mm, pmdp); -@@ -935,15 +947,24 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp; - pte_t *ptep; - -- pmdp = pmd_alloc_map(mm, addr); -- if (unlikely(!pmdp)) -+ /* -+ * If we don't have a PTE table and if there is no huge page mapped, -+ * the storage key is 0. 
-+ */ -+ *key = 0; -+ -+ switch (pmd_lookup(mm, addr, &pmdp)) { -+ case -ENOENT: -+ return 0; -+ case 0: -+ break; -+ default: - return -EFAULT; -+ } - - ptl = pmd_lock(mm, pmdp); - if (!pmd_present(*pmdp)) { -- /* Not yet mapped memory has a zero key */ - spin_unlock(ptl); -- *key = 0; - return 0; - } - -@@ -988,6 +1009,7 @@ EXPORT_SYMBOL(get_guest_storage_key); - int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, - unsigned long *oldpte, unsigned long *oldpgste) - { -+ struct vm_area_struct *vma; - unsigned long pgstev; - spinlock_t *ptl; - pgste_t pgste; -@@ -997,6 +1019,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, - WARN_ON_ONCE(orc > ESSA_MAX); - if (unlikely(orc > ESSA_MAX)) - return -EINVAL; -+ -+ vma = vma_lookup(mm, hva); -+ if (!vma || is_vm_hugetlb_page(vma)) -+ return -EFAULT; - ptep = get_locked_pte(mm, hva, &ptl); - if (unlikely(!ptep)) - return -EFAULT; -@@ -1089,10 +1115,14 @@ EXPORT_SYMBOL(pgste_perform_essa); - int set_pgste_bits(struct mm_struct *mm, unsigned long hva, - unsigned long bits, unsigned long value) - { -+ struct vm_area_struct *vma; - spinlock_t *ptl; - pgste_t new; - pte_t *ptep; - -+ vma = vma_lookup(mm, hva); -+ if (!vma || is_vm_hugetlb_page(vma)) -+ return -EFAULT; - ptep = get_locked_pte(mm, hva, &ptl); - if (unlikely(!ptep)) - return -EFAULT; -@@ -1117,9 +1147,13 @@ EXPORT_SYMBOL(set_pgste_bits); - */ - int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) - { -+ struct vm_area_struct *vma; - spinlock_t *ptl; - pte_t *ptep; - -+ vma = vma_lookup(mm, hva); -+ if (!vma || is_vm_hugetlb_page(vma)) -+ return -EFAULT; - ptep = get_locked_pte(mm, hva, &ptl); - if (unlikely(!ptep)) - return -EFAULT; -diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c -index 2b1c6d916cf9c..39912629b0619 100644 ---- a/arch/s390/mm/vmem.c -+++ b/arch/s390/mm/vmem.c -@@ -297,7 +297,7 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start) - if (end > VMALLOC_START) - return; - #ifdef CONFIG_KASAN -- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) -+ if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) - return; - #endif - pmd = pmd_offset(pud, start); -@@ -372,7 +372,7 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start) - if (end > VMALLOC_START) - return; - #ifdef CONFIG_KASAN -- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) -+ if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) - return; - #endif - -@@ -426,7 +426,7 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start) - if (end > VMALLOC_START) - return; - #ifdef CONFIG_KASAN -- if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end) -+ if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START) - return; - #endif - -diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c -index b833155ce8381..56c4cecdbbf9e 100644 ---- a/arch/s390/pci/pci.c -+++ b/arch/s390/pci/pci.c -@@ -69,6 +69,7 @@ struct zpci_dev *get_zdev_by_fid(u32 fid) - list_for_each_entry(tmp, &zpci_list, entry) { - if (tmp->fid == fid) { - zdev = tmp; -+ zpci_zdev_get(zdev); - break; - } - } -@@ -502,8 +503,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, - return r; - } - --int zpci_setup_bus_resources(struct zpci_dev *zdev, -- struct list_head *resources) -+int zpci_setup_bus_resources(struct zpci_dev *zdev) - { - unsigned long addr, size, flags; - struct resource *res; -@@ -539,7 +539,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, - return -ENOMEM; - } - 
zdev->bars[i].res = res; -- pci_add_resource(resources, res); - } - zdev->has_resources = 1; - -@@ -548,17 +547,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, - - static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) - { -+ struct resource *res; - int i; - -+ pci_lock_rescan_remove(); - for (i = 0; i < PCI_STD_NUM_BARS; i++) { -- if (!zdev->bars[i].size || !zdev->bars[i].res) -+ res = zdev->bars[i].res; -+ if (!res) - continue; - -+ release_resource(res); -+ pci_bus_remove_resource(zdev->zbus->bus, res); - zpci_free_iomap(zdev, zdev->bars[i].map_idx); -- release_resource(zdev->bars[i].res); -- kfree(zdev->bars[i].res); -+ zdev->bars[i].res = NULL; -+ kfree(res); - } - zdev->has_resources = 0; -+ pci_unlock_rescan_remove(); - } - - int pcibios_add_device(struct pci_dev *pdev) -diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c -index 5d77acbd1c872..cc7e5b22ccfb3 100644 ---- a/arch/s390/pci/pci_bus.c -+++ b/arch/s390/pci/pci_bus.c -@@ -41,9 +41,7 @@ static int zpci_nb_devices; - */ - static int zpci_bus_prepare_device(struct zpci_dev *zdev) - { -- struct resource_entry *window, *n; -- struct resource *res; -- int rc; -+ int rc, i; - - if (!zdev_enabled(zdev)) { - rc = zpci_enable_device(zdev); -@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev) - } - - if (!zdev->has_resources) { -- zpci_setup_bus_resources(zdev, &zdev->zbus->resources); -- resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) { -- res = window->res; -- pci_bus_add_resource(zdev->zbus->bus, res, 0); -+ zpci_setup_bus_resources(zdev); -+ for (i = 0; i < PCI_STD_NUM_BARS; i++) { -+ if (zdev->bars[i].res) -+ pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0); - } - } - -diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h -index e359d2686178b..c5aa9a2e5e3e5 100644 ---- a/arch/s390/pci/pci_bus.h -+++ b/arch/s390/pci/pci_bus.h -@@ -19,7 +19,8 @@ void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error); - void zpci_release_device(struct kref *kref); - static inline void zpci_zdev_put(struct zpci_dev *zdev) - { -- kref_put(&zdev->kref, zpci_release_device); -+ if (zdev) -+ kref_put(&zdev->kref, zpci_release_device); - } - - static inline void zpci_zdev_get(struct zpci_dev *zdev) -@@ -29,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev) - - int zpci_alloc_domain(int domain); - void zpci_free_domain(int domain); --int zpci_setup_bus_resources(struct zpci_dev *zdev, -- struct list_head *resources); -+int zpci_setup_bus_resources(struct zpci_dev *zdev); - - static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus, - unsigned int devfn) -diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c -index be077b39da336..5011d27461fd3 100644 ---- a/arch/s390/pci/pci_clp.c -+++ b/arch/s390/pci/pci_clp.c -@@ -22,6 +22,8 @@ - #include - #include - -+#include "pci_bus.h" -+ - bool zpci_unique_uid; - - void update_uid_checking(bool new) -@@ -403,8 +405,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) - return; - - zdev = get_zdev_by_fid(entry->fid); -- if (!zdev) -- zpci_create_device(entry->fid, entry->fh, entry->config_state); -+ if (zdev) { -+ zpci_zdev_put(zdev); -+ return; -+ } -+ zpci_create_device(entry->fid, entry->fh, entry->config_state); - } - - int clp_scan_pci_devices(void) -diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c -index 5b8d647523f96..6d57625b8ed99 100644 ---- a/arch/s390/pci/pci_event.c -+++ b/arch/s390/pci/pci_event.c -@@ -62,10 +62,12 @@ static 
void __zpci_event_error(struct zpci_ccdf_err *ccdf) - pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); - - if (!pdev) -- return; -+ goto no_pdev; - - pdev->error_state = pci_channel_io_perm_failure; - pci_dev_put(pdev); -+no_pdev: -+ zpci_zdev_put(zdev); - } - - void zpci_event_error(void *data) -@@ -94,6 +96,7 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) - static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) - { - struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); -+ bool existing_zdev = !!zdev; - enum zpci_state state; - - zpci_err("avail CCDF:\n"); -@@ -156,6 +159,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) - default: - break; - } -+ if (existing_zdev) -+ zpci_zdev_put(zdev); - } - - void zpci_event_availability(void *data) -diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c -index c5b35ea129cfa..b94163ee5632c 100644 ---- a/arch/s390/pci/pci_mmio.c -+++ b/arch/s390/pci/pci_mmio.c -@@ -63,7 +63,7 @@ static inline int __pcistg_mio_inuser( - asm volatile ( - " sacf 256\n" - "0: llgc %[tmp],0(%[src])\n" -- " sllg %[val],%[val],8\n" -+ "4: sllg %[val],%[val],8\n" - " aghi %[src],1\n" - " ogr %[val],%[tmp]\n" - " brctg %[cnt],0b\n" -@@ -71,7 +71,7 @@ static inline int __pcistg_mio_inuser( - "2: ipm %[cc]\n" - " srl %[cc],28\n" - "3: sacf 768\n" -- EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) -+ EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b) - : - [src] "+a" (src), [cnt] "+d" (cnt), - [val] "+d" (val), [tmp] "=d" (tmp), -@@ -214,10 +214,10 @@ static inline int __pcilg_mio_inuser( - "2: ahi %[shift],-8\n" - " srlg %[tmp],%[val],0(%[shift])\n" - "3: stc %[tmp],0(%[dst])\n" -- " aghi %[dst],1\n" -+ "5: aghi %[dst],1\n" - " brctg %[cnt],2b\n" - "4: sacf 768\n" -- EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) -+ EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b) - : - [ioaddr_len] "+&d" (ioaddr_len.pair), - [cc] "+d" (cc), [val] "=d" (val), -diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile -index 360ada80d20c3..d22ec8acb13c5 100644 ---- a/arch/s390/purgatory/Makefile -+++ b/arch/s390/purgatory/Makefile -@@ -26,6 +26,7 @@ KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare - KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding - KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common - KBUILD_CFLAGS += -fno-stack-protector -+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING - KBUILD_CFLAGS += $(CLANG_FLAGS) - KBUILD_CFLAGS += $(call cc-option,-fno-PIE) - KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) -diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig -index 6904f4bdbf004..101b95f26a91c 100644 ---- a/arch/sh/Kconfig -+++ b/arch/sh/Kconfig -@@ -7,6 +7,7 @@ config SUPERH - select ARCH_HAVE_CUSTOM_GPIO_H - select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) - select ARCH_HAS_BINFMT_FLAT if !MMU -+ select ARCH_HAS_CPU_FINALIZE_INIT - select ARCH_HAS_GIGANTIC_PAGE - select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_HAS_PTE_SPECIAL -diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug -index 958f790273ab9..c449e7c1b20ff 100644 ---- a/arch/sh/Kconfig.debug -+++ b/arch/sh/Kconfig.debug -@@ -15,7 +15,7 @@ config SH_STANDARD_BIOS - - config STACK_DEBUG - bool "Check for stack overflows" -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && PRINTK - help - This option will cause messages to be printed if free stack space - drops below a certain limit. 
Saying Y here will add overhead to -@@ -54,6 +54,7 @@ config DUMP_CODE - - config DWARF_UNWINDER - bool "Enable the DWARF unwinder for stacktraces" -+ depends on DEBUG_KERNEL - select FRAME_POINTER - default n - help -diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c -index bac8a058ebd7c..05bd42dde107b 100644 ---- a/arch/sh/boards/mach-ap325rxa/setup.c -+++ b/arch/sh/boards/mach-ap325rxa/setup.c -@@ -530,7 +530,7 @@ static int __init ap325rxa_devices_setup(void) - device_initialize(&ap325rxa_ceu_device.dev); - dma_declare_coherent_memory(&ap325rxa_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, -- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - - platform_device_add(&ap325rxa_ceu_device); - -diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c -index bab91a99124e1..9730a992dab33 100644 ---- a/arch/sh/boards/mach-ecovec24/setup.c -+++ b/arch/sh/boards/mach-ecovec24/setup.c -@@ -1454,15 +1454,13 @@ static int __init arch_setup(void) - device_initialize(&ecovec_ceu_devices[0]->dev); - dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev, - ceu0_dma_membase, ceu0_dma_membase, -- ceu0_dma_membase + -- CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - platform_device_add(ecovec_ceu_devices[0]); - - device_initialize(&ecovec_ceu_devices[1]->dev); - dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev, - ceu1_dma_membase, ceu1_dma_membase, -- ceu1_dma_membase + -- CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - platform_device_add(ecovec_ceu_devices[1]); - - gpiod_add_lookup_table(&cn12_power_gpiod_table); -diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c -index eeb5ce341efdd..4a1caa3e7cf5a 100644 ---- a/arch/sh/boards/mach-kfr2r09/setup.c -+++ b/arch/sh/boards/mach-kfr2r09/setup.c -@@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void) - device_initialize(&kfr2r09_ceu_device.dev); - dma_declare_coherent_memory(&kfr2r09_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, -- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - - platform_device_add(&kfr2r09_ceu_device); - -diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c -index 6703a2122c0d6..bd4ccd9f8dd06 100644 ---- a/arch/sh/boards/mach-migor/setup.c -+++ b/arch/sh/boards/mach-migor/setup.c -@@ -604,7 +604,7 @@ static int __init migor_devices_setup(void) - device_initialize(&migor_ceu_device.dev); - dma_declare_coherent_memory(&migor_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, -- ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - - platform_device_add(&migor_ceu_device); - -diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c -index 8d6541ba01865..edc7712e4a804 100644 ---- a/arch/sh/boards/mach-se/7724/setup.c -+++ b/arch/sh/boards/mach-se/7724/setup.c -@@ -940,15 +940,13 @@ static int __init devices_setup(void) - device_initialize(&ms7724se_ceu_devices[0]->dev); - dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev, - ceu0_dma_membase, ceu0_dma_membase, -- ceu0_dma_membase + -- CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - platform_device_add(ms7724se_ceu_devices[0]); - - device_initialize(&ms7724se_ceu_devices[1]->dev); - dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev, - ceu1_dma_membase, ceu1_dma_membase, -- ceu1_dma_membase + -- CEU_BUFFER_MEMORY_SIZE - 1); -+ CEU_BUFFER_MEMORY_SIZE); - 
platform_device_add(ms7724se_ceu_devices[1]); - - return platform_add_devices(ms7724se_devices, -diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig -index ba887f1351be6..cd5c58916c65a 100644 ---- a/arch/sh/configs/titan_defconfig -+++ b/arch/sh/configs/titan_defconfig -@@ -242,7 +242,6 @@ CONFIG_NFSD=y - CONFIG_NFSD_V3=y - CONFIG_SMB_FS=m - CONFIG_CIFS=m --CONFIG_CIFS_WEAK_PW_HASH=y - CONFIG_PARTITION_ADVANCED=y - CONFIG_NLS_CODEPAGE_437=m - CONFIG_NLS_ASCII=m -diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c -index 96c626c2cd0a4..306fba1564e5e 100644 ---- a/arch/sh/drivers/dma/dma-sh.c -+++ b/arch/sh/drivers/dma/dma-sh.c -@@ -18,6 +18,18 @@ - #include - #include - -+/* -+ * Some of the SoCs feature two DMAC modules. In such a case, the channels are -+ * distributed equally among them. -+ */ -+#ifdef SH_DMAC_BASE1 -+#define SH_DMAC_NR_MD_CH (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2) -+#else -+#define SH_DMAC_NR_MD_CH CONFIG_NR_ONCHIP_DMA_CHANNELS -+#endif -+ -+#define SH_DMAC_CH_SZ 0x10 -+ - /* - * Define the default configuration for dual address memory-memory transfer. - * The 0x400 value represents auto-request, external->external. -@@ -29,7 +41,7 @@ static unsigned long dma_find_base(unsigned int chan) - unsigned long base = SH_DMAC_BASE0; - - #ifdef SH_DMAC_BASE1 -- if (chan >= 6) -+ if (chan >= SH_DMAC_NR_MD_CH) - base = SH_DMAC_BASE1; - #endif - -@@ -40,13 +52,13 @@ static unsigned long dma_base_addr(unsigned int chan) - { - unsigned long base = dma_find_base(chan); - -- /* Normalize offset calculation */ -- if (chan >= 9) -- chan -= 6; -- if (chan >= 4) -- base += 0x10; -+ chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ; -+ -+ /* DMAOR is placed inside the channel register space. Step over it. */ -+ if (chan >= DMAOR) -+ base += SH_DMAC_CH_SZ; - -- return base + (chan * 0x10); -+ return base + chan; - } - - #ifdef CONFIG_SH_DMA_IRQ_MULTI -@@ -250,12 +262,11 @@ static int sh_dmac_get_dma_residue(struct dma_channel *chan) - #define NR_DMAOR 1 - #endif - --/* -- * DMAOR bases are broken out amongst channel groups. DMAOR0 manages -- * channels 0 - 5, DMAOR1 6 - 11 (optional). -- */ --#define dmaor_read_reg(n) __raw_readw(dma_find_base((n)*6)) --#define dmaor_write_reg(n, data) __raw_writew(data, dma_find_base(n)*6) -+#define dmaor_read_reg(n) __raw_readw(dma_find_base((n) * \ -+ SH_DMAC_NR_MD_CH) + DMAOR) -+#define dmaor_write_reg(n, data) __raw_writew(data, \ -+ dma_find_base((n) * \ -+ SH_DMAC_NR_MD_CH) + DMAOR) - - static inline int dmaor_reset(int no) - { -diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h -deleted file mode 100644 -index fe52abb69cea3..0000000000000 ---- a/arch/sh/include/asm/bugs.h -+++ /dev/null -@@ -1,74 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --#ifndef __ASM_SH_BUGS_H --#define __ASM_SH_BUGS_H -- --/* -- * This is included by init/main.c to check for architecture-dependent bugs. -- * -- * Needs: -- * void check_bugs(void); -- */ -- --/* -- * I don't know of any Super-H bugs yet. 
-- */ -- --#include -- --extern void select_idle_routine(void); -- --static void __init check_bugs(void) --{ -- extern unsigned long loops_per_jiffy; -- char *p = &init_utsname()->machine[2]; /* "sh" */ -- -- select_idle_routine(); -- -- current_cpu_data.loops_per_jiffy = loops_per_jiffy; -- -- switch (current_cpu_data.family) { -- case CPU_FAMILY_SH2: -- *p++ = '2'; -- break; -- case CPU_FAMILY_SH2A: -- *p++ = '2'; -- *p++ = 'a'; -- break; -- case CPU_FAMILY_SH3: -- *p++ = '3'; -- break; -- case CPU_FAMILY_SH4: -- *p++ = '4'; -- break; -- case CPU_FAMILY_SH4A: -- *p++ = '4'; -- *p++ = 'a'; -- break; -- case CPU_FAMILY_SH4AL_DSP: -- *p++ = '4'; -- *p++ = 'a'; -- *p++ = 'l'; -- *p++ = '-'; -- *p++ = 'd'; -- *p++ = 's'; -- *p++ = 'p'; -- break; -- case CPU_FAMILY_UNKNOWN: -- /* -- * Specifically use CPU_FAMILY_UNKNOWN rather than -- * default:, so we're able to have the compiler whine -- * about unhandled enumerations. -- */ -- break; -- } -- -- printk("CPU: %s\n", get_cpu_subtype(¤t_cpu_data)); -- --#ifndef __LITTLE_ENDIAN__ -- /* 'eb' means 'Endian Big' */ -- *p++ = 'e'; -- *p++ = 'b'; --#endif -- *p = '\0'; --} --#endif /* __ASM_SH_BUGS_H */ -diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h -index cf9a3ec32406f..fba90e670ed41 100644 ---- a/arch/sh/include/asm/io.h -+++ b/arch/sh/include/asm/io.h -@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - #endif /* CONFIG_HAVE_IOREMAP_PROT */ - - #else /* CONFIG_MMU */ --#define iounmap(addr) do { } while (0) --#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset)) -+static inline void __iomem *ioremap(phys_addr_t offset, size_t size) -+{ -+ return (void __iomem *)(unsigned long)offset; -+} -+ -+static inline void iounmap(volatile void __iomem *addr) { } - #endif /* CONFIG_MMU */ - - #define ioremap_uc ioremap -diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h -index 3820d698846e0..97af2d9b02693 100644 ---- a/arch/sh/include/asm/processor.h -+++ b/arch/sh/include/asm/processor.h -@@ -167,6 +167,8 @@ extern unsigned int instruction_size(unsigned int insn); - #define instruction_size(insn) (2) - #endif - -+void select_idle_routine(void); -+ - #endif /* __ASSEMBLY__ */ - - #include -diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h -index aa92cc933889d..6c7966e627758 100644 ---- a/arch/sh/include/asm/processor_32.h -+++ b/arch/sh/include/asm/processor_32.h -@@ -50,6 +50,7 @@ - #define SR_FD 0x00008000 - #define SR_MD 0x40000000 - -+#define SR_USER_MASK 0x00000303 // M, Q, S, T bits - /* - * DSP structure and data - */ -diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h -index 8edb824049b9e..0cb0ca149ac34 100644 ---- a/arch/sh/include/asm/sections.h -+++ b/arch/sh/include/asm/sections.h -@@ -4,7 +4,7 @@ - - #include - --extern long __machvec_start, __machvec_end; -+extern char __machvec_start[], __machvec_end[]; - extern char __uncached_start, __uncached_end; - extern char __start_eh_frame[], __stop_eh_frame[]; - -diff --git a/arch/sh/include/asm/sfp-machine.h b/arch/sh/include/asm/sfp-machine.h -index cbc7cf8c97ce6..2d2423478b71d 100644 ---- a/arch/sh/include/asm/sfp-machine.h -+++ b/arch/sh/include/asm/sfp-machine.h -@@ -13,6 +13,14 @@ - #ifndef _SFP_MACHINE_H - #define _SFP_MACHINE_H - -+#ifdef __BIG_ENDIAN__ -+#define __BYTE_ORDER __BIG_ENDIAN -+#define __LITTLE_ENDIAN 0 -+#else -+#define __BYTE_ORDER __LITTLE_ENDIAN -+#define __BIG_ENDIAN 0 -+#endif -+ - #define 
_FP_W_TYPE_SIZE 32 - #define _FP_W_TYPE unsigned long - #define _FP_WS_TYPE signed long -diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c -index ae354a2931e7e..fd6db0ab19288 100644 ---- a/arch/sh/kernel/cpu/fpu.c -+++ b/arch/sh/kernel/cpu/fpu.c -@@ -62,18 +62,20 @@ void fpu_state_restore(struct pt_regs *regs) - } - - if (!tsk_used_math(tsk)) { -- local_irq_enable(); -+ int ret; - /* - * does a slab alloc which can sleep - */ -- if (init_fpu(tsk)) { -+ local_irq_enable(); -+ ret = init_fpu(tsk); -+ local_irq_disable(); -+ if (ret) { - /* - * ran out of memory! - */ -- do_group_exit(SIGKILL); -+ force_sig(SIGKILL); - return; - } -- local_irq_disable(); - } - - grab_fpu(regs); -diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c -index d342ea08843f6..70a07f4f2142f 100644 ---- a/arch/sh/kernel/cpu/sh2/probe.c -+++ b/arch/sh/kernel/cpu/sh2/probe.c -@@ -21,7 +21,7 @@ static int __init scan_cache(unsigned long node, const char *uname, - if (!of_flat_dt_is_compatible(node, "jcore,cache")) - return 0; - -- j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node); -+ j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4); - - return 1; - } -diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c -index d432164b23b7c..c31ec0fea3003 100644 ---- a/arch/sh/kernel/cpu/sh4/sq.c -+++ b/arch/sh/kernel/cpu/sh4/sq.c -@@ -381,7 +381,7 @@ static int __init sq_api_init(void) - if (unlikely(!sq_cache)) - return ret; - -- sq_bitmap = kzalloc(size, GFP_KERNEL); -+ sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL); - if (unlikely(!sq_bitmap)) - goto out; - -diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c -index f8a2bec0f260b..1261dc7b84e8b 100644 ---- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c -+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c -@@ -73,8 +73,9 @@ static void shx3_prepare_cpus(unsigned int max_cpus) - BUILD_BUG_ON(SMP_MSG_NR >= 8); - - for (i = 0; i < SMP_MSG_NR; i++) -- request_irq(104 + i, ipi_interrupt_handler, -- IRQF_PERCPU, "IPI", (void *)(long)i); -+ if (request_irq(104 + i, ipi_interrupt_handler, -+ IRQF_PERCPU, "IPI", (void *)(long)i)) -+ pr_err("Failed to request irq %d\n", i); - - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); -diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S -index 4adbd4ade3194..b603b7968b388 100644 ---- a/arch/sh/kernel/head_32.S -+++ b/arch/sh/kernel/head_32.S -@@ -64,7 +64,7 @@ ENTRY(_stext) - ldc r0, r6_bank - #endif - --#ifdef CONFIG_OF_FLATTREE -+#ifdef CONFIG_OF_EARLY_FLATTREE - mov r4, r12 ! Store device tree blob pointer in r12 - #endif - -@@ -315,7 +315,7 @@ ENTRY(_stext) - 10: - #endif - --#ifdef CONFIG_OF_FLATTREE -+#ifdef CONFIG_OF_EARLY_FLATTREE - mov.l 8f, r0 ! Make flat device tree available early. 
- jsr @r0 - mov r12, r4 -@@ -346,7 +346,7 @@ ENTRY(stack_start) - 5: .long start_kernel - 6: .long cpu_init - 7: .long init_thread_union --#if defined(CONFIG_OF_FLATTREE) -+#if defined(CONFIG_OF_EARLY_FLATTREE) - 8: .long sh_fdt_init - #endif - -diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c -index f59814983bd59..a80b2a5b25c7f 100644 ---- a/arch/sh/kernel/idle.c -+++ b/arch/sh/kernel/idle.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - -diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c -index d606679a211e1..57efaf5b82ae0 100644 ---- a/arch/sh/kernel/machvec.c -+++ b/arch/sh/kernel/machvec.c -@@ -20,8 +20,8 @@ - #define MV_NAME_SIZE 32 - - #define for_each_mv(mv) \ -- for ((mv) = (struct sh_machine_vector *)&__machvec_start; \ -- (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \ -+ for ((mv) = (struct sh_machine_vector *)__machvec_start; \ -+ (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ - (mv)++) - - static struct sh_machine_vector * __init get_mv_byname(const char *name) -@@ -87,8 +87,8 @@ void __init sh_mv_setup(void) - if (!machvec_selected) { - unsigned long machvec_size; - -- machvec_size = ((unsigned long)&__machvec_end - -- (unsigned long)&__machvec_start); -+ machvec_size = ((unsigned long)__machvec_end - -+ (unsigned long)__machvec_start); - - /* - * Sanity check for machvec section alignment. Ensure -@@ -102,7 +102,7 @@ void __init sh_mv_setup(void) - * vector (usually the only one) from .machvec.init. - */ - if (machvec_size >= sizeof(struct sh_machine_vector)) -- sh_mv = *(struct sh_machine_vector *)&__machvec_start; -+ sh_mv = *(struct sh_machine_vector *)__machvec_start; - } - - pr_notice("Booting machvec: %s\n", get_system_type()); -diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c -index 11777867c6f5f..a212b645b4cf8 100644 ---- a/arch/sh/kernel/nmi_debug.c -+++ b/arch/sh/kernel/nmi_debug.c -@@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str) - register_die_notifier(&nmi_debug_nb); - - if (*str != '=') -- return 0; -+ return 1; - - for (p = str + 1; *p; p = sep + 1) { - sep = strchr(p, ','); -@@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str) - break; - } - -- return 0; -+ return 1; - } - __setup("nmi_debug", nmi_debug_setup); -diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c -index 1fcb6659822a3..cf7c0f72f2935 100644 ---- a/arch/sh/kernel/setup.c -+++ b/arch/sh/kernel/setup.c -@@ -43,6 +43,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -244,7 +245,7 @@ void __init __weak plat_early_device_setup(void) - { - } - --#ifdef CONFIG_OF_FLATTREE -+#ifdef CONFIG_OF_EARLY_FLATTREE - void __ref sh_fdt_init(phys_addr_t dt_phys) - { - static int done = 0; -@@ -326,7 +327,7 @@ void __init setup_arch(char **cmdline_p) - /* Let earlyprintk output early console messages */ - sh_early_platform_driver_probe("earlyprintk", 1, 1); - --#ifdef CONFIG_OF_FLATTREE -+#ifdef CONFIG_OF_EARLY_FLATTREE - #ifdef CONFIG_USE_BUILTIN_DTB - unflatten_and_copy_device_tree(); - #else -@@ -354,3 +355,57 @@ int test_mode_pin(int pin) - { - return sh_mv.mv_mode_pins() & pin; - } -+ -+void __init arch_cpu_finalize_init(void) -+{ -+ char *p = &init_utsname()->machine[2]; /* "sh" */ -+ -+ select_idle_routine(); -+ -+ current_cpu_data.loops_per_jiffy = loops_per_jiffy; -+ -+ switch (current_cpu_data.family) { -+ case CPU_FAMILY_SH2: -+ *p++ = '2'; -+ break; -+ case CPU_FAMILY_SH2A: -+ *p++ = '2'; -+ *p++ = 'a'; -+ break; -+ case 
CPU_FAMILY_SH3: -+ *p++ = '3'; -+ break; -+ case CPU_FAMILY_SH4: -+ *p++ = '4'; -+ break; -+ case CPU_FAMILY_SH4A: -+ *p++ = '4'; -+ *p++ = 'a'; -+ break; -+ case CPU_FAMILY_SH4AL_DSP: -+ *p++ = '4'; -+ *p++ = 'a'; -+ *p++ = 'l'; -+ *p++ = '-'; -+ *p++ = 'd'; -+ *p++ = 's'; -+ *p++ = 'p'; -+ break; -+ case CPU_FAMILY_UNKNOWN: -+ /* -+ * Specifically use CPU_FAMILY_UNKNOWN rather than -+ * default:, so we're able to have the compiler whine -+ * about unhandled enumerations. -+ */ -+ break; -+ } -+ -+ pr_info("CPU: %s\n", get_cpu_subtype(¤t_cpu_data)); -+ -+#ifndef __LITTLE_ENDIAN__ -+ /* 'eb' means 'Endian Big' */ -+ *p++ = 'e'; -+ *p++ = 'b'; -+#endif -+ *p = '\0'; -+} -diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c -index dd3092911efad..dc13702003f0f 100644 ---- a/arch/sh/kernel/signal_32.c -+++ b/arch/sh/kernel/signal_32.c -@@ -115,6 +115,7 @@ static int - restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) - { - unsigned int err = 0; -+ unsigned int sr = regs->sr & ~SR_USER_MASK; - - #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) - COPY(regs[1]); -@@ -130,6 +131,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p - COPY(sr); COPY(pc); - #undef COPY - -+ regs->sr = (regs->sr & SR_USER_MASK) | sr; -+ - #ifdef CONFIG_SH_FPU - if (boot_cpu_data.flags & CPU_HAS_FPU) { - int owned_fp; -diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c -index e76b221570999..361b764700b74 100644 ---- a/arch/sh/kernel/traps.c -+++ b/arch/sh/kernel/traps.c -@@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err) - if (panic_on_oops) - panic("Fatal exception"); - -- do_exit(SIGSEGV); -+ make_task_dead(SIGSEGV); - } - - void die_if_kernel(const char *str, struct pt_regs *regs, long err) -diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S -index 3161b9ccd2a57..b6276a3521d73 100644 ---- a/arch/sh/kernel/vmlinux.lds.S -+++ b/arch/sh/kernel/vmlinux.lds.S -@@ -4,6 +4,7 @@ - * Written by Niibe Yutaka and Paul Mundt - */ - OUTPUT_ARCH(sh) -+#define RUNTIME_DISCARD_EXIT - #include - #include - #include -diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c -index e8be0eca0444a..615ba932c398e 100644 ---- a/arch/sh/math-emu/math.c -+++ b/arch/sh/math-emu/math.c -@@ -467,109 +467,6 @@ static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_reg - return id_sys(fregs, regs, code); - } - --/** -- * denormal_to_double - Given denormalized float number, -- * store double float -- * -- * @fpu: Pointer to sh_fpu_soft structure -- * @n: Index to FP register -- */ --static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n) --{ -- unsigned long du, dl; -- unsigned long x = fpu->fpul; -- int exp = 1023 - 126; -- -- if (x != 0 && (x & 0x7f800000) == 0) { -- du = (x & 0x80000000); -- while ((x & 0x00800000) == 0) { -- x <<= 1; -- exp--; -- } -- x &= 0x007fffff; -- du |= (exp << 20) | (x >> 3); -- dl = x << 29; -- -- fpu->fp_regs[n] = du; -- fpu->fp_regs[n+1] = dl; -- } --} -- --/** -- * ieee_fpe_handler - Handle denormalized number exception -- * -- * @regs: Pointer to register structure -- * -- * Returns 1 when it's handled (should not cause exception). 
-- */ --static int ieee_fpe_handler(struct pt_regs *regs) --{ -- unsigned short insn = *(unsigned short *)regs->pc; -- unsigned short finsn; -- unsigned long nextpc; -- int nib[4] = { -- (insn >> 12) & 0xf, -- (insn >> 8) & 0xf, -- (insn >> 4) & 0xf, -- insn & 0xf}; -- -- if (nib[0] == 0xb || -- (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ -- regs->pr = regs->pc + 4; -- -- if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ -- nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); -- finsn = *(unsigned short *) (regs->pc + 2); -- } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ -- if (regs->sr & 1) -- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); -- else -- nextpc = regs->pc + 4; -- finsn = *(unsigned short *) (regs->pc + 2); -- } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ -- if (regs->sr & 1) -- nextpc = regs->pc + 4; -- else -- nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); -- finsn = *(unsigned short *) (regs->pc + 2); -- } else if (nib[0] == 0x4 && nib[3] == 0xb && -- (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ -- nextpc = regs->regs[nib[1]]; -- finsn = *(unsigned short *) (regs->pc + 2); -- } else if (nib[0] == 0x0 && nib[3] == 0x3 && -- (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ -- nextpc = regs->pc + 4 + regs->regs[nib[1]]; -- finsn = *(unsigned short *) (regs->pc + 2); -- } else if (insn == 0x000b) { /* rts */ -- nextpc = regs->pr; -- finsn = *(unsigned short *) (regs->pc + 2); -- } else { -- nextpc = regs->pc + 2; -- finsn = insn; -- } -- -- if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ -- struct task_struct *tsk = current; -- -- if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) { -- /* FPU error */ -- denormal_to_double (&tsk->thread.xstate->softfpu, -- (finsn >> 8) & 0xf); -- tsk->thread.xstate->softfpu.fpscr &= -- ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); -- task_thread_info(tsk)->status |= TS_USEDFPU; -- } else { -- force_sig_fault(SIGFPE, FPE_FLTINV, -- (void __user *)regs->pc); -- } -- -- regs->pc = nextpc; -- return 1; -- } -- -- return 0; --} -- - /** - * fpu_init - Initialize FPU registers - * @fpu: Pointer to software emulated FPU registers. -diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h -index 784f541344f36..bda50762b3d33 100644 ---- a/arch/sh/math-emu/sfp-util.h -+++ b/arch/sh/math-emu/sfp-util.h -@@ -67,7 +67,3 @@ - } while (0) - - #define abort() return 0 -- --#define __BYTE_ORDER __LITTLE_ENDIAN -- -- -diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig -index b120ed947f50b..1176f0de6a0f4 100644 ---- a/arch/sparc/Kconfig -+++ b/arch/sparc/Kconfig -@@ -52,6 +52,7 @@ config SPARC - config SPARC32 - def_bool !64BIT - select ARCH_32BIT_OFF_T -+ select ARCH_HAS_CPU_FINALIZE_INIT if !SMP - select ARCH_HAS_SYNC_DMA_FOR_CPU - select GENERIC_ATOMIC64 - select CLZ_TAB -@@ -286,7 +287,7 @@ config FORCE_MAX_ZONEORDER - This config option is actually maximum order plus one. For example, - a value of 13 means that the largest free memory block is 2^12 pages. 
- --if SPARC64 -+if SPARC64 || COMPILE_TEST - source "kernel/power/Kconfig" - endif - -diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile -index 849236d4eca48..45e5c76d449ea 100644 ---- a/arch/sparc/boot/Makefile -+++ b/arch/sparc/boot/Makefile -@@ -22,7 +22,7 @@ ifeq ($(CONFIG_SPARC64),y) - - # Actual linking - --$(obj)/zImage: $(obj)/image -+$(obj)/zImage: $(obj)/image FORCE - $(call if_changed,gzip) - @echo ' kernel: $@ is ready' - -@@ -31,7 +31,7 @@ $(obj)/vmlinux.aout: vmlinux FORCE - @echo ' kernel: $@ is ready' - else - --$(obj)/zImage: $(obj)/image -+$(obj)/zImage: $(obj)/image FORCE - $(call if_changed,strip) - @echo ' kernel: $@ is ready' - -@@ -44,7 +44,7 @@ OBJCOPYFLAGS_image.bin := -S -O binary -R .note -R .comment - $(obj)/image.bin: $(obj)/image FORCE - $(call if_changed,objcopy) - --$(obj)/image.gz: $(obj)/image.bin -+$(obj)/image.gz: $(obj)/image.bin FORCE - $(call if_changed,gzip) - - UIMAGE_LOADADDR = $(CONFIG_UBOOT_LOAD_ADDR) -@@ -56,7 +56,7 @@ quiet_cmd_uimage.o = UIMAGE.O $@ - -r -b binary $@ -o $@.o - - targets += uImage --$(obj)/uImage: $(obj)/image.gz -+$(obj)/uImage: $(obj)/image.gz FORCE - $(call if_changed,uimage) - $(call if_changed,uimage.o) - @echo ' Image $@ is ready' -diff --git a/arch/sparc/include/asm/bugs.h b/arch/sparc/include/asm/bugs.h -deleted file mode 100644 -index 02fa369b9c21f..0000000000000 ---- a/arch/sparc/include/asm/bugs.h -+++ /dev/null -@@ -1,18 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --/* include/asm/bugs.h: Sparc probes for various bugs. -- * -- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) -- */ -- --#ifdef CONFIG_SPARC32 --#include --#endif -- --extern unsigned long loops_per_jiffy; -- --static void __init check_bugs(void) --{ --#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) -- cpu_data(0).udelay_val = loops_per_jiffy; --#endif --} -diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h -index 542915b462097..f86326a6f89e0 100644 ---- a/arch/sparc/include/asm/timex_32.h -+++ b/arch/sparc/include/asm/timex_32.h -@@ -9,8 +9,6 @@ - - #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ - --/* XXX Maybe do something better at some point... 
-DaveM */ --typedef unsigned long cycles_t; --#define get_cycles() (0) -+#include - - #endif -diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c -index c8e0dd99f3700..c9d1ba4f311b9 100644 ---- a/arch/sparc/kernel/setup_32.c -+++ b/arch/sparc/kernel/setup_32.c -@@ -412,3 +412,10 @@ static int __init topology_init(void) - } - - subsys_initcall(topology_init); -+ -+#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) -+void __init arch_cpu_finalize_init(void) -+{ -+ cpu_data(0).udelay_val = loops_per_jiffy; -+} -+#endif -diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c -index 6cc124a3bb98a..90ff7ff94ea7f 100644 ---- a/arch/sparc/kernel/signal32.c -+++ b/arch/sparc/kernel/signal32.c -@@ -780,5 +780,6 @@ static_assert(offsetof(compat_siginfo_t, si_upper) == 0x18); - static_assert(offsetof(compat_siginfo_t, si_pkey) == 0x14); - static_assert(offsetof(compat_siginfo_t, si_perf_data) == 0x10); - static_assert(offsetof(compat_siginfo_t, si_perf_type) == 0x14); -+static_assert(offsetof(compat_siginfo_t, si_perf_flags) == 0x18); - static_assert(offsetof(compat_siginfo_t, si_band) == 0x0c); - static_assert(offsetof(compat_siginfo_t, si_fd) == 0x10); -diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c -index 02f3ad55dfe31..74f80443b195f 100644 ---- a/arch/sparc/kernel/signal_32.c -+++ b/arch/sparc/kernel/signal_32.c -@@ -65,7 +65,7 @@ struct rt_signal_frame { - */ - static inline bool invalid_frame_pointer(void __user *fp, int fplen) - { -- if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) -+ if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen)) - return true; - - return false; -@@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs, - get_sigframe(ksig, regs, sigframe_size); - - if (invalid_frame_pointer(sf, sigframe_size)) { -- do_exit(SIGILL); -+ force_exit_sig(SIGILL); - return -EINVAL; - } - -@@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, - sf = (struct rt_signal_frame __user *) - get_sigframe(ksig, regs, sigframe_size); - if (invalid_frame_pointer(sf, sigframe_size)) { -- do_exit(SIGILL); -+ force_exit_sig(SIGILL); - return -EINVAL; - } - -diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c -index 2a78d2af12655..6eeb766987d1a 100644 ---- a/arch/sparc/kernel/signal_64.c -+++ b/arch/sparc/kernel/signal_64.c -@@ -590,5 +590,6 @@ static_assert(offsetof(siginfo_t, si_upper) == 0x28); - static_assert(offsetof(siginfo_t, si_pkey) == 0x20); - static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); - static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); -+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); - static_assert(offsetof(siginfo_t, si_band) == 0x10); - static_assert(offsetof(siginfo_t, si_fd) == 0x14); -diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c -index 5630e5a395e0d..179aabfa712ea 100644 ---- a/arch/sparc/kernel/traps_32.c -+++ b/arch/sparc/kernel/traps_32.c -@@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) - } - printk("Instruction DUMP:"); - instruction_dump ((unsigned long *) regs->pc); -- if(regs->psr & PSR_PS) -- do_exit(SIGKILL); -- do_exit(SIGSEGV); -+ make_task_dead((regs->psr & PSR_PS) ? 
SIGKILL : SIGSEGV); - } - - void do_hw_interrupt(struct pt_regs *regs, unsigned long type) -diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c -index 6863025ed56d2..21077821f4272 100644 ---- a/arch/sparc/kernel/traps_64.c -+++ b/arch/sparc/kernel/traps_64.c -@@ -2559,9 +2559,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) - } - if (panic_on_oops) - panic("Fatal exception"); -- if (regs->tstate & TSTATE_PRIV) -- do_exit(SIGKILL); -- do_exit(SIGSEGV); -+ make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV); - } - EXPORT_SYMBOL(die_if_kernel); - -diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c -index 69a6ba6e92937..8f20862ccc83e 100644 ---- a/arch/sparc/kernel/windows.c -+++ b/arch/sparc/kernel/windows.c -@@ -121,8 +121,10 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) - - if ((sp & 7) || - copy_to_user((char __user *) sp, &tp->reg_window[window], -- sizeof(struct reg_window32))) -- do_exit(SIGILL); -+ sizeof(struct reg_window32))) { -+ force_exit_sig(SIGILL); -+ return; -+ } - } - tp->w_saved = 0; - } -diff --git a/arch/um/.gitignore b/arch/um/.gitignore -index 6323e5571887e..d69ea5b562cee 100644 ---- a/arch/um/.gitignore -+++ b/arch/um/.gitignore -@@ -2,3 +2,4 @@ - kernel/config.c - kernel/config.tmp - kernel/vmlinux.lds -+kernel/capflags.c -diff --git a/arch/um/Kconfig b/arch/um/Kconfig -index c18b45f75d41f..b0584453d2a0b 100644 ---- a/arch/um/Kconfig -+++ b/arch/um/Kconfig -@@ -6,6 +6,7 @@ config UML - bool - default y - select ARCH_EPHEMERAL_INODES -+ select ARCH_HAS_CPU_FINALIZE_INIT - select ARCH_HAS_KCOV - select ARCH_HAS_STRNCPY_FROM_USER - select ARCH_HAS_STRNLEN_USER -diff --git a/arch/um/Makefile b/arch/um/Makefile -index f2fe63bfd819f..3dbd0e3b660ea 100644 ---- a/arch/um/Makefile -+++ b/arch/um/Makefile -@@ -132,15 +132,23 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT) - # The wrappers will select whether using "malloc" or the kernel allocator. - LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc - -+# Avoid binutils 2.39+ warnings by marking the stack non-executable and -+# ignorning warnings for the kallsyms sections. -+LDFLAGS_EXECSTACK = -z noexecstack -+ifeq ($(CONFIG_LD_IS_BFD),y) -+LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments) -+endif -+ - LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt)) - - # Used by link-vmlinux.sh which has special support for um link - export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) -+export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK) - - # When cleaning we don't include .config, so we don't include - # TT or skas makefiles and don't clean skas_ptregs.h. - CLEAN_FILES += linux x.i gmon.out --MRPROPER_FILES += arch/$(SUBARCH)/include/generated -+MRPROPER_FILES += $(HOST_DIR)/include/generated - - archclean: - @find . 
\( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ -diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig -index fb51bd206dbed..4d7f99a02c1eb 100644 ---- a/arch/um/configs/i386_defconfig -+++ b/arch/um/configs/i386_defconfig -@@ -35,6 +35,7 @@ CONFIG_TTY_CHAN=y - CONFIG_XTERM_CHAN=y - CONFIG_CON_CHAN="pts" - CONFIG_SSL_CHAN="pts" -+CONFIG_SOUND=m - CONFIG_UML_SOUND=m - CONFIG_DEVTMPFS=y - CONFIG_DEVTMPFS_MOUNT=y -diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig -index 477b873174243..4bdd83008f623 100644 ---- a/arch/um/configs/x86_64_defconfig -+++ b/arch/um/configs/x86_64_defconfig -@@ -33,6 +33,7 @@ CONFIG_TTY_CHAN=y - CONFIG_XTERM_CHAN=y - CONFIG_CON_CHAN="pts" - CONFIG_SSL_CHAN="pts" -+CONFIG_SOUND=m - CONFIG_UML_SOUND=m - CONFIG_DEVTMPFS=y - CONFIG_DEVTMPFS_MOUNT=y -diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig -index f145842c40b94..3dd74d369f995 100644 ---- a/arch/um/drivers/Kconfig -+++ b/arch/um/drivers/Kconfig -@@ -104,24 +104,14 @@ config SSL_CHAN - - config UML_SOUND - tristate "Sound support" -+ depends on SOUND -+ select SOUND_OSS_CORE - help - This option enables UML sound support. If enabled, it will pull in -- soundcore and the UML hostaudio relay, which acts as a intermediary -+ the UML hostaudio relay, which acts as a intermediary - between the host's dsp and mixer devices and the UML sound system. - It is safe to say 'Y' here. - --config SOUND -- tristate -- default UML_SOUND -- --config SOUND_OSS_CORE -- bool -- default UML_SOUND -- --config HOSTAUDIO -- tristate -- default UML_SOUND -- - endmenu - - menu "UML Network Devices" -diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile -index 803666e85414a..dc0e6fe77de10 100644 ---- a/arch/um/drivers/Makefile -+++ b/arch/um/drivers/Makefile -@@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o - hostaudio-objs := hostaudio_kern.o - ubd-objs := ubd_kern.o ubd_user.o - port-objs := port_kern.o port_user.o --harddog-objs := harddog_kern.o harddog_user.o -+harddog-objs := harddog_kern.o -+harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o - rtc-objs := rtc_kern.o rtc_user.o - - LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) -@@ -53,13 +54,14 @@ obj-$(CONFIG_UML_NET) += net.o - obj-$(CONFIG_MCONSOLE) += mconsole.o - obj-$(CONFIG_MMAPPER) += mmapper_kern.o - obj-$(CONFIG_BLK_DEV_UBD) += ubd.o --obj-$(CONFIG_HOSTAUDIO) += hostaudio.o -+obj-$(CONFIG_UML_SOUND) += hostaudio.o - obj-$(CONFIG_NULL_CHAN) += null.o - obj-$(CONFIG_PORT_CHAN) += port.o - obj-$(CONFIG_PTY_CHAN) += pty.o - obj-$(CONFIG_TTY_CHAN) += tty.o - obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o - obj-$(CONFIG_UML_WATCHDOG) += harddog.o -+obj-y += $(harddog-builtin-y) $(harddog-builtin-m) - obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o - obj-$(CONFIG_UML_RANDOM) += random.o - obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o -diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c -index 62997055c4547..26a702a065154 100644 ---- a/arch/um/drivers/chan_kern.c -+++ b/arch/um/drivers/chan_kern.c -@@ -133,7 +133,7 @@ static void line_timer_cb(struct work_struct *work) - struct line *line = container_of(work, struct line, task.work); - - if (!line->throttled) -- chan_interrupt(line, line->driver->read_irq); -+ chan_interrupt(line, line->read_irq); - } - - int enable_chan(struct line *line) -@@ -195,9 +195,9 @@ void free_irqs(void) - chan = list_entry(ele, struct chan, free_list); - - if (chan->input && chan->enabled) 
-- um_free_irq(chan->line->driver->read_irq, chan); -+ um_free_irq(chan->line->read_irq, chan); - if (chan->output && chan->enabled) -- um_free_irq(chan->line->driver->write_irq, chan); -+ um_free_irq(chan->line->write_irq, chan); - chan->enabled = 0; - } - } -@@ -215,9 +215,9 @@ static void close_one_chan(struct chan *chan, int delay_free_irq) - spin_unlock_irqrestore(&irqs_to_free_lock, flags); - } else { - if (chan->input && chan->enabled) -- um_free_irq(chan->line->driver->read_irq, chan); -+ um_free_irq(chan->line->read_irq, chan); - if (chan->output && chan->enabled) -- um_free_irq(chan->line->driver->write_irq, chan); -+ um_free_irq(chan->line->write_irq, chan); - chan->enabled = 0; - } - if (chan->ops->close != NULL) -diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c -index 6040817c036f3..25727ed648b72 100644 ---- a/arch/um/drivers/chan_user.c -+++ b/arch/um/drivers/chan_user.c -@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, - unsigned long *stack_out) - { - struct winch_data data; -- int fds[2], n, err; -+ int fds[2], n, err, pid; - char c; - - err = os_pipe(fds, 1, 1); -@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, - * problem with /dev/net/tun, which if held open by this - * thread, prevents the TUN/TAP device from being reused. - */ -- err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out); -- if (err < 0) { -+ pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out); -+ if (pid < 0) { -+ err = pid; - printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n", - -err); - goto out_close; -@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, - goto out_close; - } - -- return err; -+ return pid; - - out_close: - close(fds[1]); -diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h -new file mode 100644 -index 0000000000000..6d9ea60e7133e ---- /dev/null -+++ b/arch/um/drivers/harddog.h -@@ -0,0 +1,9 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef UM_WATCHDOG_H -+#define UM_WATCHDOG_H -+ -+int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); -+void stop_watchdog(int in_fd, int out_fd); -+int ping_watchdog(int fd); -+ -+#endif /* UM_WATCHDOG_H */ -diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c -index e6d4f43deba82..60d1c6cab8a95 100644 ---- a/arch/um/drivers/harddog_kern.c -+++ b/arch/um/drivers/harddog_kern.c -@@ -47,6 +47,7 @@ - #include - #include - #include "mconsole.h" -+#include "harddog.h" - - MODULE_LICENSE("GPL"); - -@@ -60,8 +61,6 @@ static int harddog_out_fd = -1; - * Allow only one person to hold it open - */ - --extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); -- - static int harddog_open(struct inode *inode, struct file *file) - { - int err = -EBUSY; -@@ -92,8 +91,6 @@ err: - return err; - } - --extern void stop_watchdog(int in_fd, int out_fd); -- - static int harddog_release(struct inode *inode, struct file *file) - { - /* -@@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file) - return 0; - } - --extern int ping_watchdog(int fd); -- - static ssize_t harddog_write(struct file *file, const char __user *data, size_t len, - loff_t *ppos) - { -diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c -index 070468d22e394..9ed89304975ed 100644 ---- a/arch/um/drivers/harddog_user.c -+++ b/arch/um/drivers/harddog_user.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include 
"harddog.h" - - struct dog_data { - int stdin_fd; -diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c -new file mode 100644 -index 0000000000000..c74d4b815d143 ---- /dev/null -+++ b/arch/um/drivers/harddog_user_exp.c -@@ -0,0 +1,9 @@ -+// SPDX-License-Identifier: GPL-2.0 -+#include -+#include "harddog.h" -+ -+#if IS_MODULE(CONFIG_UML_WATCHDOG) -+EXPORT_SYMBOL(start_watchdog); -+EXPORT_SYMBOL(stop_watchdog); -+EXPORT_SYMBOL(ping_watchdog); -+#endif -diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c -index 8febf95da96e1..02b0befd67632 100644 ---- a/arch/um/drivers/line.c -+++ b/arch/um/drivers/line.c -@@ -139,7 +139,7 @@ static int flush_buffer(struct line *line) - count = line->buffer + LINE_BUFSIZE - line->head; - - n = write_chan(line->chan_out, line->head, count, -- line->driver->write_irq); -+ line->write_irq); - if (n < 0) - return n; - if (n == count) { -@@ -156,7 +156,7 @@ static int flush_buffer(struct line *line) - - count = line->tail - line->head; - n = write_chan(line->chan_out, line->head, count, -- line->driver->write_irq); -+ line->write_irq); - - if (n < 0) - return n; -@@ -195,7 +195,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len) - ret = buffer_data(line, buf, len); - else { - n = write_chan(line->chan_out, buf, len, -- line->driver->write_irq); -+ line->write_irq); - if (n < 0) { - ret = n; - goto out_up; -@@ -215,7 +215,7 @@ void line_throttle(struct tty_struct *tty) - { - struct line *line = tty->driver_data; - -- deactivate_chan(line->chan_in, line->driver->read_irq); -+ deactivate_chan(line->chan_in, line->read_irq); - line->throttled = 1; - } - -@@ -224,7 +224,7 @@ void line_unthrottle(struct tty_struct *tty) - struct line *line = tty->driver_data; - - line->throttled = 0; -- chan_interrupt(line, line->driver->read_irq); -+ chan_interrupt(line, line->read_irq); - } - - static irqreturn_t line_write_interrupt(int irq, void *data) -@@ -260,19 +260,23 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) - int err; - - if (input) { -- err = um_request_irq(driver->read_irq, fd, IRQ_READ, -- line_interrupt, IRQF_SHARED, -+ err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, -+ line_interrupt, 0, - driver->read_irq_name, data); - if (err < 0) - return err; -+ -+ line->read_irq = err; - } - - if (output) { -- err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, -- line_write_interrupt, IRQF_SHARED, -+ err = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE, -+ line_write_interrupt, 0, - driver->write_irq_name, data); - if (err < 0) - return err; -+ -+ line->write_irq = err; - } - - return 0; -diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h -index bdb16b96e76fd..f15be75a3bf3b 100644 ---- a/arch/um/drivers/line.h -+++ b/arch/um/drivers/line.h -@@ -23,9 +23,7 @@ struct line_driver { - const short minor_start; - const short type; - const short subtype; -- const int read_irq; - const char *read_irq_name; -- const int write_irq; - const char *write_irq_name; - struct mc_device mc; - struct tty_driver *driver; -@@ -35,6 +33,8 @@ struct line { - struct tty_port port; - int valid; - -+ int read_irq, write_irq; -+ - char *init_str; - struct list_head chan_list; - struct chan *chan_in, *chan_out; -diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c -index 6ead1e2404576..8ca67a6926830 100644 ---- a/arch/um/drivers/mconsole_kern.c -+++ b/arch/um/drivers/mconsole_kern.c -@@ -224,7 +224,7 @@ void mconsole_go(struct mc_request *req) - - void 
mconsole_stop(struct mc_request *req) - { -- deactivate_fd(req->originating_fd, MCONSOLE_IRQ); -+ block_signals(); - os_set_fd_block(req->originating_fd, 1); - mconsole_reply(req, "stopped", 0, 0); - for (;;) { -@@ -247,6 +247,7 @@ void mconsole_stop(struct mc_request *req) - } - os_set_fd_block(req->originating_fd, 0); - mconsole_reply(req, "", 0, 0); -+ unblock_signals(); - } - - static DEFINE_SPINLOCK(mc_devices_lock); -diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c -index 433a3f8f2ef3e..32b3341fe9707 100644 ---- a/arch/um/drivers/random.c -+++ b/arch/um/drivers/random.c -@@ -28,7 +28,7 @@ - * protects against a module being loaded twice at the same time. - */ - static int random_fd = -1; --static struct hwrng hwrng = { 0, }; -+static struct hwrng hwrng; - static DECLARE_COMPLETION(have_data); - - static int rng_dev_read(struct hwrng *rng, void *buf, size_t max, bool block) -diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c -index 41eae2e8fb652..8514966778d53 100644 ---- a/arch/um/drivers/ssl.c -+++ b/arch/um/drivers/ssl.c -@@ -47,9 +47,7 @@ static struct line_driver driver = { - .minor_start = 64, - .type = TTY_DRIVER_TYPE_SERIAL, - .subtype = 0, -- .read_irq = SSL_IRQ, - .read_irq_name = "ssl", -- .write_irq = SSL_WRITE_IRQ, - .write_irq_name = "ssl-write", - .mc = { - .list = LIST_HEAD_INIT(driver.mc.list), -diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c -index e8b762f4d8c25..489d5a746ed33 100644 ---- a/arch/um/drivers/stdio_console.c -+++ b/arch/um/drivers/stdio_console.c -@@ -53,9 +53,7 @@ static struct line_driver driver = { - .minor_start = 0, - .type = TTY_DRIVER_TYPE_CONSOLE, - .subtype = SYSTEM_TYPE_CONSOLE, -- .read_irq = CONSOLE_IRQ, - .read_irq_name = "console", -- .write_irq = CONSOLE_WRITE_IRQ, - .write_irq_name = "console-write", - .mc = { - .list = LIST_HEAD_INIT(driver.mc.list), -diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c -index cd9dc0556e913..fefd343412c79 100644 ---- a/arch/um/drivers/ubd_kern.c -+++ b/arch/um/drivers/ubd_kern.c -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c -index cde6db184c26b..45a4bcd27a39b 100644 ---- a/arch/um/drivers/vector_kern.c -+++ b/arch/um/drivers/vector_kern.c -@@ -770,6 +770,7 @@ static int vector_config(char *str, char **error_out) - - if (parsed == NULL) { - *error_out = "vector_config failed to parse parameters"; -+ kfree(params); - return -EINVAL; - } - -diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c -index c080666330234..d762d726b66cf 100644 ---- a/arch/um/drivers/virt-pci.c -+++ b/arch/um/drivers/virt-pci.c -@@ -131,8 +131,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev, - out ? 1 : 0, - posted ? 
cmd : HANDLE_NO_FREE(cmd), - GFP_ATOMIC); -- if (ret) -+ if (ret) { -+ if (posted) -+ kfree(cmd); - goto out; -+ } - - if (posted) { - virtqueue_kick(dev->cmd_vq); -@@ -181,15 +184,15 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, - /* buf->data is maximum size - we may only use parts of it */ - struct um_pci_message_buffer *buf; - u8 *data; -- unsigned long ret = ~0ULL; -+ unsigned long ret = ULONG_MAX; - - if (!dev) -- return ~0ULL; -+ return ULONG_MAX; - - buf = get_cpu_var(um_pci_msg_bufs); - data = buf->data; - -- memset(data, 0xff, sizeof(data)); -+ memset(buf->data, 0xff, sizeof(buf->data)); - - switch (size) { - case 1: -@@ -304,7 +307,7 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset, - /* buf->data is maximum size - we may only use parts of it */ - struct um_pci_message_buffer *buf; - u8 *data; -- unsigned long ret = ~0ULL; -+ unsigned long ret = ULONG_MAX; - - buf = get_cpu_var(um_pci_msg_bufs); - data = buf->data; -@@ -615,22 +618,33 @@ static void um_pci_virtio_remove(struct virtio_device *vdev) - struct um_pci_device *dev = vdev->priv; - int i; - -- /* Stop all virtqueues */ -- vdev->config->reset(vdev); -- vdev->config->del_vqs(vdev); -- - device_set_wakeup_enable(&vdev->dev, false); - - mutex_lock(&um_pci_mtx); - for (i = 0; i < MAX_DEVICES; i++) { - if (um_pci_devices[i].dev != dev) - continue; -+ - um_pci_devices[i].dev = NULL; - irq_free_desc(dev->irq); -+ -+ break; - } - mutex_unlock(&um_pci_mtx); - -- um_pci_rescan(); -+ if (i < MAX_DEVICES) { -+ struct pci_dev *pci_dev; -+ -+ pci_dev = pci_get_slot(bridge->bus, i); -+ if (pci_dev) -+ pci_stop_and_remove_bus_device_locked(pci_dev); -+ } -+ -+ /* Stop all virtqueues */ -+ virtio_reset_device(vdev); -+ dev->cmd_vq = NULL; -+ dev->irq_vq = NULL; -+ vdev->config->del_vqs(vdev); - - kfree(dev); - } -diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c -index d51e445df7976..204e9dfbff1a0 100644 ---- a/arch/um/drivers/virtio_uml.c -+++ b/arch/um/drivers/virtio_uml.c -@@ -21,6 +21,7 @@ - * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd. 
- */ - #include -+#include - #include - #include - #include -@@ -49,6 +50,7 @@ struct virtio_uml_platform_data { - struct virtio_uml_device { - struct virtio_device vdev; - struct platform_device *pdev; -+ struct virtio_uml_platform_data *pdata; - - spinlock_t sock_lock; - int sock, req_fd, irq; -@@ -61,6 +63,7 @@ struct virtio_uml_device { - - u8 config_changed_irq:1; - uint64_t vq_irq_vq_map; -+ int recv_rc; - }; - - struct virtio_uml_vq_info { -@@ -146,14 +149,6 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev, - - rc = vhost_user_recv_header(fd, msg); - -- if (rc == -ECONNRESET && vu_dev->registered) { -- struct virtio_uml_platform_data *pdata; -- -- pdata = vu_dev->pdev->dev.platform_data; -- -- virtio_break_device(&vu_dev->vdev); -- schedule_work(&pdata->conn_broken_wk); -- } - if (rc) - return rc; - size = msg->header.size; -@@ -162,6 +157,22 @@ static int vhost_user_recv(struct virtio_uml_device *vu_dev, - return full_read(fd, &msg->payload, size, false); - } - -+static void vhost_user_check_reset(struct virtio_uml_device *vu_dev, -+ int rc) -+{ -+ struct virtio_uml_platform_data *pdata = vu_dev->pdata; -+ -+ if (rc != -ECONNRESET) -+ return; -+ -+ if (!vu_dev->registered) -+ return; -+ -+ vu_dev->registered = 0; -+ -+ schedule_work(&pdata->conn_broken_wk); -+} -+ - static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev, - struct vhost_user_msg *msg, - size_t max_payload_size) -@@ -169,8 +180,10 @@ static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev, - int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, - max_payload_size, true); - -- if (rc) -+ if (rc) { -+ vhost_user_check_reset(vu_dev, rc); - return rc; -+ } - - if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION)) - return -EPROTO; -@@ -367,6 +380,7 @@ static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev, - sizeof(msg.msg.payload) + - sizeof(msg.extra_payload)); - -+ vu_dev->recv_rc = rc; - if (rc) - return IRQ_NONE; - -@@ -410,7 +424,9 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) - if (!um_irq_timetravel_handler_used()) - ret = vu_req_read_message(vu_dev, NULL); - -- if (vu_dev->vq_irq_vq_map) { -+ if (vu_dev->recv_rc) { -+ vhost_user_check_reset(vu_dev, vu_dev->recv_rc); -+ } else if (vu_dev->vq_irq_vq_map) { - struct virtqueue *vq; - - virtio_device_for_each_vq((&vu_dev->vdev), vq) { -@@ -1090,6 +1106,8 @@ static void virtio_uml_release_dev(struct device *d) - container_of(d, struct virtio_device, dev); - struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); - -+ time_travel_propagate_time(); -+ - /* might not have been opened due to not negotiating the feature */ - if (vu_dev->req_fd >= 0) { - um_free_irq(vu_dev->irq, vu_dev); -@@ -1113,21 +1131,72 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev, - no_vq_suspend ? "dis" : "en"); - } - -+static void vu_of_conn_broken(struct work_struct *wk) -+{ -+ struct virtio_uml_platform_data *pdata; -+ struct virtio_uml_device *vu_dev; -+ -+ pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); -+ -+ vu_dev = platform_get_drvdata(pdata->pdev); -+ -+ virtio_break_device(&vu_dev->vdev); -+ -+ /* -+ * We can't remove the device from the devicetree so the only thing we -+ * can do is warn. 
-+ */ -+ WARN_ON(1); -+} -+ - /* Platform device */ - -+static struct virtio_uml_platform_data * -+virtio_uml_create_pdata(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct virtio_uml_platform_data *pdata; -+ int ret; -+ -+ if (!np) -+ return ERR_PTR(-EINVAL); -+ -+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); -+ if (!pdata) -+ return ERR_PTR(-ENOMEM); -+ -+ INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken); -+ pdata->pdev = pdev; -+ -+ ret = of_property_read_string(np, "socket-path", &pdata->socket_path); -+ if (ret) -+ return ERR_PTR(ret); -+ -+ ret = of_property_read_u32(np, "virtio-device-id", -+ &pdata->virtio_device_id); -+ if (ret) -+ return ERR_PTR(ret); -+ -+ return pdata; -+} -+ - static int virtio_uml_probe(struct platform_device *pdev) - { - struct virtio_uml_platform_data *pdata = pdev->dev.platform_data; - struct virtio_uml_device *vu_dev; - int rc; - -- if (!pdata) -- return -EINVAL; -+ if (!pdata) { -+ pdata = virtio_uml_create_pdata(pdev); -+ if (IS_ERR(pdata)) -+ return PTR_ERR(pdata); -+ } - - vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL); - if (!vu_dev) - return -ENOMEM; - -+ vu_dev->pdata = pdata; - vu_dev->vdev.dev.parent = &pdev->dev; - vu_dev->vdev.dev.release = virtio_uml_release_dev; - vu_dev->vdev.config = &virtio_uml_config_ops; -@@ -1136,6 +1205,8 @@ static int virtio_uml_probe(struct platform_device *pdev) - vu_dev->pdev = pdev; - vu_dev->req_fd = -1; - -+ time_travel_propagate_time(); -+ - do { - rc = os_connect_socket(pdata->socket_path); - } while (rc == -EINTR); -@@ -1201,8 +1272,14 @@ static int vu_unregister_cmdline_device(struct device *dev, void *data) - static void vu_conn_broken(struct work_struct *wk) - { - struct virtio_uml_platform_data *pdata; -+ struct virtio_uml_device *vu_dev; - - pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); -+ -+ vu_dev = platform_get_drvdata(pdata->pdev); -+ -+ virtio_break_device(&vu_dev->vdev); -+ - vu_unregister_cmdline_device(&pdata->pdev->dev, NULL); - } - -diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild -index e5a7b552bb384..a8c763c296b48 100644 ---- a/arch/um/include/asm/Kbuild -+++ b/arch/um/include/asm/Kbuild -@@ -4,6 +4,7 @@ generic-y += bug.h - generic-y += compat.h - generic-y += current.h - generic-y += device.h -+generic-y += dma-mapping.h - generic-y += emergency-restart.h - generic-y += exec.h - generic-y += extable.h -diff --git a/arch/um/include/asm/archrandom.h b/arch/um/include/asm/archrandom.h -new file mode 100644 -index 0000000000000..2f24cb96391d7 ---- /dev/null -+++ b/arch/um/include/asm/archrandom.h -@@ -0,0 +1,30 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef __ASM_UM_ARCHRANDOM_H__ -+#define __ASM_UM_ARCHRANDOM_H__ -+ -+#include -+ -+/* This is from , but better not to #include that in a global header here. 
*/ -+ssize_t os_getrandom(void *buf, size_t len, unsigned int flags); -+ -+static inline bool __must_check arch_get_random_long(unsigned long *v) -+{ -+ return os_getrandom(v, sizeof(*v), 0) == sizeof(*v); -+} -+ -+static inline bool __must_check arch_get_random_int(unsigned int *v) -+{ -+ return os_getrandom(v, sizeof(*v), 0) == sizeof(*v); -+} -+ -+static inline bool __must_check arch_get_random_seed_long(unsigned long *v) -+{ -+ return false; -+} -+ -+static inline bool __must_check arch_get_random_seed_int(unsigned int *v) -+{ -+ return false; -+} -+ -+#endif -diff --git a/arch/um/include/asm/bugs.h b/arch/um/include/asm/bugs.h -deleted file mode 100644 -index 4473942a08397..0000000000000 ---- a/arch/um/include/asm/bugs.h -+++ /dev/null -@@ -1,7 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --#ifndef __UM_BUGS_H --#define __UM_BUGS_H -- --void check_bugs(void); -- --#endif -diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h -index 56fc2b8f2dd01..e79b2ab6f40c8 100644 ---- a/arch/um/include/asm/delay.h -+++ b/arch/um/include/asm/delay.h -@@ -14,7 +14,7 @@ static inline void um_ndelay(unsigned long nsecs) - ndelay(nsecs); - } - #undef ndelay --#define ndelay um_ndelay -+#define ndelay(n) um_ndelay(n) - - static inline void um_udelay(unsigned long usecs) - { -@@ -26,5 +26,5 @@ static inline void um_udelay(unsigned long usecs) - udelay(usecs); - } - #undef udelay --#define udelay um_udelay -+#define udelay(n) um_udelay(n) - #endif /* __UM_DELAY_H */ -diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h -index e187c789369d3..749dfe8512e84 100644 ---- a/arch/um/include/asm/irq.h -+++ b/arch/um/include/asm/irq.h -@@ -4,19 +4,15 @@ - - #define TIMER_IRQ 0 - #define UMN_IRQ 1 --#define CONSOLE_IRQ 2 --#define CONSOLE_WRITE_IRQ 3 --#define UBD_IRQ 4 --#define UM_ETH_IRQ 5 --#define SSL_IRQ 6 --#define SSL_WRITE_IRQ 7 --#define ACCEPT_IRQ 8 --#define MCONSOLE_IRQ 9 --#define WINCH_IRQ 10 --#define SIGIO_WRITE_IRQ 11 --#define TELNETD_IRQ 12 --#define XTERM_IRQ 13 --#define RANDOM_IRQ 14 -+#define UBD_IRQ 2 -+#define UM_ETH_IRQ 3 -+#define ACCEPT_IRQ 4 -+#define MCONSOLE_IRQ 5 -+#define WINCH_IRQ 6 -+#define SIGIO_WRITE_IRQ 7 -+#define TELNETD_IRQ 8 -+#define XTERM_IRQ 9 -+#define RANDOM_IRQ 10 - - #ifdef CONFIG_UML_NET_VECTOR - -diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h -index dab5744e9253d..1e69ef5bc35e0 100644 ---- a/arch/um/include/asm/irqflags.h -+++ b/arch/um/include/asm/irqflags.h -@@ -3,7 +3,7 @@ - #define __UM_IRQFLAGS_H - - extern int signals_enabled; --int set_signals(int enable); -+int um_set_signals(int enable); - void block_signals(void); - void unblock_signals(void); - -@@ -16,7 +16,7 @@ static inline unsigned long arch_local_save_flags(void) - #define arch_local_irq_restore arch_local_irq_restore - static inline void arch_local_irq_restore(unsigned long flags) - { -- set_signals(flags); -+ um_set_signals(flags); - } - - #define arch_local_irq_enable arch_local_irq_enable -diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h -index 3b1cb8b3b1864..e610e932cfe1e 100644 ---- a/arch/um/include/asm/thread_info.h -+++ b/arch/um/include/asm/thread_info.h -@@ -64,6 +64,7 @@ static inline struct thread_info *current_thread_info(void) - #define TIF_RESTORE_SIGMASK 7 - #define TIF_NOTIFY_RESUME 8 - #define TIF_SECCOMP 9 /* secure computing */ -+#define TIF_SINGLESTEP 10 /* single stepping userspace */ - - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SIGPENDING (1 << 
TIF_SIGPENDING) -@@ -72,5 +73,6 @@ static inline struct thread_info *current_thread_info(void) - #define _TIF_MEMDIE (1 << TIF_MEMDIE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_SECCOMP (1 << TIF_SECCOMP) -+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) - - #endif -diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h -index e392a9a5bc9bd..9f27176adb26d 100644 ---- a/arch/um/include/asm/timex.h -+++ b/arch/um/include/asm/timex.h -@@ -2,13 +2,8 @@ - #ifndef __UM_TIMEX_H - #define __UM_TIMEX_H - --typedef unsigned long cycles_t; -- --static inline cycles_t get_cycles (void) --{ -- return 0; --} -- - #define CLOCK_TICK_RATE (HZ) - -+#include -+ - #endif -diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h -index f512704a9ec7b..647fae200c5d3 100644 ---- a/arch/um/include/asm/xor.h -+++ b/arch/um/include/asm/xor.h -@@ -4,8 +4,10 @@ - - #ifdef CONFIG_64BIT - #undef CONFIG_X86_32 -+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64)) - #else - #define CONFIG_X86_32 1 -+#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs)) - #endif - - #include -@@ -16,7 +18,7 @@ - #undef XOR_SELECT_TEMPLATE - /* pick an arbitrary one - measuring isn't possible with inf-cpu */ - #define XOR_SELECT_TEMPLATE(x) \ -- (time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL) -+ (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x) - #endif - - #endif -diff --git a/arch/um/include/shared/longjmp.h b/arch/um/include/shared/longjmp.h -index bdb2869b72b31..8863319039f3d 100644 ---- a/arch/um/include/shared/longjmp.h -+++ b/arch/um/include/shared/longjmp.h -@@ -18,7 +18,7 @@ extern void longjmp(jmp_buf, int); - enable = *(volatile int *)&signals_enabled; \ - n = setjmp(*buf); \ - if(n != 0) \ -- set_signals_trace(enable); \ -+ um_set_signals_trace(enable); \ - n; }) - - #endif -diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h -index 96d400387c93e..90e9c9f86f15c 100644 ---- a/arch/um/include/shared/os.h -+++ b/arch/um/include/shared/os.h -@@ -11,6 +11,12 @@ - #include - #include - #include -+/* This is to get size_t */ -+#ifndef __UM_HOST__ -+#include -+#else -+#include -+#endif - - #define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR)) - -@@ -238,8 +244,8 @@ extern void send_sigio_to_self(void); - extern int change_sig(int signal, int on); - extern void block_signals(void); - extern void unblock_signals(void); --extern int set_signals(int enable); --extern int set_signals_trace(int enable); -+extern int um_set_signals(int enable); -+extern int um_set_signals_trace(int enable); - extern int os_is_signal_stack(void); - extern void deliver_alarm(void); - extern void register_pm_wake_signal(void); -@@ -252,6 +258,7 @@ extern void stack_protections(unsigned long address); - extern int raw(int fd); - extern void setup_machinename(char *machine_out); - extern void setup_hostinfo(char *buf, int len); -+extern ssize_t os_getrandom(void *buf, size_t len, unsigned int flags); - extern void os_dump_core(void) __attribute__ ((noreturn)); - extern void um_early_printk(const char *s, unsigned int n); - extern void os_fix_helper_signals(void); -diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h -index 0c50fa6e8a55b..fbb709a222839 100644 ---- a/arch/um/include/shared/registers.h -+++ b/arch/um/include/shared/registers.h -@@ -16,8 +16,8 @@ extern int restore_fp_registers(int pid, unsigned long *fp_regs); - extern int save_fpx_registers(int pid, unsigned long *fp_regs); 
- extern int restore_fpx_registers(int pid, unsigned long *fp_regs); - extern int save_registers(int pid, struct uml_pt_regs *regs); --extern int restore_registers(int pid, struct uml_pt_regs *regs); --extern int init_registers(int pid); -+extern int restore_pid_registers(int pid, struct uml_pt_regs *regs); -+extern int init_pid_registers(int pid); - extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); - extern unsigned long get_thread_reg(int reg, jmp_buf *buf); - extern int get_fp_registers(int pid, unsigned long *regs); -diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c -index 4d84981003419..335dcb2d63e78 100644 ---- a/arch/um/kernel/exec.c -+++ b/arch/um/kernel/exec.c -@@ -42,7 +42,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) - { - PT_REGS_IP(regs) = eip; - PT_REGS_SP(regs) = esp; -- current->ptrace &= ~PT_DTRACE; -+ clear_thread_flag(TIF_SINGLESTEP); - #ifdef SUBARCH_EXECVE1 - SUBARCH_EXECVE1(regs->regs); - #endif -diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c -index b1e5634398d09..3a85bde3e1734 100644 ---- a/arch/um/kernel/ksyms.c -+++ b/arch/um/kernel/ksyms.c -@@ -6,7 +6,7 @@ - #include - #include - --EXPORT_SYMBOL(set_signals); -+EXPORT_SYMBOL(um_set_signals); - EXPORT_SYMBOL(signals_enabled); - - EXPORT_SYMBOL(os_stat_fd); -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c -index 457a38db368b7..b3fbfca494006 100644 ---- a/arch/um/kernel/process.c -+++ b/arch/um/kernel/process.c -@@ -339,7 +339,7 @@ int singlestepping(void * t) - { - struct task_struct *task = t ? t : current; - -- if (!(task->ptrace & PT_DTRACE)) -+ if (!test_thread_flag(TIF_SINGLESTEP)) - return 0; - - if (task->thread.singlestep_syscall) -diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c -index b425f47bddbb3..d37802ced5636 100644 ---- a/arch/um/kernel/ptrace.c -+++ b/arch/um/kernel/ptrace.c -@@ -12,7 +12,7 @@ - - void user_enable_single_step(struct task_struct *child) - { -- child->ptrace |= PT_DTRACE; -+ set_tsk_thread_flag(child, TIF_SINGLESTEP); - child->thread.singlestep_syscall = 0; - - #ifdef SUBARCH_SET_SINGLESTEPPING -@@ -22,7 +22,7 @@ void user_enable_single_step(struct task_struct *child) - - void user_disable_single_step(struct task_struct *child) - { -- child->ptrace &= ~PT_DTRACE; -+ clear_tsk_thread_flag(child, TIF_SINGLESTEP); - child->thread.singlestep_syscall = 0; - - #ifdef SUBARCH_SET_SINGLESTEPPING -@@ -121,7 +121,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code) - } - - /* -- * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and -+ * XXX Check TIF_SINGLESTEP for singlestepping check and - * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check - */ - int syscall_trace_enter(struct pt_regs *regs) -@@ -145,7 +145,7 @@ void syscall_trace_leave(struct pt_regs *regs) - audit_syscall_exit(regs); - - /* Fake a debug trap */ -- if (ptraced & PT_DTRACE) -+ if (test_thread_flag(TIF_SINGLESTEP)) - send_sigtrap(®s->regs, 0); - - if (!test_thread_flag(TIF_SYSCALL_TRACE)) -diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c -index 88cd9b5c1b744..ae4658f576ab7 100644 ---- a/arch/um/kernel/signal.c -+++ b/arch/um/kernel/signal.c -@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) - unsigned long sp; - int err; - -- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) -+ if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED)) - singlestep = 1; - - /* Did we come from a system 
call? */ -@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs) - * on the host. The tracing thread will check this flag and - * PTRACE_SYSCALL if necessary. - */ -- if (current->ptrace & PT_DTRACE) -+ if (test_thread_flag(TIF_SINGLESTEP)) - current->thread.singlestep_syscall = - is_syscall(PT_REGS_IP(¤t->thread.regs)); - -diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c -index 3198c47673879..c32efb09db214 100644 ---- a/arch/um/kernel/trap.c -+++ b/arch/um/kernel/trap.c -@@ -158,7 +158,7 @@ static void bad_segv(struct faultinfo fi, unsigned long ip) - - void fatal_sigsegv(void) - { -- force_sigsegv(SIGSEGV); -+ force_fatal_sig(SIGSEGV); - do_signal(¤t->thread.regs); - /* - * This is to tell gcc that we're not returning - do_signal -diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c -index a149a5e9a16a1..748595b054c44 100644 ---- a/arch/um/kernel/um_arch.c -+++ b/arch/um/kernel/um_arch.c -@@ -3,6 +3,7 @@ - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) - */ - -+#include - #include - #include - #include -@@ -16,6 +17,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -30,7 +32,7 @@ - #include - - #define DEFAULT_COMMAND_LINE_ROOT "root=98:0" --#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty" -+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0" - - /* Changed in add_arg and setup_arch, which run before SMP is started */ - static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 }; -@@ -93,7 +95,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) - - static void *c_start(struct seq_file *m, loff_t *pos) - { -- return *pos < NR_CPUS ? cpu_data + *pos : NULL; -+ return *pos < nr_cpu_ids ? cpu_data + *pos : NULL; - } - - static void *c_next(struct seq_file *m, void *v, loff_t *pos) -@@ -404,6 +406,8 @@ int __init __weak read_initrd(void) - - void __init setup_arch(char **cmdline_p) - { -+ u8 rng_seed[32]; -+ - stack_protections((unsigned long) &init_thread_info); - setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); - mem_total_pages(physmem_size, iomem_size, highmem); -@@ -413,14 +417,27 @@ void __init setup_arch(char **cmdline_p) - strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; - setup_hostinfo(host_info, sizeof host_info); -+ -+ if (os_getrandom(rng_seed, sizeof(rng_seed), 0) == sizeof(rng_seed)) { -+ add_bootloader_randomness(rng_seed, sizeof(rng_seed)); -+ memzero_explicit(rng_seed, sizeof(rng_seed)); -+ } - } - --void __init check_bugs(void) -+void __init arch_cpu_finalize_init(void) - { - arch_check_bugs(); - os_check_bugs(); - } - -+void apply_retpolines(s32 *start, s32 *end) -+{ -+} -+ -+void apply_returns(s32 *start, s32 *end) -+{ -+} -+ - void apply_alternatives(struct alt_instr *start, struct alt_instr *end) - { - } -diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S -index 16e49bfa2b426..53d719c04ba94 100644 ---- a/arch/um/kernel/vmlinux.lds.S -+++ b/arch/um/kernel/vmlinux.lds.S -@@ -1,4 +1,4 @@ -- -+#define RUNTIME_DISCARD_EXIT - KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER); - - #ifdef CONFIG_LD_SCRIPT_STATIC -diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c -index 2d9270508e156..b123955be7acc 100644 ---- a/arch/um/os-Linux/registers.c -+++ b/arch/um/os-Linux/registers.c -@@ -21,7 +21,7 @@ int save_registers(int pid, struct uml_pt_regs *regs) - return 0; - } - --int restore_registers(int pid, struct uml_pt_regs *regs) -+int restore_pid_registers(int pid, struct uml_pt_regs 
*regs) - { - int err; - -@@ -36,7 +36,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs) - static unsigned long exec_regs[MAX_REG_NR]; - static unsigned long exec_fp_regs[FP_SIZE]; - --int init_registers(int pid) -+int init_pid_registers(int pid) - { - int err; - -diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c -index 6597ea1986ffa..9e71794839e87 100644 ---- a/arch/um/os-Linux/sigio.c -+++ b/arch/um/os-Linux/sigio.c -@@ -132,7 +132,7 @@ static void update_thread(void) - int n; - char c; - -- flags = set_signals_trace(0); -+ flags = um_set_signals_trace(0); - CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c))); - if (n != sizeof(c)) { - printk(UM_KERN_ERR "update_thread : write failed, err = %d\n", -@@ -147,7 +147,7 @@ static void update_thread(void) - goto fail; - } - -- set_signals_trace(flags); -+ um_set_signals_trace(flags); - return; - fail: - /* Critical section start */ -@@ -161,7 +161,7 @@ static void update_thread(void) - close(write_sigio_fds[0]); - close(write_sigio_fds[1]); - /* Critical section end */ -- set_signals_trace(flags); -+ um_set_signals_trace(flags); - } - - int __add_sigio_fd(int fd) -diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c -index 6cf098c23a394..24a403a70a020 100644 ---- a/arch/um/os-Linux/signal.c -+++ b/arch/um/os-Linux/signal.c -@@ -94,7 +94,7 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc) - - sig_handler_common(sig, si, mc); - -- set_signals_trace(enabled); -+ um_set_signals_trace(enabled); - } - - static void timer_real_alarm_handler(mcontext_t *mc) -@@ -126,7 +126,7 @@ void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) - - signals_active &= ~SIGALRM_MASK; - -- set_signals_trace(enabled); -+ um_set_signals_trace(enabled); - } - - void deliver_alarm(void) { -@@ -348,7 +348,7 @@ void unblock_signals(void) - } - } - --int set_signals(int enable) -+int um_set_signals(int enable) - { - int ret; - if (signals_enabled == enable) -@@ -362,7 +362,7 @@ int set_signals(int enable) - return ret; - } - --int set_signals_trace(int enable) -+int um_set_signals_trace(int enable) - { - int ret; - if (signals_enabled == enable) -diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c -index 87d3129e7362e..0df2ebcc97c0d 100644 ---- a/arch/um/os-Linux/skas/process.c -+++ b/arch/um/os-Linux/skas/process.c -@@ -5,6 +5,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -707,10 +708,24 @@ void halt_skas(void) - UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT); - } - -+static bool noreboot; -+ -+static int __init noreboot_cmd_param(char *str, int *add) -+{ -+ noreboot = true; -+ return 0; -+} -+ -+__uml_setup("noreboot", noreboot_cmd_param, -+"noreboot\n" -+" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n" -+" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n" -+" crashes in CI\n"); -+ - void reboot_skas(void) - { - block_signals_trace(); -- UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT); -+ UML_LONGJMP(&initial_jmpbuf, noreboot ? 
INIT_JMP_HALT : INIT_JMP_REBOOT); - } - - void __switch_mm(struct mm_id *mm_idp) -diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c -index 8a72c99994eb1..e3ee4db58b40d 100644 ---- a/arch/um/os-Linux/start_up.c -+++ b/arch/um/os-Linux/start_up.c -@@ -368,7 +368,7 @@ void __init os_early_checks(void) - check_tmpexec(); - - pid = start_ptraced_child(); -- if (init_registers(pid)) -+ if (init_pid_registers(pid)) - fatal("Failed to initialize default registers"); - stop_ptraced_child(pid, 1, 1); - } -diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c -index 41297ec404bf9..fc0f2a9dee5af 100644 ---- a/arch/um/os-Linux/util.c -+++ b/arch/um/os-Linux/util.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -96,6 +97,11 @@ static inline void __attribute__ ((noreturn)) uml_abort(void) - exit(127); - } - -+ssize_t os_getrandom(void *buf, size_t len, unsigned int flags) -+{ -+ return getrandom(buf, len, flags); -+} -+ - /* - * UML helper threads must not handle SIGWINCH/INT/TERM - */ -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index d9830e7e1060f..cfb1edd25437d 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -68,6 +68,7 @@ config X86 - select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE - select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI - select ARCH_HAS_CACHE_LINE_SIZE -+ select ARCH_HAS_CPU_FINALIZE_INIT - select ARCH_HAS_DEBUG_VIRTUAL - select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE - select ARCH_HAS_DEVMEM_IS_ALLOWED -@@ -260,6 +261,7 @@ config X86 - select SYSCTL_EXCEPTION_TRACE - select THREAD_INFO_IN_TASK - select TRACE_IRQFLAGS_SUPPORT -+ select TRACE_IRQFLAGS_NMI_SUPPORT - select USER_STACKTRACE_SUPPORT - select VIRT_TO_BUS - select HAVE_ARCH_KCSAN if X86_64 -@@ -459,15 +461,6 @@ config GOLDFISH - def_bool y - depends on X86_GOLDFISH - --config RETPOLINE -- bool "Avoid speculative indirect branches in kernel" -- default y -- help -- Compile kernel with the retpoline compiler options to guard against -- kernel-to-user data leaks by avoiding speculative indirect -- branches. Requires a compiler with -mindirect-branch=thunk-extern -- support for full protection. The kernel may run slower. -- - config X86_CPU_RESCTRL - bool "x86 CPU resource control support" - depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) -@@ -1256,7 +1249,8 @@ config TOSHIBA - - config I8K - tristate "Dell i8k legacy laptop support" -- select HWMON -+ depends on HWMON -+ depends on PROC_FS - select SENSORS_DELL_SMM - help - This option enables legacy /proc/i8k userspace interface in hwmon -@@ -1310,7 +1304,7 @@ config MICROCODE - - config MICROCODE_INTEL - bool "Intel microcode loading support" -- depends on MICROCODE -+ depends on CPU_SUP_INTEL && MICROCODE - default MICROCODE - help - This options enables microcode patch loading support for Intel -@@ -1322,22 +1316,21 @@ config MICROCODE_INTEL - - config MICROCODE_AMD - bool "AMD microcode loading support" -- depends on MICROCODE -+ depends on CPU_SUP_AMD && MICROCODE - help - If you select this option, microcode patch loading support for AMD - processors will be enabled. - --config MICROCODE_OLD_INTERFACE -- bool "Ancient loading interface (DEPRECATED)" -+config MICROCODE_LATE_LOADING -+ bool "Late microcode loading (DANGEROUS)" - default n - depends on MICROCODE - help -- DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface -- which was used by userspace tools like iucode_tool and microcode.ctl. 
-- It is inadequate because it runs too late to be able to properly -- load microcode on a machine and it needs special tools. Instead, you -- should've switched to the early loading method with the initrd or -- builtin microcode by now: Documentation/x86/microcode.rst -+ Loading microcode late, when the system is up and executing instructions -+ is a tricky business and should be avoided if possible. Just the sequence -+ of synchronizing all cores and SMT threads is one fragile dance which does -+ not guarantee that cores might not softlock after the loading. Therefore, -+ use this at your own risk. Late loading taints the kernel too. - - config X86_MSR - tristate "/dev/cpu/*/msr - Model-specific register support" -@@ -1518,6 +1511,7 @@ config AMD_MEM_ENCRYPT - select ARCH_HAS_FORCE_DMA_UNENCRYPTED - select INSTRUCTION_DECODER - select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS -+ select ARCH_HAS_CC_PLATFORM - help - Say yes to enable support for the encryption of system memory. - This requires an AMD processor that supports Secure Memory -@@ -1917,6 +1911,7 @@ config EFI - depends on ACPI - select UCS2_STRING - select EFI_RUNTIME_WRAPPERS -+ select ARCH_USE_MEMREMAP_PROT - help - This enables the kernel to use EFI runtime services that are - available (such as the EFI variable services). -@@ -1931,7 +1926,6 @@ config EFI - config EFI_STUB - bool "EFI stub support" - depends on EFI && !X86_USE_3DNOW -- depends on $(cc-option,-mabi=ms) || X86_32 - select RELOCATABLE - help - This kernel feature allows a bzImage to be loaded directly -@@ -2392,6 +2386,114 @@ source "kernel/livepatch/Kconfig" - - endmenu - -+config CC_HAS_SLS -+ def_bool $(cc-option,-mharden-sls=all) -+ -+config CC_HAS_RETURN_THUNK -+ def_bool $(cc-option,-mfunction-return=thunk-extern) -+ -+menuconfig SPECULATION_MITIGATIONS -+ bool "Mitigations for speculative execution vulnerabilities" -+ default y -+ help -+ Say Y here to enable options which enable mitigations for -+ speculative execution hardware vulnerabilities. -+ -+ If you say N, all mitigations will be disabled. You really -+ should know what you are doing to say so. -+ -+if SPECULATION_MITIGATIONS -+ -+config PAGE_TABLE_ISOLATION -+ bool "Remove the kernel mapping in user mode" -+ default y -+ depends on (X86_64 || X86_PAE) -+ help -+ This feature reduces the number of hardware side channels by -+ ensuring that the majority of kernel addresses are not mapped -+ into userspace. -+ -+ See Documentation/x86/pti.rst for more details. -+ -+config RETPOLINE -+ bool "Avoid speculative indirect branches in kernel" -+ default y -+ help -+ Compile kernel with the retpoline compiler options to guard against -+ kernel-to-user data leaks by avoiding speculative indirect -+ branches. Requires a compiler with -mindirect-branch=thunk-extern -+ support for full protection. The kernel may run slower. -+ -+config RETHUNK -+ bool "Enable return-thunks" -+ depends on RETPOLINE && CC_HAS_RETURN_THUNK -+ default y if X86_64 -+ help -+ Compile the kernel with the return-thunks compiler option to guard -+ against kernel-to-user data leaks by avoiding return speculation. -+ Requires a compiler with -mfunction-return=thunk-extern -+ support for full protection. The kernel may run slower. -+ -+config CPU_UNRET_ENTRY -+ bool "Enable UNRET on kernel entry" -+ depends on CPU_SUP_AMD && RETHUNK && X86_64 -+ default y -+ help -+ Compile the kernel with support for the retbleed=unret mitigation. 
-+ -+config CPU_IBPB_ENTRY -+ bool "Enable IBPB on kernel entry" -+ depends on CPU_SUP_AMD && X86_64 -+ default y -+ help -+ Compile the kernel with support for the retbleed=ibpb mitigation. -+ -+config CPU_IBRS_ENTRY -+ bool "Enable IBRS on kernel entry" -+ depends on CPU_SUP_INTEL && X86_64 -+ default y -+ help -+ Compile the kernel with support for the spectre_v2=ibrs mitigation. -+ This mitigates both spectre_v2 and retbleed at great cost to -+ performance. -+ -+config CPU_SRSO -+ bool "Mitigate speculative RAS overflow on AMD" -+ depends on CPU_SUP_AMD && X86_64 && RETHUNK -+ default y -+ help -+ Enable the SRSO mitigation needed on AMD Zen1-4 machines. -+ -+config SLS -+ bool "Mitigate Straight-Line-Speculation" -+ depends on CC_HAS_SLS && X86_64 -+ default n -+ help -+ Compile the kernel with straight-line-speculation options to guard -+ against straight line speculation. The kernel image might be slightly -+ larger. -+ -+config GDS_FORCE_MITIGATION -+ bool "Force GDS Mitigation" -+ depends on CPU_SUP_INTEL -+ default n -+ help -+ Gather Data Sampling (GDS) is a hardware vulnerability which allows -+ unprivileged speculative access to data which was previously stored in -+ vector registers. -+ -+ This option is equivalent to setting gather_data_sampling=force on the -+ command line. The microcode mitigation is used if present, otherwise -+ AVX is disabled as a mitigation. On affected systems that are missing -+ the microcode any userspace code that unconditionally uses AVX will -+ break with this option set. -+ -+ Setting this option on systems not vulnerable to GDS has no effect. -+ -+ If in doubt, say N. -+ -+endif -+ - config ARCH_HAS_ADD_PAGES - def_bool y - depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG -@@ -2795,6 +2897,11 @@ config IA32_AOUT - config X86_X32 - bool "x32 ABI for 64-bit mode" - depends on X86_64 -+ # llvm-objcopy does not convert x86_64 .note.gnu.property or -+ # compressed debug sections to x86_x32 properly: -+ # https://github.com/ClangBuiltLinux/linux/issues/514 -+ # https://github.com/ClangBuiltLinux/linux/issues/1141 -+ depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm) - help - Include code to run binaries for the x32 native 32-bit ABI - for 64-bit processors. An x32 process gets access to the -diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug -index d3a6f74a94bdf..d4d6db4dde220 100644 ---- a/arch/x86/Kconfig.debug -+++ b/arch/x86/Kconfig.debug -@@ -1,8 +1,5 @@ - # SPDX-License-Identifier: GPL-2.0 - --config TRACE_IRQFLAGS_NMI_SUPPORT -- def_bool y -- - config EARLY_PRINTK_USB - bool - -diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index 7488cfbbd2f60..9c09bbd390cec 100644 ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -24,7 +24,7 @@ endif - - # How to compile the 16-bit code. Note we always compile for -march=i386; - # that way we can complain to the user if the CPU is insufficient. 
--REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \ -+REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ - -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ - -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ - -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) -@@ -179,6 +179,10 @@ ifdef CONFIG_RETPOLINE - endif - endif - -+ifdef CONFIG_SLS -+ KBUILD_CFLAGS += -mharden-sls=all -+endif -+ - KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE) - - ifdef CONFIG_LTO_CLANG -diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile -index b5aecb524a8aa..ffec8bb01ba8c 100644 ---- a/arch/x86/boot/Makefile -+++ b/arch/x86/boot/Makefile -@@ -103,7 +103,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE - AFLAGS_header.o += -I$(objtree)/$(obj) - $(obj)/header.o: $(obj)/zoffset.h - --LDFLAGS_setup.elf := -m elf_i386 -T -+LDFLAGS_setup.elf := -m elf_i386 -z noexecstack -T - $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE - $(call if_changed,ld) - -diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S -index 5521ea12f44e0..aa9b964575843 100644 ---- a/arch/x86/boot/bioscall.S -+++ b/arch/x86/boot/bioscall.S -@@ -32,7 +32,7 @@ intcall: - movw %dx, %si - movw %sp, %di - movw $11, %cx -- rep; movsd -+ rep; movsl - - /* Pop full state from the stack */ - popal -@@ -67,7 +67,7 @@ intcall: - jz 4f - movw %sp, %si - movw $11, %cx -- rep; movsd -+ rep; movsl - 4: addw $44, %sp - - /* Restore state and return */ -diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h -index 34c9dbb6a47d6..686a9d75a0e41 100644 ---- a/arch/x86/boot/boot.h -+++ b/arch/x86/boot/boot.h -@@ -110,66 +110,78 @@ typedef unsigned int addr_t; - - static inline u8 rdfs8(addr_t addr) - { -+ u8 *ptr = (u8 *)absolute_pointer(addr); - u8 v; -- asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr)); -+ asm volatile("movb %%fs:%1,%0" : "=q" (v) : "m" (*ptr)); - return v; - } - static inline u16 rdfs16(addr_t addr) - { -+ u16 *ptr = (u16 *)absolute_pointer(addr); - u16 v; -- asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr)); -+ asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*ptr)); - return v; - } - static inline u32 rdfs32(addr_t addr) - { -+ u32 *ptr = (u32 *)absolute_pointer(addr); - u32 v; -- asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr)); -+ asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*ptr)); - return v; - } - - static inline void wrfs8(u8 v, addr_t addr) - { -- asm volatile("movb %1,%%fs:%0" : "+m" (*(u8 *)addr) : "qi" (v)); -+ u8 *ptr = (u8 *)absolute_pointer(addr); -+ asm volatile("movb %1,%%fs:%0" : "+m" (*ptr) : "qi" (v)); - } - static inline void wrfs16(u16 v, addr_t addr) - { -- asm volatile("movw %1,%%fs:%0" : "+m" (*(u16 *)addr) : "ri" (v)); -+ u16 *ptr = (u16 *)absolute_pointer(addr); -+ asm volatile("movw %1,%%fs:%0" : "+m" (*ptr) : "ri" (v)); - } - static inline void wrfs32(u32 v, addr_t addr) - { -- asm volatile("movl %1,%%fs:%0" : "+m" (*(u32 *)addr) : "ri" (v)); -+ u32 *ptr = (u32 *)absolute_pointer(addr); -+ asm volatile("movl %1,%%fs:%0" : "+m" (*ptr) : "ri" (v)); - } - - static inline u8 rdgs8(addr_t addr) - { -+ u8 *ptr = (u8 *)absolute_pointer(addr); - u8 v; -- asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*(u8 *)addr)); -+ asm volatile("movb %%gs:%1,%0" : "=q" (v) : "m" (*ptr)); - return v; - } - static inline u16 rdgs16(addr_t addr) - { -+ u16 *ptr = (u16 *)absolute_pointer(addr); - u16 v; -- asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr)); -+ asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" 
(*ptr)); - return v; - } - static inline u32 rdgs32(addr_t addr) - { -+ u32 *ptr = (u32 *)absolute_pointer(addr); - u32 v; -- asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr)); -+ asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*ptr)); - return v; - } - - static inline void wrgs8(u8 v, addr_t addr) - { -- asm volatile("movb %1,%%gs:%0" : "+m" (*(u8 *)addr) : "qi" (v)); -+ u8 *ptr = (u8 *)absolute_pointer(addr); -+ asm volatile("movb %1,%%gs:%0" : "+m" (*ptr) : "qi" (v)); - } - static inline void wrgs16(u16 v, addr_t addr) - { -- asm volatile("movw %1,%%gs:%0" : "+m" (*(u16 *)addr) : "ri" (v)); -+ u16 *ptr = (u16 *)absolute_pointer(addr); -+ asm volatile("movw %1,%%gs:%0" : "+m" (*ptr) : "ri" (v)); - } - static inline void wrgs32(u32 v, addr_t addr) - { -- asm volatile("movl %1,%%gs:%0" : "+m" (*(u32 *)addr) : "ri" (v)); -+ u32 *ptr = (u32 *)absolute_pointer(addr); -+ asm volatile("movl %1,%%gs:%0" : "+m" (*ptr) : "ri" (v)); - } - - /* Note: these only return true/false, not a signed return value! */ -diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile -index 431bf7f846c3c..15c5ae62a0e94 100644 ---- a/arch/x86/boot/compressed/Makefile -+++ b/arch/x86/boot/compressed/Makefile -@@ -28,7 +28,11 @@ KCOV_INSTRUMENT := n - targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ - vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst - --KBUILD_CFLAGS := -m$(BITS) -O2 -+# CLANG_FLAGS must come before any cc-disable-warning or cc-option calls in -+# case of cross compiling, as it has the '--target=' flag, which is needed to -+# avoid errors with '-march=i386', and future flags may depend on the target to -+# be valid. -+KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) - KBUILD_CFLAGS += -fno-strict-aliasing -fPIE - KBUILD_CFLAGS += -Wundef - KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -@@ -47,7 +51,6 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS - # Disable relocation relaxation in case the link is not PIE. - KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) - KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h --KBUILD_CFLAGS += $(CLANG_FLAGS) - - # sev.c indirectly inludes inat-table.h which is generated during - # compilation and stored in $(objtree). 
Add the directory to the includes so -@@ -66,6 +69,10 @@ LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) - ifdef CONFIG_LD_ORPHAN_WARN - LDFLAGS_vmlinux += --orphan-handling=warn - endif -+LDFLAGS_vmlinux += -z noexecstack -+ifeq ($(CONFIG_LD_IS_BFD),y) -+LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments) -+endif - LDFLAGS_vmlinux += -T - - hostprogs := mkpiggy -diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S -index 8bb92e9f4e973..70052779b235c 100644 ---- a/arch/x86/boot/compressed/efi_thunk_64.S -+++ b/arch/x86/boot/compressed/efi_thunk_64.S -@@ -93,7 +93,7 @@ SYM_FUNC_START(__efi64_thunk) - - pop %rbx - pop %rbp -- ret -+ RET - SYM_FUNC_END(__efi64_thunk) - - .code32 -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index 572c535cf45bc..c3d427c817c73 100644 ---- a/arch/x86/boot/compressed/head_64.S -+++ b/arch/x86/boot/compressed/head_64.S -@@ -468,11 +468,25 @@ SYM_CODE_START(startup_64) - /* Save the trampoline address in RCX */ - movq %rax, %rcx - -+ /* Set up 32-bit addressable stack */ -+ leaq TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp -+ -+ /* -+ * Preserve live 64-bit registers on the stack: this is necessary -+ * because the architecture does not guarantee that GPRs will retain -+ * their full 64-bit values across a 32-bit mode switch. -+ */ -+ pushq %rbp -+ pushq %rbx -+ pushq %rsi -+ - /* -- * Load the address of trampoline_return() into RDI. -- * It will be used by the trampoline to return to the main code. -+ * Push the 64-bit address of trampoline_return() onto the new stack. -+ * It will be used by the trampoline to return to the main code. Due to -+ * the 32-bit mode switch, it cannot be kept it in a register either. - */ - leaq trampoline_return(%rip), %rdi -+ pushq %rdi - - /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ - pushq $__KERNEL32_CS -@@ -480,6 +494,11 @@ SYM_CODE_START(startup_64) - pushq %rax - lretq - trampoline_return: -+ /* Restore live 64-bit registers */ -+ popq %rsi -+ popq %rbx -+ popq %rbp -+ - /* Restore the stack, the 32-bit trampoline uses its own stack */ - leaq rva(boot_stack_end)(%rbx), %rsp - -@@ -600,7 +619,7 @@ SYM_FUNC_END(.Lrelocated) - /* - * This is the 32-bit trampoline that will be copied over to low memory. - * -- * RDI contains the return address (might be above 4G). -+ * Return address is at the top of the stack (might be above 4G). - * ECX contains the base address of the trampoline memory. - * Non zero RDX means trampoline needs to enable 5-level paging. 
- */ -@@ -610,9 +629,6 @@ SYM_CODE_START(trampoline_32bit_src) - movl %eax, %ds - movl %eax, %ss - -- /* Set up new stack */ -- leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp -- - /* Disable paging */ - movl %cr0, %eax - btrl $X86_CR0_PG_BIT, %eax -@@ -672,7 +688,7 @@ SYM_CODE_END(trampoline_32bit_src) - .code64 - SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled) - /* Return from the trampoline */ -- jmp *%rdi -+ retq - SYM_FUNC_END(.Lpaging_enabled) - - /* -@@ -813,7 +829,7 @@ SYM_FUNC_START(efi32_pe_entry) - 2: popl %edi // restore callee-save registers - popl %ebx - leave -- ret -+ RET - SYM_FUNC_END(efi32_pe_entry) - - .section ".rodata" -@@ -868,7 +884,7 @@ SYM_FUNC_START(startup32_set_idt_entry) - - pop %ecx - pop %ebx -- ret -+ RET - SYM_FUNC_END(startup32_set_idt_entry) - #endif - -@@ -884,7 +900,7 @@ SYM_FUNC_START(startup32_load_idt) - movl %eax, rva(boot32_idt_desc+2)(%ebp) - lidt rva(boot32_idt_desc)(%ebp) - #endif -- ret -+ RET - SYM_FUNC_END(startup32_load_idt) - - /* -@@ -954,7 +970,7 @@ SYM_FUNC_START(startup32_check_sev_cbit) - popl %ebx - popl %eax - #endif -- ret -+ RET - SYM_FUNC_END(startup32_check_sev_cbit) - - /* -diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S -index c1e81a848b2a5..a63424d13627b 100644 ---- a/arch/x86/boot/compressed/mem_encrypt.S -+++ b/arch/x86/boot/compressed/mem_encrypt.S -@@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit) - - #endif /* CONFIG_AMD_MEM_ENCRYPT */ - -- ret -+ RET - SYM_FUNC_END(get_sev_encryption_bit) - - /** -@@ -92,7 +92,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid) - /* All good - return success */ - xorl %eax, %eax - 1: -- ret -+ RET - 2: - movl $-1, %eax - jmp 1b -@@ -221,7 +221,7 @@ SYM_FUNC_START(set_sev_encryption_mask) - #endif - - xor %rax, %rax -- ret -+ RET - SYM_FUNC_END(set_sev_encryption_mask) - - .data -diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c -index e3add857c2c9d..c421af5a3cdce 100644 ---- a/arch/x86/boot/main.c -+++ b/arch/x86/boot/main.c -@@ -33,7 +33,7 @@ static void copy_boot_params(void) - u16 cl_offset; - }; - const struct old_cmdline * const oldcmd = -- (const struct old_cmdline *)OLD_CL_ADDRESS; -+ absolute_pointer(OLD_CL_ADDRESS); - - BUILD_BUG_ON(sizeof(boot_params) != 4096); - memcpy(&boot_params.hdr, &hdr, sizeof(hdr)); -diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig -index e81885384f604..99398cbdae434 100644 ---- a/arch/x86/configs/i386_defconfig -+++ b/arch/x86/configs/i386_defconfig -@@ -262,3 +262,4 @@ CONFIG_BLK_DEV_IO_TRACE=y - CONFIG_PROVIDE_OHCI1394_DMA_INIT=y - CONFIG_EARLY_PRINTK_DBGP=y - CONFIG_DEBUG_BOOT_PARAMS=y -+CONFIG_KALLSYMS_ALL=y -diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig -index e8a7a0af2bdaa..d7298b104a456 100644 ---- a/arch/x86/configs/x86_64_defconfig -+++ b/arch/x86/configs/x86_64_defconfig -@@ -258,3 +258,4 @@ CONFIG_BLK_DEV_IO_TRACE=y - CONFIG_PROVIDE_OHCI1394_DMA_INIT=y - CONFIG_EARLY_PRINTK_DBGP=y - CONFIG_DEBUG_BOOT_PARAMS=y -+CONFIG_KALLSYMS_ALL=y -diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile -index f307c93fc90a7..fce05e9df56db 100644 ---- a/arch/x86/crypto/Makefile -+++ b/arch/x86/crypto/Makefile -@@ -61,8 +61,8 @@ sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o - obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o - sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o - --obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o --blake2s-x86_64-y := blake2s-core.o blake2s-glue.o 
-+obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o -+libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o - - obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o - ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o -diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S -index 51d46d93efbcc..b48ddebb47489 100644 ---- a/arch/x86/crypto/aegis128-aesni-asm.S -+++ b/arch/x86/crypto/aegis128-aesni-asm.S -@@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial) - pxor T0, MSG - - .Lld_partial_8: -- ret -+ RET - SYM_FUNC_END(__load_partial) - - /* -@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial) - mov %r10b, (%r9) - - .Lst_partial_1: -- ret -+ RET - SYM_FUNC_END(__store_partial) - - /* -@@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init) - movdqu STATE4, 0x40(STATEP) - - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_init) - - /* -@@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) - movdqu STATE3, 0x30(STATEP) - movdqu STATE4, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lad_out_1: - movdqu STATE4, 0x00(STATEP) -@@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) - movdqu STATE2, 0x30(STATEP) - movdqu STATE3, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lad_out_2: - movdqu STATE3, 0x00(STATEP) -@@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) - movdqu STATE1, 0x30(STATEP) - movdqu STATE2, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lad_out_3: - movdqu STATE2, 0x00(STATEP) -@@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) - movdqu STATE0, 0x30(STATEP) - movdqu STATE1, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lad_out_4: - movdqu STATE1, 0x00(STATEP) -@@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad) - movdqu STATE4, 0x30(STATEP) - movdqu STATE0, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lad_out: - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_ad) - - .macro encrypt_block a s0 s1 s2 s3 s4 i -@@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) - movdqu STATE2, 0x30(STATEP) - movdqu STATE3, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lenc_out_1: - movdqu STATE3, 0x00(STATEP) -@@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) - movdqu STATE1, 0x30(STATEP) - movdqu STATE2, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lenc_out_2: - movdqu STATE2, 0x00(STATEP) -@@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) - movdqu STATE0, 0x30(STATEP) - movdqu STATE1, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lenc_out_3: - movdqu STATE1, 0x00(STATEP) -@@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) - movdqu STATE4, 0x30(STATEP) - movdqu STATE0, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lenc_out_4: - movdqu STATE0, 0x00(STATEP) -@@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc) - movdqu STATE3, 0x30(STATEP) - movdqu STATE4, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Lenc_out: - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_enc) - - /* -@@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) - movdqu STATE3, 0x40(STATEP) - - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) - - .macro decrypt_block a s0 s1 s2 s3 s4 i -@@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) - movdqu STATE2, 0x30(STATEP) - movdqu STATE3, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Ldec_out_1: - movdqu STATE3, 0x00(STATEP) -@@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) - movdqu STATE1, 0x30(STATEP) - movdqu STATE2, 
0x40(STATEP) - FRAME_END -- ret -+ RET - - .Ldec_out_2: - movdqu STATE2, 0x00(STATEP) -@@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) - movdqu STATE0, 0x30(STATEP) - movdqu STATE1, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Ldec_out_3: - movdqu STATE1, 0x00(STATEP) -@@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) - movdqu STATE4, 0x30(STATEP) - movdqu STATE0, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Ldec_out_4: - movdqu STATE0, 0x00(STATEP) -@@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec) - movdqu STATE3, 0x30(STATEP) - movdqu STATE4, 0x40(STATEP) - FRAME_END -- ret -+ RET - - .Ldec_out: - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_dec) - - /* -@@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) - movdqu STATE3, 0x40(STATEP) - - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) - - /* -@@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final) - movdqu MSG, (%rsi) - - FRAME_END -- ret -+ RET - SYM_FUNC_END(crypto_aegis128_aesni_final) -diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S -index 3f0fc7dd87d77..c799838242a69 100644 ---- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S -+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S -@@ -525,7 +525,7 @@ ddq_add_8: - /* return updated IV */ - vpshufb xbyteswap, xcounter, xcounter - vmovdqu xcounter, (p_iv) -- ret -+ RET - .endm - - /* -diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S -index 4e3972570916e..363699dd72206 100644 ---- a/arch/x86/crypto/aesni-intel_asm.S -+++ b/arch/x86/crypto/aesni-intel_asm.S -@@ -1594,7 +1594,7 @@ SYM_FUNC_START(aesni_gcm_dec) - GCM_ENC_DEC dec - GCM_COMPLETE arg10, arg11 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_dec) - - -@@ -1683,7 +1683,7 @@ SYM_FUNC_START(aesni_gcm_enc) - - GCM_COMPLETE arg10, arg11 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_enc) - - /***************************************************************************** -@@ -1701,7 +1701,7 @@ SYM_FUNC_START(aesni_gcm_init) - FUNC_SAVE - GCM_INIT %arg3, %arg4,%arg5, %arg6 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_init) - - /***************************************************************************** -@@ -1716,7 +1716,7 @@ SYM_FUNC_START(aesni_gcm_enc_update) - FUNC_SAVE - GCM_ENC_DEC enc - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_enc_update) - - /***************************************************************************** -@@ -1731,7 +1731,7 @@ SYM_FUNC_START(aesni_gcm_dec_update) - FUNC_SAVE - GCM_ENC_DEC dec - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_dec_update) - - /***************************************************************************** -@@ -1746,7 +1746,7 @@ SYM_FUNC_START(aesni_gcm_finalize) - FUNC_SAVE - GCM_COMPLETE %arg3 %arg4 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_finalize) - - #endif -@@ -1762,7 +1762,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a) - pxor %xmm1, %xmm0 - movaps %xmm0, (TKEYP) - add $0x10, TKEYP -- ret -+ RET - SYM_FUNC_END(_key_expansion_256a) - SYM_FUNC_END_ALIAS(_key_expansion_128) - -@@ -1787,7 +1787,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a) - shufps $0b01001110, %xmm2, %xmm1 - movaps %xmm1, 0x10(TKEYP) - add $0x20, TKEYP -- ret -+ RET - SYM_FUNC_END(_key_expansion_192a) - - SYM_FUNC_START_LOCAL(_key_expansion_192b) -@@ -1806,7 +1806,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b) - - movaps %xmm0, (TKEYP) - add $0x10, TKEYP -- ret -+ RET - 
SYM_FUNC_END(_key_expansion_192b) - - SYM_FUNC_START_LOCAL(_key_expansion_256b) -@@ -1818,7 +1818,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b) - pxor %xmm1, %xmm2 - movaps %xmm2, (TKEYP) - add $0x10, TKEYP -- ret -+ RET - SYM_FUNC_END(_key_expansion_256b) - - /* -@@ -1933,7 +1933,7 @@ SYM_FUNC_START(aesni_set_key) - popl KEYP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_set_key) - - /* -@@ -1957,7 +1957,7 @@ SYM_FUNC_START(aesni_enc) - popl KEYP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_enc) - - /* -@@ -2014,7 +2014,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1) - aesenc KEY, STATE - movaps 0x70(TKEYP), KEY - aesenclast KEY, STATE -- ret -+ RET - SYM_FUNC_END(_aesni_enc1) - - /* -@@ -2122,7 +2122,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4) - aesenclast KEY, STATE2 - aesenclast KEY, STATE3 - aesenclast KEY, STATE4 -- ret -+ RET - SYM_FUNC_END(_aesni_enc4) - - /* -@@ -2147,7 +2147,7 @@ SYM_FUNC_START(aesni_dec) - popl KEYP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_dec) - - /* -@@ -2204,7 +2204,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1) - aesdec KEY, STATE - movaps 0x70(TKEYP), KEY - aesdeclast KEY, STATE -- ret -+ RET - SYM_FUNC_END(_aesni_dec1) - - /* -@@ -2312,7 +2312,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4) - aesdeclast KEY, STATE2 - aesdeclast KEY, STATE3 - aesdeclast KEY, STATE4 -- ret -+ RET - SYM_FUNC_END(_aesni_dec4) - - /* -@@ -2372,7 +2372,7 @@ SYM_FUNC_START(aesni_ecb_enc) - popl LEN - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_ecb_enc) - - /* -@@ -2433,7 +2433,7 @@ SYM_FUNC_START(aesni_ecb_dec) - popl LEN - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_ecb_dec) - - /* -@@ -2477,7 +2477,7 @@ SYM_FUNC_START(aesni_cbc_enc) - popl IVP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_cbc_enc) - - /* -@@ -2570,7 +2570,7 @@ SYM_FUNC_START(aesni_cbc_dec) - popl IVP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_cbc_dec) - - /* -@@ -2627,7 +2627,7 @@ SYM_FUNC_START(aesni_cts_cbc_enc) - popl IVP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_cts_cbc_enc) - - /* -@@ -2688,7 +2688,7 @@ SYM_FUNC_START(aesni_cts_cbc_dec) - popl IVP - #endif - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_cts_cbc_dec) - - .pushsection .rodata -@@ -2725,7 +2725,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init) - mov $1, TCTR_LOW - movq TCTR_LOW, INC - movq CTR, TCTR_LOW -- ret -+ RET - SYM_FUNC_END(_aesni_inc_init) - - /* -@@ -2753,7 +2753,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc) - .Linc_low: - movaps CTR, IV - pshufb BSWAP_MASK, IV -- ret -+ RET - SYM_FUNC_END(_aesni_inc) - - /* -@@ -2816,7 +2816,7 @@ SYM_FUNC_START(aesni_ctr_enc) - movups IV, (IVP) - .Lctr_enc_just_ret: - FRAME_END -- ret -+ RET - SYM_FUNC_END(aesni_ctr_enc) - - #endif -@@ -2932,7 +2932,7 @@ SYM_FUNC_START(aesni_xts_encrypt) - popl IVP - #endif - FRAME_END -- ret -+ RET - - .Lxts_enc_1x: - add $64, LEN -@@ -3092,7 +3092,7 @@ SYM_FUNC_START(aesni_xts_decrypt) - popl IVP - #endif - FRAME_END -- ret -+ RET - - .Lxts_dec_1x: - add $64, LEN -diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S -index 98e3552b6e039..0852ab573fd30 100644 ---- a/arch/x86/crypto/aesni-intel_avx-x86_64.S -+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S -@@ -1767,7 +1767,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2) - FUNC_SAVE - INIT GHASH_MUL_AVX, PRECOMPUTE_AVX - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_init_avx_gen2) - - ############################################################################### -@@ -1788,15 +1788,15 @@ 
SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2) - # must be 192 - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11 - FUNC_RESTORE -- ret -+ RET - key_128_enc_update: - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9 - FUNC_RESTORE -- ret -+ RET - key_256_enc_update: - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2) - - ############################################################################### -@@ -1817,15 +1817,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2) - # must be 192 - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11 - FUNC_RESTORE -- ret -+ RET - key_128_dec_update: - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9 - FUNC_RESTORE -- ret -+ RET - key_256_dec_update: - GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2) - - ############################################################################### -@@ -1846,15 +1846,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2) - # must be 192 - GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4 - FUNC_RESTORE -- ret -+ RET - key_128_finalize: - GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4 - FUNC_RESTORE -- ret -+ RET - key_256_finalize: - GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) - - ############################################################################### -@@ -2735,7 +2735,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4) - FUNC_SAVE - INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_init_avx_gen4) - - ############################################################################### -@@ -2756,15 +2756,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4) - # must be 192 - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11 - FUNC_RESTORE -- ret -+ RET - key_128_enc_update4: - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9 - FUNC_RESTORE -- ret -+ RET - key_256_enc_update4: - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4) - - ############################################################################### -@@ -2785,15 +2785,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4) - # must be 192 - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11 - FUNC_RESTORE -- ret -+ RET - key_128_dec_update4: - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9 - FUNC_RESTORE -- ret -+ RET - key_256_dec_update4: - GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4) - - ############################################################################### -@@ -2814,13 +2814,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4) - # must be 192 - GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4 - 
FUNC_RESTORE -- ret -+ RET - key_128_finalize4: - GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4 - FUNC_RESTORE -- ret -+ RET - key_256_finalize4: - GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 - FUNC_RESTORE -- ret -+ RET - SYM_FUNC_END(aesni_gcm_finalize_avx_gen4) -diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c -index 0fc961bef299c..41901ba9d3a2c 100644 ---- a/arch/x86/crypto/aesni-intel_glue.c -+++ b/arch/x86/crypto/aesni-intel_glue.c -@@ -866,7 +866,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt) - req = &subreq; - - err = skcipher_walk_virt(&walk, req, false); -- if (err) -+ if (!walk.nbytes) - return err; - } else { - tail = 0; -@@ -1107,7 +1107,7 @@ static struct aead_alg aesni_aeads[] = { { - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx), -- .cra_alignmask = AESNI_ALIGN - 1, -+ .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - }, { -@@ -1124,7 +1124,7 @@ static struct aead_alg aesni_aeads[] = { { - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), -- .cra_alignmask = AESNI_ALIGN - 1, -+ .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - } }; -diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S -index 2ca79974f8198..b50b35ff1fdba 100644 ---- a/arch/x86/crypto/blake2s-core.S -+++ b/arch/x86/crypto/blake2s-core.S -@@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3) - movdqu %xmm1,0x10(%rdi) - movdqu %xmm14,0x20(%rdi) - .Lendofloop: -- ret -+ RET - SYM_FUNC_END(blake2s_compress_ssse3) - - #ifdef CONFIG_AS_AVX512 -@@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512) - vmovdqu %xmm1,0x10(%rdi) - vmovdqu %xmm4,0x20(%rdi) - vzeroupper -- retq -+ RET - SYM_FUNC_END(blake2s_compress_avx512) - #endif /* CONFIG_AS_AVX512 */ -diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c -index a40365ab301ee..aaba212305288 100644 ---- a/arch/x86/crypto/blake2s-glue.c -+++ b/arch/x86/crypto/blake2s-glue.c -@@ -4,8 +4,6 @@ - */ - - #include --#include --#include - - #include - #include -@@ -28,14 +26,13 @@ asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, - static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); - static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); - --void blake2s_compress_arch(struct blake2s_state *state, -- const u8 *block, size_t nblocks, -- const u32 inc) -+void blake2s_compress(struct blake2s_state *state, const u8 *block, -+ size_t nblocks, const u32 inc) - { - /* SIMD disables preemption, so relax after processing each page. 
*/ - BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); - -- if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { -+ if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) { - blake2s_compress_generic(state, block, nblocks, inc); - return; - } -@@ -56,49 +53,12 @@ void blake2s_compress_arch(struct blake2s_state *state, - block += blocks * BLAKE2S_BLOCK_SIZE; - } while (nblocks); - } --EXPORT_SYMBOL(blake2s_compress_arch); -- --static int crypto_blake2s_update_x86(struct shash_desc *desc, -- const u8 *in, unsigned int inlen) --{ -- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_arch); --} -- --static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) --{ -- return crypto_blake2s_final(desc, out, blake2s_compress_arch); --} -- --#define BLAKE2S_ALG(name, driver_name, digest_size) \ -- { \ -- .base.cra_name = name, \ -- .base.cra_driver_name = driver_name, \ -- .base.cra_priority = 200, \ -- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ -- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ -- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ -- .base.cra_module = THIS_MODULE, \ -- .digestsize = digest_size, \ -- .setkey = crypto_blake2s_setkey, \ -- .init = crypto_blake2s_init, \ -- .update = crypto_blake2s_update_x86, \ -- .final = crypto_blake2s_final_x86, \ -- .descsize = sizeof(struct blake2s_state), \ -- } -- --static struct shash_alg blake2s_algs[] = { -- BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE), -- BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE), -- BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE), -- BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE), --}; -+EXPORT_SYMBOL(blake2s_compress); - - static int __init blake2s_mod_init(void) - { -- if (!boot_cpu_has(X86_FEATURE_SSSE3)) -- return 0; -- -- static_branch_enable(&blake2s_use_ssse3); -+ if (boot_cpu_has(X86_FEATURE_SSSE3)) -+ static_branch_enable(&blake2s_use_ssse3); - - if (IS_ENABLED(CONFIG_AS_AVX512) && - boot_cpu_has(X86_FEATURE_AVX) && -@@ -109,26 +69,9 @@ static int __init blake2s_mod_init(void) - XFEATURE_MASK_AVX512, NULL)) - static_branch_enable(&blake2s_use_avx512); - -- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
-- crypto_register_shashes(blake2s_algs, -- ARRAY_SIZE(blake2s_algs)) : 0; --} -- --static void __exit blake2s_mod_exit(void) --{ -- if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) -- crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+ return 0; - } - - module_init(blake2s_mod_init); --module_exit(blake2s_mod_exit); - --MODULE_ALIAS_CRYPTO("blake2s-128"); --MODULE_ALIAS_CRYPTO("blake2s-128-x86"); --MODULE_ALIAS_CRYPTO("blake2s-160"); --MODULE_ALIAS_CRYPTO("blake2s-160-x86"); --MODULE_ALIAS_CRYPTO("blake2s-224"); --MODULE_ALIAS_CRYPTO("blake2s-224-x86"); --MODULE_ALIAS_CRYPTO("blake2s-256"); --MODULE_ALIAS_CRYPTO("blake2s-256-x86"); - MODULE_LICENSE("GPL v2"); -diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S -index 4222ac6d65848..802d715826891 100644 ---- a/arch/x86/crypto/blowfish-x86_64-asm_64.S -+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S -@@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk) - jnz .L__enc_xor; - - write_block(); -- ret; -+ RET; - .L__enc_xor: - xor_block(); -- ret; -+ RET; - SYM_FUNC_END(__blowfish_enc_blk) - - SYM_FUNC_START(blowfish_dec_blk) -@@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk) - - movq %r11, %r12; - -- ret; -+ RET; - SYM_FUNC_END(blowfish_dec_blk) - - /********************************************************************** -@@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way) - - popq %rbx; - popq %r12; -- ret; -+ RET; - - .L__enc_xor4: - xor_block4(); - - popq %rbx; - popq %r12; -- ret; -+ RET; - SYM_FUNC_END(__blowfish_enc_blk_4way) - - SYM_FUNC_START(blowfish_dec_blk_4way) -@@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way) - popq %rbx; - popq %r12; - -- ret; -+ RET; - SYM_FUNC_END(blowfish_dec_blk_4way) -diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S -index e2a0e0f4bf9d8..2e1658ddbe1a9 100644 ---- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S -+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S -@@ -192,7 +192,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c - roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, - %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, - %rcx, (%r9)); -- ret; -+ RET; - SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) - - .align 8 -@@ -200,7 +200,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a - roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, - %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, - %rax, (%r9)); -- ret; -+ RET; - SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) - - /* -@@ -778,7 +778,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16) - %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); - - FRAME_END -- ret; -+ RET; - - .align 8 - .Lenc_max32: -@@ -865,7 +865,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16) - %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); - - FRAME_END -- ret; -+ RET; - - .align 8 - .Ldec_max32: -@@ -906,7 +906,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way) - %xmm8, %rsi); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_ecb_enc_16way) - - SYM_FUNC_START(camellia_ecb_dec_16way) -@@ -936,7 +936,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way) - %xmm8, %rsi); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_ecb_dec_16way) - - SYM_FUNC_START(camellia_cbc_dec_16way) -@@ -987,5 +987,5 @@ SYM_FUNC_START(camellia_cbc_dec_16way) - %xmm8, %rsi); - - 
FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_cbc_dec_16way) -diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -index 706f70829a07e..0e4e9abbf4de3 100644 ---- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -@@ -226,7 +226,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c - roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, - %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, - %rcx, (%r9)); -- ret; -+ RET; - SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) - - .align 8 -@@ -234,7 +234,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a - roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, - %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, - %rax, (%r9)); -- ret; -+ RET; - SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) - - /* -@@ -814,7 +814,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32) - %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); - - FRAME_END -- ret; -+ RET; - - .align 8 - .Lenc_max32: -@@ -901,7 +901,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32) - %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); - - FRAME_END -- ret; -+ RET; - - .align 8 - .Ldec_max32: -@@ -946,7 +946,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way) - vzeroupper; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_ecb_enc_32way) - - SYM_FUNC_START(camellia_ecb_dec_32way) -@@ -980,7 +980,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way) - vzeroupper; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_ecb_dec_32way) - - SYM_FUNC_START(camellia_cbc_dec_32way) -@@ -1047,5 +1047,5 @@ SYM_FUNC_START(camellia_cbc_dec_32way) - - addq $(16 * 32), %rsp; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(camellia_cbc_dec_32way) -diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S -index 1372e64088507..347c059f59403 100644 ---- a/arch/x86/crypto/camellia-x86_64-asm_64.S -+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S -@@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk) - enc_outunpack(mov, RT1); - - movq RR12, %r12; -- ret; -+ RET; - - .L__enc_xor: - enc_outunpack(xor, RT1); - - movq RR12, %r12; -- ret; -+ RET; - SYM_FUNC_END(__camellia_enc_blk) - - SYM_FUNC_START(camellia_dec_blk) -@@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk) - dec_outunpack(); - - movq RR12, %r12; -- ret; -+ RET; - SYM_FUNC_END(camellia_dec_blk) - - /********************************************************************** -@@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way) - - movq RR12, %r12; - popq %rbx; -- ret; -+ RET; - - .L__enc2_xor: - enc_outunpack2(xor, RT2); - - movq RR12, %r12; - popq %rbx; -- ret; -+ RET; - SYM_FUNC_END(__camellia_enc_blk_2way) - - SYM_FUNC_START(camellia_dec_blk_2way) -@@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way) - - movq RR12, %r12; - movq RXOR, %rbx; -- ret; -+ RET; - SYM_FUNC_END(camellia_dec_blk_2way) -diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -index 8a6181b08b590..b258af420c92c 100644 ---- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -@@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16) - outunpack_blocks(RR3, RL3, RTMP, RX, RKM); - outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - -- ret; -+ RET; - SYM_FUNC_END(__cast5_enc_blk16) - - .align 16 -@@ -352,7 
+352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16) - outunpack_blocks(RR3, RL3, RTMP, RX, RKM); - outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - -- ret; -+ RET; - - .L__skip_dec: - vpsrldq $4, RKR, RKR; -@@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way) - - popq %r15; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast5_ecb_enc_16way) - - SYM_FUNC_START(cast5_ecb_dec_16way) -@@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way) - - popq %r15; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast5_ecb_dec_16way) - - SYM_FUNC_START(cast5_cbc_dec_16way) -@@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way) - popq %r15; - popq %r12; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast5_cbc_dec_16way) - - SYM_FUNC_START(cast5_ctr_16way) -@@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way) - popq %r15; - popq %r12; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast5_ctr_16way) -diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -index fbddcecc3e3fc..82b716fd5dbac 100644 ---- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -@@ -289,7 +289,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8) - outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); - outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - -- ret; -+ RET; - SYM_FUNC_END(__cast6_enc_blk8) - - .align 8 -@@ -336,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8) - outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); - outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - -- ret; -+ RET; - SYM_FUNC_END(__cast6_dec_blk8) - - SYM_FUNC_START(cast6_ecb_enc_8way) -@@ -359,7 +359,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way) - - popq %r15; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast6_ecb_enc_8way) - - SYM_FUNC_START(cast6_ecb_dec_8way) -@@ -382,7 +382,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way) - - popq %r15; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast6_ecb_dec_8way) - - SYM_FUNC_START(cast6_cbc_dec_8way) -@@ -408,5 +408,5 @@ SYM_FUNC_START(cast6_cbc_dec_8way) - popq %r15; - popq %r12; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(cast6_cbc_dec_8way) -diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S -index ee9a40ab41093..f3d8fc0182493 100644 ---- a/arch/x86/crypto/chacha-avx2-x86_64.S -+++ b/arch/x86/crypto/chacha-avx2-x86_64.S -@@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2) - - .Ldone2: - vzeroupper -- ret -+ RET - - .Lxorpart2: - # xor remaining bytes from partial register into output -@@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2) - - .Ldone4: - vzeroupper -- ret -+ RET - - .Lxorpart4: - # xor remaining bytes from partial register into output -@@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2) - .Ldone8: - vzeroupper - lea -8(%r10),%rsp -- ret -+ RET - - .Lxorpart8: - # xor remaining bytes from partial register into output -diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S -index bb193fde123a0..259383e1ad440 100644 ---- a/arch/x86/crypto/chacha-avx512vl-x86_64.S -+++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S -@@ -166,13 +166,13 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl) - - .Ldone2: - vzeroupper -- ret -+ RET - - .Lxorpart2: - # xor remaining bytes from partial register into output - mov %rcx,%rax - and $0xf,%rcx -- jz .Ldone8 -+ jz .Ldone2 - mov %rax,%r9 - and $~0xf,%r9 - -@@ -432,13 +432,13 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl) - - .Ldone4: - vzeroupper -- ret -+ RET - - .Lxorpart4: - # xor remaining bytes from partial 
register into output - mov %rcx,%rax - and $0xf,%rcx -- jz .Ldone8 -+ jz .Ldone4 - mov %rax,%r9 - and $~0xf,%r9 - -@@ -812,7 +812,7 @@ SYM_FUNC_START(chacha_8block_xor_avx512vl) - - .Ldone8: - vzeroupper -- ret -+ RET - - .Lxorpart8: - # xor remaining bytes from partial register into output -diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S -index ca1788bfee162..7111949cd5b99 100644 ---- a/arch/x86/crypto/chacha-ssse3-x86_64.S -+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S -@@ -108,7 +108,7 @@ SYM_FUNC_START_LOCAL(chacha_permute) - sub $2,%r8d - jnz .Ldoubleround - -- ret -+ RET - SYM_FUNC_END(chacha_permute) - - SYM_FUNC_START(chacha_block_xor_ssse3) -@@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_block_xor_ssse3) - - .Ldone: - FRAME_END -- ret -+ RET - - .Lxorpart: - # xor remaining bytes from partial register into output -@@ -217,7 +217,7 @@ SYM_FUNC_START(hchacha_block_ssse3) - movdqu %xmm3,0x10(%rsi) - - FRAME_END -- ret -+ RET - SYM_FUNC_END(hchacha_block_ssse3) - - SYM_FUNC_START(chacha_4block_xor_ssse3) -@@ -762,7 +762,7 @@ SYM_FUNC_START(chacha_4block_xor_ssse3) - - .Ldone4: - lea -8(%r10),%rsp -- ret -+ RET - - .Lxorpart4: - # xor remaining bytes from partial register into output -diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S -index 6e7d4c4d32081..c392a6edbfff6 100644 ---- a/arch/x86/crypto/crc32-pclmul_asm.S -+++ b/arch/x86/crypto/crc32-pclmul_asm.S -@@ -236,5 +236,5 @@ fold_64: - pxor %xmm2, %xmm1 - pextrd $0x01, %xmm1, %eax - -- ret -+ RET - SYM_FUNC_END(crc32_pclmul_le_16) -diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -index ac1f303eed0f4..80c0d22fc42c6 100644 ---- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -@@ -306,7 +306,7 @@ do_return: - popq %rsi - popq %rdi - popq %rbx -- ret -+ RET - SYM_FUNC_END(crc_pcl) - - .section .rodata, "a", @progbits -diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S -index b2533d63030e5..721474abfb719 100644 ---- a/arch/x86/crypto/crct10dif-pcl-asm_64.S -+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S -@@ -257,7 +257,7 @@ SYM_FUNC_START(crc_t10dif_pcl) - # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. 
- - pextrw $0, %xmm0, %eax -- ret -+ RET - - .align 16 - .Lless_than_256_bytes: -diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S -index fac0fdc3f25da..f4c760f4cade6 100644 ---- a/arch/x86/crypto/des3_ede-asm_64.S -+++ b/arch/x86/crypto/des3_ede-asm_64.S -@@ -243,7 +243,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk) - popq %r12; - popq %rbx; - -- ret; -+ RET; - SYM_FUNC_END(des3_ede_x86_64_crypt_blk) - - /*********************************************************************** -@@ -528,7 +528,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) - popq %r12; - popq %rbx; - -- ret; -+ RET; - SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) - - .section .rodata, "a", @progbits -diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S -index 99ac25e18e098..2bf8718999209 100644 ---- a/arch/x86/crypto/ghash-clmulni-intel_asm.S -+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S -@@ -85,7 +85,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) - psrlq $1, T2 - pxor T2, T1 - pxor T1, DATA -- ret -+ RET - SYM_FUNC_END(__clmul_gf128mul_ble) - - /* void clmul_ghash_mul(char *dst, const u128 *shash) */ -@@ -99,7 +99,7 @@ SYM_FUNC_START(clmul_ghash_mul) - pshufb BSWAP, DATA - movups DATA, (%rdi) - FRAME_END -- ret -+ RET - SYM_FUNC_END(clmul_ghash_mul) - - /* -@@ -128,5 +128,5 @@ SYM_FUNC_START(clmul_ghash_update) - movups DATA, (%rdi) - .Lupdate_just_ret: - FRAME_END -- ret -+ RET - SYM_FUNC_END(clmul_ghash_update) -diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c -index 1f1a95f3dd0ca..c0ab0ff4af655 100644 ---- a/arch/x86/crypto/ghash-clmulni-intel_glue.c -+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - - #define GHASH_BLOCK_SIZE 16 - #define GHASH_DIGEST_SIZE 16 -@@ -54,15 +55,14 @@ static int ghash_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) - { - struct ghash_ctx *ctx = crypto_shash_ctx(tfm); -- be128 *x = (be128 *)key; - u64 a, b; - - if (keylen != GHASH_BLOCK_SIZE) - return -EINVAL; - - /* perform multiplication by 'x' in GF(2^128) */ -- a = be64_to_cpu(x->a); -- b = be64_to_cpu(x->b); -+ a = get_unaligned_be64(key); -+ b = get_unaligned_be64(key + 8); - - ctx->shash.a = (b << 1) | (a >> 63); - ctx->shash.b = (a << 1) | (b >> 63); -diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S -index b22c7b9362726..6a0b15e7196a8 100644 ---- a/arch/x86/crypto/nh-avx2-x86_64.S -+++ b/arch/x86/crypto/nh-avx2-x86_64.S -@@ -153,5 +153,5 @@ SYM_FUNC_START(nh_avx2) - vpaddq T1, T0, T0 - vpaddq T4, T0, T0 - vmovdqu T0, (HASH) -- ret -+ RET - SYM_FUNC_END(nh_avx2) -diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S -index d7ae22dd66839..34c567bbcb4fa 100644 ---- a/arch/x86/crypto/nh-sse2-x86_64.S -+++ b/arch/x86/crypto/nh-sse2-x86_64.S -@@ -119,5 +119,5 @@ SYM_FUNC_START(nh_sse2) - paddq PASS2_SUMS, T1 - movdqu T0, 0x00(HASH) - movdqu T1, 0x10(HASH) -- ret -+ RET - SYM_FUNC_END(nh_sse2) -diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -index 71fae5a09e56d..2077ce7a56479 100644 ---- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -@@ -297,7 +297,7 @@ ___ - $code.=<<___; - mov \$1,%eax - .Lno_key: -- ret -+ RET - ___ - &end_function("poly1305_init_x86_64"); - -@@ -373,7 +373,7 @@ $code.=<<___; - .cfi_adjust_cfa_offset -48 - .Lno_data: - 
.Lblocks_epilogue: -- ret -+ RET - .cfi_endproc - ___ - &end_function("poly1305_blocks_x86_64"); -@@ -399,7 +399,7 @@ $code.=<<___; - mov %rax,0($mac) # write result - mov %rcx,8($mac) - -- ret -+ RET - ___ - &end_function("poly1305_emit_x86_64"); - if ($avx) { -@@ -429,7 +429,7 @@ ___ - &poly1305_iteration(); - $code.=<<___; - pop $ctx -- ret -+ RET - .size __poly1305_block,.-__poly1305_block - - .type __poly1305_init_avx,\@abi-omnipotent -@@ -594,7 +594,7 @@ __poly1305_init_avx: - - lea -48-64($ctx),$ctx # size [de-]optimization - pop %rbp -- ret -+ RET - .size __poly1305_init_avx,.-__poly1305_init_avx - ___ - -@@ -747,7 +747,7 @@ $code.=<<___; - .cfi_restore %rbp - .Lno_data_avx: - .Lblocks_avx_epilogue: -- ret -+ RET - .cfi_endproc - - .align 32 -@@ -1452,7 +1452,7 @@ $code.=<<___ if (!$win64); - ___ - $code.=<<___; - vzeroupper -- ret -+ RET - .cfi_endproc - ___ - &end_function("poly1305_blocks_avx"); -@@ -1508,7 +1508,7 @@ $code.=<<___; - mov %rax,0($mac) # write result - mov %rcx,8($mac) - -- ret -+ RET - ___ - &end_function("poly1305_emit_avx"); - -@@ -1675,7 +1675,7 @@ $code.=<<___; - .cfi_restore %rbp - .Lno_data_avx2$suffix: - .Lblocks_avx2_epilogue$suffix: -- ret -+ RET - .cfi_endproc - - .align 32 -@@ -2201,7 +2201,7 @@ $code.=<<___ if (!$win64); - ___ - $code.=<<___; - vzeroupper -- ret -+ RET - .cfi_endproc - ___ - if($avx > 2 && $avx512) { -@@ -2792,7 +2792,7 @@ $code.=<<___ if (!$win64); - .cfi_def_cfa_register %rsp - ___ - $code.=<<___; -- ret -+ RET - .cfi_endproc - ___ - -@@ -2893,7 +2893,7 @@ $code.=<<___ if ($flavour =~ /elf32/); - ___ - $code.=<<___; - mov \$1,%eax -- ret -+ RET - .size poly1305_init_base2_44,.-poly1305_init_base2_44 - ___ - { -@@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52: - jnz .Lblocks_vpmadd52_4x - - .Lno_data_vpmadd52: -- ret -+ RET - .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 - ___ - } -@@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x: - vzeroall - - .Lno_data_vpmadd52_4x: -- ret -+ RET - .size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x - ___ - } -@@ -3824,7 +3824,7 @@ $code.=<<___; - vzeroall - - .Lno_data_vpmadd52_8x: -- ret -+ RET - .size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x - ___ - } -@@ -3861,7 +3861,7 @@ poly1305_emit_base2_44: - mov %rax,0($mac) # write result - mov %rcx,8($mac) - -- ret -+ RET - .size poly1305_emit_base2_44,.-poly1305_emit_base2_44 - ___ - } } } -@@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad: - - .Ldone_enc: - mov $otp,%rax -- ret -+ RET - .size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad - - .globl xor128_decrypt_n_pad -@@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad: - - .Ldone_dec: - mov $otp,%rax -- ret -+ RET - .size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad - ___ - } -@@ -4109,7 +4109,7 @@ avx_handler: - pop %rbx - pop %rdi - pop %rsi -- ret -+ RET - .size avx_handler,.-avx_handler - - .section .pdata -diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -index b7ee24df7fbae..82f2313f512b8 100644 ---- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -@@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) - write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__serpent_enc_blk8_avx) - - .align 8 -@@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) - write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - -- ret; -+ RET; - 
SYM_FUNC_END(__serpent_dec_blk8_avx) - - SYM_FUNC_START(serpent_ecb_enc_8way_avx) -@@ -673,7 +673,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx) - store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_ecb_enc_8way_avx) - - SYM_FUNC_START(serpent_ecb_dec_8way_avx) -@@ -691,7 +691,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx) - store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_ecb_dec_8way_avx) - - SYM_FUNC_START(serpent_cbc_dec_8way_avx) -@@ -709,5 +709,5 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx) - store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_cbc_dec_8way_avx) -diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S -index 9161b6e441f31..8ea34c9b93160 100644 ---- a/arch/x86/crypto/serpent-avx2-asm_64.S -+++ b/arch/x86/crypto/serpent-avx2-asm_64.S -@@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16) - write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__serpent_enc_blk16) - - .align 8 -@@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16) - write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__serpent_dec_blk16) - - SYM_FUNC_START(serpent_ecb_enc_16way) -@@ -677,7 +677,7 @@ SYM_FUNC_START(serpent_ecb_enc_16way) - vzeroupper; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_ecb_enc_16way) - - SYM_FUNC_START(serpent_ecb_dec_16way) -@@ -699,7 +699,7 @@ SYM_FUNC_START(serpent_ecb_dec_16way) - vzeroupper; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_ecb_dec_16way) - - SYM_FUNC_START(serpent_cbc_dec_16way) -@@ -722,5 +722,5 @@ SYM_FUNC_START(serpent_cbc_dec_16way) - vzeroupper; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(serpent_cbc_dec_16way) -diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S -index 6379b99cb722e..8ccb03ad7cef5 100644 ---- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S -+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S -@@ -553,12 +553,12 @@ SYM_FUNC_START(__serpent_enc_blk_4way) - - write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - -- ret; -+ RET; - - .L__enc_xor4: - xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); - -- ret; -+ RET; - SYM_FUNC_END(__serpent_enc_blk_4way) - - SYM_FUNC_START(serpent_dec_blk_4way) -@@ -612,5 +612,5 @@ SYM_FUNC_START(serpent_dec_blk_4way) - movl arg_dst(%esp), %eax; - write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); - -- ret; -+ RET; - SYM_FUNC_END(serpent_dec_blk_4way) -diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -index efb6dc17dc907..e0998a011d1dd 100644 ---- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -@@ -675,13 +675,13 @@ SYM_FUNC_START(__serpent_enc_blk_8way) - write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -- ret; -+ RET; - - .L__enc_xor8: - xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); - xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__serpent_enc_blk_8way) - - SYM_FUNC_START(serpent_dec_blk_8way) -@@ -735,5 +735,5 @@ SYM_FUNC_START(serpent_dec_blk_8way) - write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(%rax, RC2, RD2, RB2, RE2, 
RK0, RK1, RK2); - -- ret; -+ RET; - SYM_FUNC_END(serpent_dec_blk_8way) -diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S -index 5eed620f46765..a96b2fd26dab4 100644 ---- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S -+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S -@@ -674,7 +674,7 @@ _loop3: - pop %r12 - pop %rbx - -- ret -+ RET - - SYM_FUNC_END(\name) - .endm -diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S -index 5d8415f482bd7..2f94ec0e763bf 100644 ---- a/arch/x86/crypto/sha1_ni_asm.S -+++ b/arch/x86/crypto/sha1_ni_asm.S -@@ -290,7 +290,7 @@ SYM_FUNC_START(sha1_ni_transform) - mov %rbp, %rsp - pop %rbp - -- ret -+ RET - SYM_FUNC_END(sha1_ni_transform) - - .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 -diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S -index d25668d2a1e92..263f916362e02 100644 ---- a/arch/x86/crypto/sha1_ssse3_asm.S -+++ b/arch/x86/crypto/sha1_ssse3_asm.S -@@ -99,7 +99,7 @@ - pop %rbp - pop %r12 - pop %rbx -- ret -+ RET - - SYM_FUNC_END(\name) - .endm -diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S -index 4739cd31b9db1..3baa1ec390974 100644 ---- a/arch/x86/crypto/sha256-avx-asm.S -+++ b/arch/x86/crypto/sha256-avx-asm.S -@@ -458,7 +458,7 @@ done_hash: - popq %r13 - popq %r12 - popq %rbx -- ret -+ RET - SYM_FUNC_END(sha256_transform_avx) - - .section .rodata.cst256.K256, "aM", @progbits, 256 -diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S -index 4087f7432a7e8..9bcdbc47b8b4b 100644 ---- a/arch/x86/crypto/sha256-avx2-asm.S -+++ b/arch/x86/crypto/sha256-avx2-asm.S -@@ -710,7 +710,7 @@ done_hash: - popq %r13 - popq %r12 - popq %rbx -- ret -+ RET - SYM_FUNC_END(sha256_transform_rorx) - - .section .rodata.cst512.K256, "aM", @progbits, 512 -diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S -index ddfa863b4ee33..c4a5db612c327 100644 ---- a/arch/x86/crypto/sha256-ssse3-asm.S -+++ b/arch/x86/crypto/sha256-ssse3-asm.S -@@ -472,7 +472,7 @@ done_hash: - popq %r12 - popq %rbx - -- ret -+ RET - SYM_FUNC_END(sha256_transform_ssse3) - - .section .rodata.cst256.K256, "aM", @progbits, 256 -diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S -index 7abade04a3a38..94d50dd27cb53 100644 ---- a/arch/x86/crypto/sha256_ni_asm.S -+++ b/arch/x86/crypto/sha256_ni_asm.S -@@ -326,7 +326,7 @@ SYM_FUNC_START(sha256_ni_transform) - - .Ldone_hash: - -- ret -+ RET - SYM_FUNC_END(sha256_ni_transform) - - .section .rodata.cst256.K256, "aM", @progbits, 256 -diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S -index 3d8f0fd4eea87..1fefe6dd3a9e2 100644 ---- a/arch/x86/crypto/sha512-avx-asm.S -+++ b/arch/x86/crypto/sha512-avx-asm.S -@@ -361,7 +361,7 @@ updateblock: - pop %rbx - - nowork: -- ret -+ RET - SYM_FUNC_END(sha512_transform_avx) - - ######################################################################## -diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S -index 072cb0f0deae3..5cdaab7d69015 100644 ---- a/arch/x86/crypto/sha512-avx2-asm.S -+++ b/arch/x86/crypto/sha512-avx2-asm.S -@@ -679,7 +679,7 @@ done_hash: - pop %r12 - pop %rbx - -- ret -+ RET - SYM_FUNC_END(sha512_transform_rorx) - - ######################################################################## -diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S -index bd51c9070bedc..b84c22e06c5f7 100644 ---- 
a/arch/x86/crypto/sha512-ssse3-asm.S -+++ b/arch/x86/crypto/sha512-ssse3-asm.S -@@ -363,7 +363,7 @@ updateblock: - pop %rbx - - nowork: -- ret -+ RET - SYM_FUNC_END(sha512_transform_ssse3) - - ######################################################################## -diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S -index 1cc72b4804fab..4767ab61ff489 100644 ---- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S -+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S -@@ -246,7 +246,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt4) - .Lblk4_store_output_done: - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx_crypt4) - - .align 8 -@@ -356,7 +356,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk8) - vpshufb RTMP2, RB3, RB3; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(__sm4_crypt_blk8) - - /* -@@ -412,7 +412,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8) - .Lblk8_store_output_done: - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx_crypt8) - - /* -@@ -487,7 +487,7 @@ SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8) - - /* -@@ -537,7 +537,7 @@ SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) - - /* -@@ -590,5 +590,5 @@ SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8) -diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S -index 9c5d3f3ad45a9..4732fe8bb65b6 100644 ---- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S -+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S -@@ -268,7 +268,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk16) - vpshufb RTMP2, RB3, RB3; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(__sm4_crypt_blk16) - - #define inc_le128(x, minus_one, tmp) \ -@@ -387,7 +387,7 @@ SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16) - - /* -@@ -441,7 +441,7 @@ SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16) - - /* -@@ -497,5 +497,5 @@ SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) - - vzeroall; - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16) -diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -index 37e63b3c664eb..31f9b2ec3857d 100644 ---- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -@@ -267,7 +267,7 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8) - outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); - outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__twofish_enc_blk8) - - .align 8 -@@ -307,7 +307,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8) - outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); - outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); - -- ret; -+ RET; - SYM_FUNC_END(__twofish_dec_blk8) - - SYM_FUNC_START(twofish_ecb_enc_8way) -@@ -327,7 +327,7 @@ SYM_FUNC_START(twofish_ecb_enc_8way) - store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(twofish_ecb_enc_8way) - - SYM_FUNC_START(twofish_ecb_dec_8way) -@@ -347,7 +347,7 @@ SYM_FUNC_START(twofish_ecb_dec_8way) - store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(twofish_ecb_dec_8way) - - 
SYM_FUNC_START(twofish_cbc_dec_8way) -@@ -372,5 +372,5 @@ SYM_FUNC_START(twofish_cbc_dec_8way) - popq %r12; - - FRAME_END -- ret; -+ RET; - SYM_FUNC_END(twofish_cbc_dec_8way) -diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S -index a6f09e4f2e463..3abcad6618840 100644 ---- a/arch/x86/crypto/twofish-i586-asm_32.S -+++ b/arch/x86/crypto/twofish-i586-asm_32.S -@@ -260,7 +260,7 @@ SYM_FUNC_START(twofish_enc_blk) - pop %ebx - pop %ebp - mov $1, %eax -- ret -+ RET - SYM_FUNC_END(twofish_enc_blk) - - SYM_FUNC_START(twofish_dec_blk) -@@ -317,5 +317,5 @@ SYM_FUNC_START(twofish_dec_blk) - pop %ebx - pop %ebp - mov $1, %eax -- ret -+ RET - SYM_FUNC_END(twofish_dec_blk) -diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -index bca4cea757ce2..d2288bf38a8a5 100644 ---- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -@@ -258,7 +258,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way) - popq %rbx; - popq %r12; - popq %r13; -- ret; -+ RET; - - .L__enc_xor3: - outunpack_enc3(xor); -@@ -266,7 +266,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way) - popq %rbx; - popq %r12; - popq %r13; -- ret; -+ RET; - SYM_FUNC_END(__twofish_enc_blk_3way) - - SYM_FUNC_START(twofish_dec_blk_3way) -@@ -301,5 +301,5 @@ SYM_FUNC_START(twofish_dec_blk_3way) - popq %rbx; - popq %r12; - popq %r13; -- ret; -+ RET; - SYM_FUNC_END(twofish_dec_blk_3way) -diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S -index d2e56232494a8..775af290cd196 100644 ---- a/arch/x86/crypto/twofish-x86_64-asm_64.S -+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S -@@ -252,7 +252,7 @@ SYM_FUNC_START(twofish_enc_blk) - - popq R1 - movl $1,%eax -- ret -+ RET - SYM_FUNC_END(twofish_enc_blk) - - SYM_FUNC_START(twofish_dec_blk) -@@ -304,5 +304,5 @@ SYM_FUNC_START(twofish_dec_blk) - - popq R1 - movl $1,%eax -- ret -+ RET - SYM_FUNC_END(twofish_dec_blk) -diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile -index 7fec5dcf64386..ca2fe186994b0 100644 ---- a/arch/x86/entry/Makefile -+++ b/arch/x86/entry/Makefile -@@ -11,12 +11,13 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) - - CFLAGS_common.o += -fno-stack-protector - --obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o -+obj-y := entry.o entry_$(BITS).o syscall_$(BITS).o - obj-y += common.o - - obj-y += vdso/ - obj-y += vsyscall/ - -+obj-$(CONFIG_PREEMPTION) += thunk_$(BITS).o - obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o - obj-$(CONFIG_X86_X32_ABI) += syscall_x32.o - -diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h -index a4c061fb7c6ea..b00a3a95fbfab 100644 ---- a/arch/x86/entry/calling.h -+++ b/arch/x86/entry/calling.h -@@ -7,6 +7,8 @@ - #include - #include - #include -+#include -+#include - - /* - -@@ -119,27 +121,19 @@ For 32-bit we have the following conventions - kernel is built with - CLEAR_REGS - .endm - --.macro POP_REGS pop_rdi=1 skip_r11rcx=0 -+.macro POP_REGS pop_rdi=1 - popq %r15 - popq %r14 - popq %r13 - popq %r12 - popq %rbp - popq %rbx -- .if \skip_r11rcx -- popq %rsi -- .else - popq %r11 -- .endif - popq %r10 - popq %r9 - popq %r8 - popq %rax -- .if \skip_r11rcx -- popq %rsi -- .else - popq %rcx -- .endif - popq %rdx - popq %rsi - .if \pop_rdi -@@ -289,6 +283,66 @@ For 32-bit we have the following conventions - kernel is built with - - #endif - -+/* -+ * IBRS kernel mitigation for Spectre_v2. 
-+ * -+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers -+ * the regs it uses (AX, CX, DX). Must be called before the first RET -+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction) -+ * -+ * The optional argument is used to save/restore the current value, -+ * which is used on the paranoid paths. -+ * -+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set. -+ */ -+.macro IBRS_ENTER save_reg -+#ifdef CONFIG_CPU_IBRS_ENTRY -+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS -+ movl $MSR_IA32_SPEC_CTRL, %ecx -+ -+.ifnb \save_reg -+ rdmsr -+ shl $32, %rdx -+ or %rdx, %rax -+ mov %rax, \save_reg -+ test $SPEC_CTRL_IBRS, %eax -+ jz .Ldo_wrmsr_\@ -+ lfence -+ jmp .Lend_\@ -+.Ldo_wrmsr_\@: -+.endif -+ -+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx -+ movl %edx, %eax -+ shr $32, %rdx -+ wrmsr -+.Lend_\@: -+#endif -+.endm -+ -+/* -+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX) -+ * regs. Must be called after the last RET. -+ */ -+.macro IBRS_EXIT save_reg -+#ifdef CONFIG_CPU_IBRS_ENTRY -+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS -+ movl $MSR_IA32_SPEC_CTRL, %ecx -+ -+.ifnb \save_reg -+ mov \save_reg, %rdx -+.else -+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx -+ andl $(~SPEC_CTRL_IBRS), %edx -+.endif -+ -+ movl %edx, %eax -+ shr $32, %rdx -+ wrmsr -+.Lend_\@: -+#endif -+.endm -+ - /* - * Mitigate Spectre v1 for conditional swapgs code paths. - * -diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S -new file mode 100644 -index 0000000000000..bfb7bcb362bcf ---- /dev/null -+++ b/arch/x86/entry/entry.S -@@ -0,0 +1,22 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Common place for both 32- and 64-bit entry routines. -+ */ -+ -+#include -+#include -+#include -+ -+.pushsection .noinstr.text, "ax" -+ -+SYM_FUNC_START(entry_ibpb) -+ movl $MSR_IA32_PRED_CMD, %ecx -+ movl $PRED_CMD_IBPB, %eax -+ xorl %edx, %edx -+ wrmsr -+ RET -+SYM_FUNC_END(entry_ibpb) -+/* For KVM */ -+EXPORT_SYMBOL_GPL(entry_ibpb); -+ -+.popsection -diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index ccb9d32768f31..e309e71560389 100644 ---- a/arch/x86/entry/entry_32.S -+++ b/arch/x86/entry/entry_32.S -@@ -268,19 +268,16 @@ - 1: popl %ds - 2: popl %es - 3: popl %fs -- addl $(4 + \pop), %esp /* pop the unused "gs" slot */ -+4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */ - IRET_FRAME --.pushsection .fixup, "ax" --4: movl $0, (%esp) -- jmp 1b --5: movl $0, (%esp) -- jmp 2b --6: movl $0, (%esp) -- jmp 3b --.popsection -- _ASM_EXTABLE(1b, 4b) -- _ASM_EXTABLE(2b, 5b) -- _ASM_EXTABLE(3b, 6b) -+ -+ /* -+ * There is no _ASM_EXTABLE_TYPE_REG() for ASM, however since this is -+ * ASM the registers are known and we can trivially hard-code them. -+ */ -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS) -+ _ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES) -+ _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS) - .endm - - .macro RESTORE_ALL_NMI cr3_reg:req pop=0 -@@ -701,7 +698,6 @@ SYM_CODE_START(__switch_to_asm) - movl %ebx, PER_CPU_VAR(__stack_chk_guard) - #endif - --#ifdef CONFIG_RETPOLINE - /* - * When switching from a shallower to a deeper call stack - * the RSB may either underflow or use entries populated -@@ -710,7 +706,6 @@ SYM_CODE_START(__switch_to_asm) - * speculative execution to prevent attack. - */ - FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW --#endif - - /* Restore flags or the incoming task to restore AC state. 
*/ - popfl -@@ -740,7 +735,7 @@ SYM_FUNC_START(schedule_tail_wrapper) - popl %eax - - FRAME_END -- ret -+ RET - SYM_FUNC_END(schedule_tail_wrapper) - .popsection - -@@ -925,10 +920,8 @@ SYM_FUNC_START(entry_SYSENTER_32) - sti - sysexit - --.pushsection .fixup, "ax" --2: movl $0, PT_FS(%esp) -- jmp 1b --.popsection -+2: movl $0, PT_FS(%esp) -+ jmp 1b - _ASM_EXTABLE(1b, 2b) - - .Lsysenter_fix_flags: -@@ -996,8 +989,7 @@ restore_all_switch_stack: - */ - iret - --.section .fixup, "ax" --SYM_CODE_START(asm_iret_error) -+.Lasm_iret_error: - pushl $0 # no error code - pushl $iret_error - -@@ -1014,9 +1006,8 @@ SYM_CODE_START(asm_iret_error) - #endif - - jmp handle_exception --SYM_CODE_END(asm_iret_error) --.previous -- _ASM_EXTABLE(.Lirq_return, asm_iret_error) -+ -+ _ASM_EXTABLE(.Lirq_return, .Lasm_iret_error) - SYM_FUNC_END(entry_INT80_32) - - .macro FIXUP_ESPFIX_STACK -@@ -1248,14 +1239,14 @@ SYM_CODE_START(asm_exc_nmi) - SYM_CODE_END(asm_exc_nmi) - - .pushsection .text, "ax" --SYM_CODE_START(rewind_stack_do_exit) -+SYM_CODE_START(rewind_stack_and_make_dead) - /* Prevent any naive code from trying to unwind to our caller. */ - xorl %ebp, %ebp - - movl PER_CPU_VAR(cpu_current_top_of_stack), %esi - leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp - -- call do_exit -+ call make_task_dead - 1: jmp 1b --SYM_CODE_END(rewind_stack_do_exit) -+SYM_CODE_END(rewind_stack_and_make_dead) - .popsection -diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index e38a4cf795d96..9f1333a9ee41d 100644 ---- a/arch/x86/entry/entry_64.S -+++ b/arch/x86/entry/entry_64.S -@@ -85,7 +85,7 @@ - */ - - SYM_CODE_START(entry_SYSCALL_64) -- UNWIND_HINT_EMPTY -+ UNWIND_HINT_ENTRY - - swapgs - /* tss.sp2 is scratch space. */ -@@ -110,6 +110,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) - movq %rsp, %rdi - /* Sign extend the lower 32bit as syscall numbers are treated as int */ - movslq %eax, %rsi -+ -+ /* clobbers %rax, make sure it is after saving the syscall nr */ -+ IBRS_ENTER -+ UNTRAIN_RET -+ - call do_syscall_64 /* returns with IRQs disabled */ - - /* -@@ -189,8 +194,8 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) - * perf profiles. Nothing jumps here. - */ - syscall_return_via_sysret: -- /* rcx and r11 are already restored (see code above) */ -- POP_REGS pop_rdi=0 skip_r11rcx=1 -+ IBRS_EXIT -+ POP_REGS pop_rdi=0 - - /* - * Now all regs are restored except RSP and RDI. -@@ -243,7 +248,6 @@ SYM_FUNC_START(__switch_to_asm) - movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset - #endif - --#ifdef CONFIG_RETPOLINE - /* - * When switching from a shallower to a deeper call stack - * the RSB may either underflow or use entries populated -@@ -252,7 +256,6 @@ SYM_FUNC_START(__switch_to_asm) - * speculative execution to prevent attack. - */ - FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW --#endif - - /* restore callee-saved registers */ - popq %r15 -@@ -315,6 +318,14 @@ SYM_CODE_END(ret_from_fork) - #endif - .endm - -+SYM_CODE_START_LOCAL(xen_error_entry) -+ UNWIND_HINT_FUNC -+ PUSH_AND_CLEAR_REGS save_ret=1 -+ ENCODE_FRAME_POINTER 8 -+ UNTRAIN_RET -+ RET -+SYM_CODE_END(xen_error_entry) -+ - /** - * idtentry_body - Macro to emit code calling the C function - * @cfunc: C function to be called -@@ -322,7 +333,18 @@ SYM_CODE_END(ret_from_fork) - */ - .macro idtentry_body cfunc has_error_code:req - -- call error_entry -+ /* -+ * Call error_entry() and switch to the task stack if from userspace. 
-+ * -+ * When in XENPV, it is already in the task stack, and it can't fault -+ * for native_iret() nor native_load_gs_index() since XENPV uses its -+ * own pvops for IRET and load_gs_index(). And it doesn't need to -+ * switch the CR3. So it can skip invoking error_entry(). -+ */ -+ ALTERNATIVE "call error_entry; movq %rax, %rsp", \ -+ "call xen_error_entry", X86_FEATURE_XENPV -+ -+ ENCODE_FRAME_POINTER - UNWIND_HINT_REGS - - movq %rsp, %rdi /* pt_regs pointer into 1st argument*/ -@@ -351,6 +373,7 @@ SYM_CODE_END(ret_from_fork) - SYM_CODE_START(\asmsym) - UNWIND_HINT_IRET_REGS offset=\has_error_code*8 - ASM_CLAC -+ cld - - .if \has_error_code == 0 - pushq $-1 /* ORIG_RAX: no syscall to restart */ -@@ -418,6 +441,7 @@ SYM_CODE_END(\asmsym) - SYM_CODE_START(\asmsym) - UNWIND_HINT_IRET_REGS - ASM_CLAC -+ cld - - pushq $-1 /* ORIG_RAX: no syscall to restart */ - -@@ -473,6 +497,7 @@ SYM_CODE_END(\asmsym) - SYM_CODE_START(\asmsym) - UNWIND_HINT_IRET_REGS - ASM_CLAC -+ cld - - /* - * If the entry is from userspace, switch stacks and treat it as -@@ -499,6 +524,7 @@ SYM_CODE_START(\asmsym) - call vc_switch_off_ist - movq %rax, %rsp /* Switch to new stack */ - -+ ENCODE_FRAME_POINTER - UNWIND_HINT_REGS - - /* Update pt_regs */ -@@ -534,6 +560,7 @@ SYM_CODE_END(\asmsym) - SYM_CODE_START(\asmsym) - UNWIND_HINT_IRET_REGS offset=8 - ASM_CLAC -+ cld - - /* paranoid_entry returns GS information for paranoid_exit in EBX. */ - call paranoid_entry -@@ -567,6 +594,7 @@ __irqentry_text_end: - - SYM_CODE_START_LOCAL(common_interrupt_return) - SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) -+ IBRS_EXIT - #ifdef CONFIG_DEBUG_ENTRY - /* Assert that pt_regs indicates user mode. */ - testb $3, CS(%rsp) -@@ -574,6 +602,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) - ud2 - 1: - #endif -+#ifdef CONFIG_XEN_PV -+ ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV -+#endif -+ - POP_REGS pop_rdi=0 - - /* -@@ -670,6 +702,7 @@ native_irq_return_ldt: - pushq %rdi /* Stash user RDI */ - swapgs /* to kernel GS */ - SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ -+ UNTRAIN_RET - - movq PER_CPU_VAR(espfix_waddr), %rdi - movq %rax, (0*8)(%rdi) /* user RAX */ -@@ -734,7 +767,7 @@ SYM_FUNC_START(asm_load_gs_index) - 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE - swapgs - FRAME_END -- ret -+ RET - SYM_FUNC_END(asm_load_gs_index) - EXPORT_SYMBOL(asm_load_gs_index) - -@@ -841,10 +874,12 @@ SYM_CODE_END(xen_failsafe_callback) - * 1 -> no SWAPGS on exit - * - * Y GSBASE value at entry, must be restored in paranoid_exit -+ * -+ * R14 - old CR3 -+ * R15 - old SPEC_CTRL - */ - SYM_CODE_START_LOCAL(paranoid_entry) - UNWIND_HINT_FUNC -- cld - PUSH_AND_CLEAR_REGS save_ret=1 - ENCODE_FRAME_POINTER 8 - -@@ -885,11 +920,12 @@ SYM_CODE_START_LOCAL(paranoid_entry) - * is needed here. - */ - SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx -- ret -+ jmp .Lparanoid_gsbase_done - - .Lparanoid_entry_checkgs: - /* EBX = 1 -> kernel GSBASE active, no restore required */ - movl $1, %ebx -+ - /* - * The kernel-enforced convention is a negative GSBASE indicates - * a kernel value. No SWAPGS needed on entry and exit. 
-@@ -897,22 +933,23 @@ SYM_CODE_START_LOCAL(paranoid_entry) - movl $MSR_GS_BASE, %ecx - rdmsr - testl %edx, %edx -- jns .Lparanoid_entry_swapgs -- ret -+ js .Lparanoid_kernel_gsbase - --.Lparanoid_entry_swapgs: -+ /* EBX = 0 -> SWAPGS required on exit */ -+ xorl %ebx, %ebx - swapgs -+.Lparanoid_kernel_gsbase: -+ FENCE_SWAPGS_KERNEL_ENTRY -+.Lparanoid_gsbase_done: - - /* -- * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an -- * unconditional CR3 write, even in the PTI case. So do an lfence -- * to prevent GS speculation, regardless of whether PTI is enabled. -+ * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like -+ * CR3 above, keep the old value in a callee saved register. - */ -- FENCE_SWAPGS_KERNEL_ENTRY -+ IBRS_ENTER save_reg=%r15 -+ UNTRAIN_RET - -- /* EBX = 0 -> SWAPGS required on exit */ -- xorl %ebx, %ebx -- ret -+ RET - SYM_CODE_END(paranoid_entry) - - /* -@@ -933,9 +970,19 @@ SYM_CODE_END(paranoid_entry) - * 1 -> no SWAPGS on exit - * - * Y User space GSBASE, must be restored unconditionally -+ * -+ * R14 - old CR3 -+ * R15 - old SPEC_CTRL - */ - SYM_CODE_START_LOCAL(paranoid_exit) - UNWIND_HINT_REGS -+ -+ /* -+ * Must restore IBRS state before both CR3 and %GS since we need access -+ * to the per-CPU x86_spec_ctrl_shadow variable. -+ */ -+ IBRS_EXIT save_reg=%r15 -+ - /* - * The order of operations is important. RESTORE_CR3 requires - * kernel GSBASE. -@@ -964,13 +1011,14 @@ SYM_CODE_START_LOCAL(paranoid_exit) - SYM_CODE_END(paranoid_exit) - - /* -- * Save all registers in pt_regs, and switch GS if needed. -+ * Switch GS and CR3 if needed. - */ - SYM_CODE_START_LOCAL(error_entry) - UNWIND_HINT_FUNC -- cld -+ - PUSH_AND_CLEAR_REGS save_ret=1 - ENCODE_FRAME_POINTER 8 -+ - testb $3, CS+8(%rsp) - jz .Lerror_kernelspace - -@@ -982,21 +1030,15 @@ SYM_CODE_START_LOCAL(error_entry) - FENCE_SWAPGS_USER_ENTRY - /* We have user CR3. Change to kernel CR3. */ - SWITCH_TO_KERNEL_CR3 scratch_reg=%rax -+ IBRS_ENTER -+ UNTRAIN_RET - -+ leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ - .Lerror_entry_from_usermode_after_swapgs: -+ - /* Put us onto the real thread stack. */ -- popq %r12 /* save return addr in %12 */ -- movq %rsp, %rdi /* arg0 = pt_regs pointer */ - call sync_regs -- movq %rax, %rsp /* switch stack */ -- ENCODE_FRAME_POINTER -- pushq %r12 -- ret -- --.Lerror_entry_done_lfence: -- FENCE_SWAPGS_KERNEL_ENTRY --.Lerror_entry_done: -- ret -+ RET - - /* - * There are two places in the kernel that can potentially fault with -@@ -1020,8 +1062,16 @@ SYM_CODE_START_LOCAL(error_entry) - * .Lgs_change's error handler with kernel gsbase. - */ - SWAPGS -- FENCE_SWAPGS_USER_ENTRY -- jmp .Lerror_entry_done -+ -+ /* -+ * Issue an LFENCE to prevent GS speculation, regardless of whether it is a -+ * kernel or user gsbase. -+ */ -+.Lerror_entry_done_lfence: -+ FENCE_SWAPGS_KERNEL_ENTRY -+ leaq 8(%rsp), %rax /* return pt_regs pointer */ -+ ANNOTATE_UNRET_END -+ RET - - .Lbstep_iret: - /* Fix truncated RIP */ -@@ -1036,14 +1086,16 @@ SYM_CODE_START_LOCAL(error_entry) - SWAPGS - FENCE_SWAPGS_USER_ENTRY - SWITCH_TO_KERNEL_CR3 scratch_reg=%rax -+ IBRS_ENTER -+ UNTRAIN_RET - - /* - * Pretend that the exception came from user mode: set up pt_regs - * as if we faulted immediately after IRET. 
- */ -- mov %rsp, %rdi -+ leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */ - call fixup_bad_iret -- mov %rax, %rsp -+ mov %rax, %rdi - jmp .Lerror_entry_from_usermode_after_swapgs - SYM_CODE_END(error_entry) - -@@ -1105,6 +1157,7 @@ SYM_CODE_START(asm_exc_nmi) - */ - - ASM_CLAC -+ cld - - /* Use %rdx as our temp variable throughout */ - pushq %rdx -@@ -1124,7 +1177,6 @@ SYM_CODE_START(asm_exc_nmi) - */ - - swapgs -- cld - FENCE_SWAPGS_USER_ENTRY - SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx - movq %rsp, %rdx -@@ -1140,6 +1192,9 @@ SYM_CODE_START(asm_exc_nmi) - PUSH_AND_CLEAR_REGS rdx=(%rdx) - ENCODE_FRAME_POINTER - -+ IBRS_ENTER -+ UNTRAIN_RET -+ - /* - * At this point we no longer need to worry about stack damage - * due to nesting -- we're on the normal thread stack and we're -@@ -1362,6 +1417,9 @@ end_repeat_nmi: - movq $-1, %rsi - call exc_nmi - -+ /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */ -+ IBRS_EXIT save_reg=%r15 -+ - /* Always restore stashed CR3 value (see paranoid_entry) */ - RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 - -@@ -1429,7 +1487,7 @@ SYM_CODE_END(ignore_sysret) - #endif - - .pushsection .text, "ax" --SYM_CODE_START(rewind_stack_do_exit) -+SYM_CODE_START(rewind_stack_and_make_dead) - UNWIND_HINT_FUNC - /* Prevent any naive code from trying to unwind to our caller. */ - xorl %ebp, %ebp -@@ -1438,6 +1496,6 @@ SYM_CODE_START(rewind_stack_do_exit) - leaq -PTREGS_SIZE(%rax), %rsp - UNWIND_HINT_REGS - -- call do_exit --SYM_CODE_END(rewind_stack_do_exit) -+ call make_task_dead -+SYM_CODE_END(rewind_stack_and_make_dead) - .popsection -diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S -index 0051cf5c792d1..4d637a965efbe 100644 ---- a/arch/x86/entry/entry_64_compat.S -+++ b/arch/x86/entry/entry_64_compat.S -@@ -4,7 +4,6 @@ - * - * Copyright 2000-2002 Andi Kleen, SuSE Labs. - */ --#include "calling.h" - #include - #include - #include -@@ -14,9 +13,12 @@ - #include - #include - #include -+#include - #include - #include - -+#include "calling.h" -+ - .section .entry.text, "ax" - - /* -@@ -47,7 +49,7 @@ - * 0(%ebp) arg6 - */ - SYM_CODE_START(entry_SYSENTER_compat) -- UNWIND_HINT_EMPTY -+ UNWIND_HINT_ENTRY - /* Interrupts are off on entry. */ - SWAPGS - -@@ -112,6 +114,9 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) - - cld - -+ IBRS_ENTER -+ UNTRAIN_RET -+ - /* - * SYSENTER doesn't filter flags, so we need to clear NT and AC - * ourselves. To save a few cycles, we can check whether -@@ -197,7 +202,7 @@ SYM_CODE_END(entry_SYSENTER_compat) - * 0(%esp) arg6 - */ - SYM_CODE_START(entry_SYSCALL_compat) -- UNWIND_HINT_EMPTY -+ UNWIND_HINT_ENTRY - /* Interrupts are off on entry. */ - swapgs - -@@ -252,6 +257,9 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) - - UNWIND_HINT_REGS - -+ IBRS_ENTER -+ UNTRAIN_RET -+ - movq %rsp, %rdi - call do_fast_syscall_32 - /* XEN PV guests always use IRET path */ -@@ -266,6 +274,8 @@ sysret32_from_system_call: - */ - STACKLEAK_ERASE - -+ IBRS_EXIT -+ - movq RBX(%rsp), %rbx /* pt_regs->rbx */ - movq RBP(%rsp), %rbp /* pt_regs->rbp */ - movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */ -@@ -339,7 +349,7 @@ SYM_CODE_END(entry_SYSCALL_compat) - * ebp arg6 - */ - SYM_CODE_START(entry_INT80_compat) -- UNWIND_HINT_EMPTY -+ UNWIND_HINT_ENTRY - /* - * Interrupts are off on entry. 
- */ -@@ -409,6 +419,9 @@ SYM_CODE_START(entry_INT80_compat) - - cld - -+ IBRS_ENTER -+ UNTRAIN_RET -+ - movq %rsp, %rdi - call do_int80_syscall_32 - jmp swapgs_restore_regs_and_return_to_usermode -diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S -index f1f96d4d8cd60..ff6e7003da974 100644 ---- a/arch/x86/entry/thunk_32.S -+++ b/arch/x86/entry/thunk_32.S -@@ -24,15 +24,13 @@ SYM_CODE_START_NOALIGN(\name) - popl %edx - popl %ecx - popl %eax -- ret -+ RET - _ASM_NOKPROBE(\name) - SYM_CODE_END(\name) - .endm - --#ifdef CONFIG_PREEMPTION - THUNK preempt_schedule_thunk, preempt_schedule - THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace - EXPORT_SYMBOL(preempt_schedule_thunk) - EXPORT_SYMBOL(preempt_schedule_notrace_thunk) --#endif - -diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S -index 496b11ec469de..f38b07d2768bb 100644 ---- a/arch/x86/entry/thunk_64.S -+++ b/arch/x86/entry/thunk_64.S -@@ -31,14 +31,11 @@ SYM_FUNC_END(\name) - _ASM_NOKPROBE(\name) - .endm - --#ifdef CONFIG_PREEMPTION - THUNK preempt_schedule_thunk, preempt_schedule - THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace - EXPORT_SYMBOL(preempt_schedule_thunk) - EXPORT_SYMBOL(preempt_schedule_notrace_thunk) --#endif - --#ifdef CONFIG_PREEMPTION - SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) - popq %r11 - popq %r10 -@@ -50,7 +47,6 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore) - popq %rsi - popq %rdi - popq %rbp -- ret -+ RET - _ASM_NOKPROBE(__thunk_restore) - SYM_CODE_END(__thunk_restore) --#endif -diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile -index a2dddcc189f69..c277c63195ce8 100644 ---- a/arch/x86/entry/vdso/Makefile -+++ b/arch/x86/entry/vdso/Makefile -@@ -92,6 +92,7 @@ endif - endif - - $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) -+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO - - # - # vDSO code runs in userspace and -pg doesn't help with profiling anyway. -@@ -178,7 +179,7 @@ quiet_cmd_vdso = VDSO $@ - sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' - - VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \ -- $(call ld-option, --eh-frame-hdr) -Bsymbolic -+ $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack - GCOV_PROFILE := n - - quiet_cmd_vdso_and_check = VDSO $@ -diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S -index 4bf48462fca7a..e8c60ae7a7c83 100644 ---- a/arch/x86/entry/vdso/vdso.lds.S -+++ b/arch/x86/entry/vdso/vdso.lds.S -@@ -27,7 +27,9 @@ VERSION { - __vdso_time; - clock_getres; - __vdso_clock_getres; -+#ifdef CONFIG_X86_SGX - __vdso_sgx_enter_enclave; -+#endif - local: *; - }; - } -diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S -index 6ddd7a937b3e3..d33c6513fd2cb 100644 ---- a/arch/x86/entry/vdso/vdso32/system_call.S -+++ b/arch/x86/entry/vdso/vdso32/system_call.S -@@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) - popl %ecx - CFI_RESTORE ecx - CFI_ADJUST_CFA_OFFSET -4 -- ret -+ RET - CFI_ENDPROC - - .size __kernel_vsyscall,.-__kernel_vsyscall -diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c -index 235a5794296ac..a380f7ecdd544 100644 ---- a/arch/x86/entry/vdso/vma.c -+++ b/arch/x86/entry/vdso/vma.c -@@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) - - /* Round the lowest possible end address up to a PMD boundary. 
*/ - end = (start + len + PMD_SIZE - 1) & PMD_MASK; -- if (end >= TASK_SIZE_MAX) -- end = TASK_SIZE_MAX; -+ if (end >= DEFAULT_MAP_WINDOW) -+ end = DEFAULT_MAP_WINDOW; - end -= len; - - if (end > start) { -@@ -438,7 +438,7 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) - static __init int vdso_setup(char *s) - { - vdso64_enabled = simple_strtoul(s, NULL, 0); -- return 0; -+ return 1; - } - __setup("vdso=", vdso_setup); - -diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S -index 99dafac992e2c..d77d278ee9dd6 100644 ---- a/arch/x86/entry/vdso/vsgx.S -+++ b/arch/x86/entry/vdso/vsgx.S -@@ -81,7 +81,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave) - pop %rbx - leave - .cfi_def_cfa %rsp, 8 -- ret -+ RET - - /* The out-of-line code runs with the pre-leave stack frame. */ - .cfi_def_cfa %rbp, 16 -diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c -index 1b40b92970831..fd2ee9408e914 100644 ---- a/arch/x86/entry/vsyscall/vsyscall_64.c -+++ b/arch/x86/entry/vsyscall/vsyscall_64.c -@@ -226,7 +226,8 @@ bool emulate_vsyscall(unsigned long error_code, - if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) { - warn_bad_vsyscall(KERN_DEBUG, regs, - "seccomp tried to change syscall nr or ip"); -- do_exit(SIGSYS); -+ force_exit_sig(SIGSYS); -+ return true; - } - regs->orig_ax = -1; - if (tmp) -diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S -index 2e203f3a25a7b..ef2dd18272431 100644 ---- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S -+++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S -@@ -20,16 +20,19 @@ __vsyscall_page: - mov $__NR_gettimeofday, %rax - syscall - ret -+ int3 - - .balign 1024, 0xcc - mov $__NR_time, %rax - syscall - ret -+ int3 - - .balign 1024, 0xcc - mov $__NR_getcpu, %rax - syscall - ret -+ int3 - - .balign 4096, 0xcc - -diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c -index 9687a8aef01c5..4ebedc7e1188b 100644 ---- a/arch/x86/events/amd/core.c -+++ b/arch/x86/events/amd/core.c -@@ -364,7 +364,7 @@ static int amd_pmu_hw_config(struct perf_event *event) - - /* pass precise event sampling to ibs: */ - if (event->attr.precise_ip && get_ibs_caps()) -- return -ENOENT; -+ return forward_event_to_ibs(event); - - if (has_branch_stack(event)) - return -EOPNOTSUPP; -@@ -976,7 +976,7 @@ static int __init amd_core_pmu_init(void) - * numbered counter following it. - */ - for (i = 0; i < x86_pmu.num_counters - 1; i += 2) -- even_ctr_mask |= 1 << i; -+ even_ctr_mask |= BIT_ULL(i); - - pair_constraint = (struct event_constraint) - __EVENT_CONSTRAINT(0, even_ctr_mask, 0, -diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c -index 9739019d4b67a..b605e08f9a8ef 100644 ---- a/arch/x86/events/amd/ibs.c -+++ b/arch/x86/events/amd/ibs.c -@@ -194,7 +194,7 @@ static struct perf_ibs *get_ibs_pmu(int type) - } - - /* -- * Use IBS for precise event sampling: -+ * core pmu config -> IBS config - * - * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count - * perf record -a -e r076:p ... # same as -e cpu-cycles:p -@@ -203,25 +203,9 @@ static struct perf_ibs *get_ibs_pmu(int type) - * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl, - * MSRC001_1033) is used to select either cycle or micro-ops counting - * mode. -- * -- * The rip of IBS samples has skid 0. Thus, IBS supports precise -- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the -- * rip is invalid when IBS was not able to record the rip correctly. 
-- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then. -- * - */ --static int perf_ibs_precise_event(struct perf_event *event, u64 *config) -+static int core_pmu_ibs_config(struct perf_event *event, u64 *config) - { -- switch (event->attr.precise_ip) { -- case 0: -- return -ENOENT; -- case 1: -- case 2: -- break; -- default: -- return -EOPNOTSUPP; -- } -- - switch (event->attr.type) { - case PERF_TYPE_HARDWARE: - switch (event->attr.config) { -@@ -247,22 +231,37 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config) - return -EOPNOTSUPP; - } - -+/* -+ * The rip of IBS samples has skid 0. Thus, IBS supports precise -+ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the -+ * rip is invalid when IBS was not able to record the rip correctly. -+ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then. -+ */ -+int forward_event_to_ibs(struct perf_event *event) -+{ -+ u64 config = 0; -+ -+ if (!event->attr.precise_ip || event->attr.precise_ip > 2) -+ return -EOPNOTSUPP; -+ -+ if (!core_pmu_ibs_config(event, &config)) { -+ event->attr.type = perf_ibs_op.pmu.type; -+ event->attr.config = config; -+ } -+ return -ENOENT; -+} -+ - static int perf_ibs_init(struct perf_event *event) - { - struct hw_perf_event *hwc = &event->hw; - struct perf_ibs *perf_ibs; - u64 max_cnt, config; -- int ret; - - perf_ibs = get_ibs_pmu(event->attr.type); -- if (perf_ibs) { -- config = event->attr.config; -- } else { -- perf_ibs = &perf_ibs_op; -- ret = perf_ibs_precise_event(event, &config); -- if (ret) -- return ret; -- } -+ if (!perf_ibs) -+ return -ENOENT; -+ -+ config = event->attr.config; - - if (event->pmu != &perf_ibs->pmu) - return -ENOENT; -@@ -304,6 +303,16 @@ static int perf_ibs_init(struct perf_event *event) - hwc->config_base = perf_ibs->msr; - hwc->config = config; - -+ /* -+ * rip recorded by IbsOpRip will not be consistent with rsp and rbp -+ * recorded as part of interrupt regs. Thus we need to use rip from -+ * interrupt regs while unwinding call stack. Setting _EARLY flag -+ * makes sure we unwind call-stack before perf sample rip is set to -+ * IbsOpRip. -+ */ -+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) -+ event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY; -+ - return 0; - } - -@@ -687,6 +696,14 @@ fail: - data.raw = &raw; - } - -+ /* -+ * rip recorded by IbsOpRip will not be consistent with rsp and rbp -+ * recorded as part of interrupt regs. Thus we need to use rip from -+ * interrupt regs while unwinding call stack. 
-+ */ -+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) -+ data.callchain = perf_callchain(event, iregs); -+ - throttle = perf_event_overflow(event, &data, ®s); - out: - if (throttle) { -@@ -759,9 +776,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) - return ret; - } - --static __init void perf_event_ibs_init(void) -+static __init int perf_event_ibs_init(void) - { - struct attribute **attr = ibs_op_format_attrs; -+ int ret; - - /* - * Some chips fail to reset the fetch count when it is written; instead -@@ -773,7 +791,9 @@ static __init void perf_event_ibs_init(void) - if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10) - perf_ibs_fetch.fetch_ignore_if_zero_rip = 1; - -- perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); -+ ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); -+ if (ret) -+ return ret; - - if (ibs_caps & IBS_CAPS_OPCNT) { - perf_ibs_op.config_mask |= IBS_OP_CNT_CTL; -@@ -786,15 +806,35 @@ static __init void perf_event_ibs_init(void) - perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; - } - -- perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); -+ ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op"); -+ if (ret) -+ goto err_op; -+ -+ ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); -+ if (ret) -+ goto err_nmi; - -- register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); - pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); -+ return 0; -+ -+err_nmi: -+ perf_pmu_unregister(&perf_ibs_op.pmu); -+ free_percpu(perf_ibs_op.pcpu); -+ perf_ibs_op.pcpu = NULL; -+err_op: -+ perf_pmu_unregister(&perf_ibs_fetch.pmu); -+ free_percpu(perf_ibs_fetch.pcpu); -+ perf_ibs_fetch.pcpu = NULL; -+ -+ return ret; - } - - #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ - --static __init void perf_event_ibs_init(void) { } -+static __init int perf_event_ibs_init(void) -+{ -+ return 0; -+} - - #endif - -@@ -1064,9 +1104,7 @@ static __init int amd_ibs_init(void) - x86_pmu_amd_ibs_starting_cpu, - x86_pmu_amd_ibs_dying_cpu); - -- perf_event_ibs_init(); -- -- return 0; -+ return perf_event_ibs_init(); - } - - /* Since we need the pci subsystem to init ibs we can't do this earlier: */ -diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c -index 6dfa8ddaa60f7..81d5e0a1f48cd 100644 ---- a/arch/x86/events/core.c -+++ b/arch/x86/events/core.c -@@ -2762,10 +2762,11 @@ static bool perf_hw_regs(struct pt_regs *regs) - void - perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct unwind_state state; - unsigned long addr; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* TODO: We don't support guest os callchain now */ - return; - } -@@ -2865,10 +2866,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent - void - perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - struct stack_frame frame; - const struct stack_frame __user *fp; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -+ if (guest_cbs && guest_cbs->is_in_guest()) { - /* TODO: We don't support guest os callchain now */ - return; - } -@@ -2945,18 +2947,21 @@ static unsigned long code_segment_base(struct pt_regs *regs) - - unsigned long perf_instruction_pointer(struct pt_regs *regs) - { -- if (perf_guest_cbs && 
perf_guest_cbs->is_in_guest()) -- return perf_guest_cbs->get_guest_ip(); -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); -+ -+ if (guest_cbs && guest_cbs->is_in_guest()) -+ return guest_cbs->get_guest_ip(); - - return regs->ip + code_segment_base(regs); - } - - unsigned long perf_misc_flags(struct pt_regs *regs) - { -+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); - int misc = 0; - -- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { -- if (perf_guest_cbs->is_user_mode()) -+ if (guest_cbs && guest_cbs->is_in_guest()) { -+ if (guest_cbs->is_user_mode()) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; -diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c -index 9a044438072ba..b70e1522a27ac 100644 ---- a/arch/x86/events/intel/core.c -+++ b/arch/x86/events/intel/core.c -@@ -243,7 +243,8 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { - - static struct event_constraint intel_icl_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ -- FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */ -+ FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */ -+ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ - FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ - FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ -@@ -254,7 +255,7 @@ static struct event_constraint intel_icl_event_constraints[] = { - INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), - INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), - INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ -- INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), -+ INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf), - INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), - INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ - INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */ -@@ -280,7 +281,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = { - INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), - INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), - INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), -- INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), -+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), - INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE), - INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), - EVENT_EXTRA_END -@@ -288,7 +289,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = { - - static struct event_constraint intel_spr_event_constraints[] = { - FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ -- FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */ -+ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ - FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ - FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ -@@ -2787,6 +2788,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) - { - struct perf_sample_data data; - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); -+ struct perf_guest_info_callbacks *guest_cbs; - int bit; - int handled = 0; - u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); -@@ -2853,9 +2855,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) - */ - if 
(__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { - handled++; -- if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && -- perf_guest_cbs->handle_intel_pt_intr)) -- perf_guest_cbs->handle_intel_pt_intr(); -+ -+ guest_cbs = perf_get_guest_cbs(); -+ if (unlikely(guest_cbs && guest_cbs->is_in_guest() && -+ guest_cbs->handle_intel_pt_intr)) -+ guest_cbs->handle_intel_pt_intr(); - else - intel_pt_interrupt(); - } -@@ -2998,8 +3002,10 @@ intel_vlbr_constraints(struct perf_event *event) - { - struct event_constraint *c = &vlbr_constraint; - -- if (unlikely(constraint_match(c, event->hw.config))) -+ if (unlikely(constraint_match(c, event->hw.config))) { -+ event->hw.flags |= c->flags; - return c; -+ } - - return NULL; - } -@@ -4648,6 +4654,19 @@ static __initconst const struct x86_pmu intel_pmu = { - .lbr_read = intel_pmu_lbr_read_64, - .lbr_save = intel_pmu_lbr_save, - .lbr_restore = intel_pmu_lbr_restore, -+ -+ /* -+ * SMM has access to all 4 rings and while traditionally SMM code only -+ * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. -+ * -+ * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction -+ * between SMM or not, this results in what should be pure userspace -+ * counters including SMM data. -+ * -+ * This is a clear privilege issue, therefore globally disable -+ * counting SMM by default. -+ */ -+ .attr_freeze_on_smi = 1, - }; - - static __init void intel_clovertown_quirk(void) -@@ -4694,6 +4713,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = { - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), -+ INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), - INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), - INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e), -@@ -5447,7 +5467,11 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con - /* Disabled fixed counters which are not in CPUID */ - c->idxmsk64 &= intel_ctrl; - -- if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) -+ /* -+ * Don't extend the pseudo-encoding to the -+ * generic counters -+ */ -+ if (!use_fixed_pseudo_encoding(c->code)) - c->idxmsk64 |= (1ULL << num_counters) - 1; - } - c->idxmsk64 &= -@@ -6085,6 +6109,7 @@ __init int intel_pmu_init(void) - break; - - case INTEL_FAM6_SAPPHIRERAPIDS_X: -+ case INTEL_FAM6_EMERALDRAPIDS_X: - pmem = true; - x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); -@@ -6181,6 +6206,19 @@ __init int intel_pmu_init(void) - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; - } -+ -+ /* -+ * Quirk: For some Alder Lake machine, when all E-cores are disabled in -+ * a BIOS, the leaf 0xA will enumerate all counters of P-cores. However, -+ * the X86_FEATURE_HYBRID_CPU is still set. The above codes will -+ * mistakenly add extra counters for P-cores. Correct the number of -+ * counters here. 
-+ */ -+ if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) { -+ pmu->num_counters = x86_pmu.num_counters; -+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed; -+ } -+ - pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); - pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, -diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c -index 8647713276a73..21a9cb48daf5d 100644 ---- a/arch/x86/events/intel/ds.c -+++ b/arch/x86/events/intel/ds.c -@@ -236,6 +236,7 @@ static u64 load_latency_data(u64 status) - static u64 store_latency_data(u64 status) - { - union intel_x86_pebs_dse dse; -+ union perf_mem_data_src src; - u64 val; - - dse.val = status; -@@ -263,7 +264,14 @@ static u64 store_latency_data(u64 status) - - val |= P(BLK, NA); - -- return val; -+ /* -+ * the pebs_data_source table is only for loads -+ * so override the mem_op to say STORE instead -+ */ -+ src.val = val; -+ src.mem_op = P(OP,STORE); -+ -+ return src.val; - } - - struct pebs_record_core { -@@ -923,12 +931,18 @@ struct event_constraint intel_skl_pebs_event_constraints[] = { - }; - - struct event_constraint intel_icl_pebs_event_constraints[] = { -- INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ - INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ - - INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ -- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ -- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ - - INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ - -@@ -943,14 +957,19 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { - }; - - struct event_constraint intel_spr_pebs_event_constraints[] = { -- INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), -+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ - INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), - - INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), - INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), - INTEL_PSD_CONSTRAINT(0x2cd, 0x1), -- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), -- INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ -+ 
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ -+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ - - INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), - -diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c -index 9e6d6eaeb4cb6..b3f92255cbd2d 100644 ---- a/arch/x86/events/intel/lbr.c -+++ b/arch/x86/events/intel/lbr.c -@@ -1114,6 +1114,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) - - if (static_cpu_has(X86_FEATURE_ARCH_LBR)) { - reg->config = mask; -+ -+ /* -+ * The Arch LBR HW can retrieve the common branch types -+ * from the LBR_INFO. It doesn't require the high overhead -+ * SW disassemble. -+ * Enable the branch type by default for the Arch LBR. -+ */ -+ reg->reg |= X86_BR_TYPE_SAVE; - return 0; - } - -@@ -1734,6 +1742,9 @@ static bool is_arch_lbr_xsave_available(void) - * Check the LBR state with the corresponding software structure. - * Disable LBR XSAVES support if the size doesn't match. - */ -+ if (xfeature_size(XFEATURE_LBR) == 0) -+ return false; -+ - if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size())) - return false; - -@@ -1836,7 +1847,7 @@ void __init intel_pmu_arch_lbr_init(void) - return; - - clear_arch_lbr: -- clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR); -+ setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR); - } - - /** -diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c -index 7f406c14715fd..d0295240c78a8 100644 ---- a/arch/x86/events/intel/pt.c -+++ b/arch/x86/events/intel/pt.c -@@ -13,6 +13,8 @@ - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - - #include -+#include -+#include - #include - #include - -@@ -472,7 +474,7 @@ static u64 pt_config_filters(struct perf_event *event) - pt->filters.filter[range].msr_b = filter->msr_b; - } - -- rtit_ctl |= filter->config << pt_address_ranges[range].reg_off; -+ rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off; - } - - return rtit_ctl; -@@ -897,8 +899,9 @@ static void pt_handle_status(struct pt *pt) - * means we are already losing data; need to let the decoder - * know. - */ -- if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || -- buf->output_off == pt_buffer_region_size(buf)) { -+ if (!buf->single && -+ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || -+ buf->output_off == pt_buffer_region_size(buf))) { - perf_aux_output_flag(&pt->handle, - PERF_AUX_FLAG_TRUNCATED); - advance++; -@@ -1244,6 +1247,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages) - if (1 << order != nr_pages) - goto out; - -+ /* -+ * Some processors cannot always support single range for more than -+ * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might -+ * also be affected, so for now rather than trying to keep track of -+ * which ones, just disable it for all. 
-+ */ -+ if (nr_pages > 1) -+ goto out; -+ - buf->single = true; - buf->nr_pages = nr_pages; - ret = 0; -@@ -1347,10 +1359,36 @@ static void pt_addr_filters_fini(struct perf_event *event) - event->hw.addr_filters = NULL; - } - --static inline bool valid_kernel_ip(unsigned long ip) -+#ifdef CONFIG_X86_64 -+static u64 canonical_address(u64 vaddr, u8 vaddr_bits) -+{ -+ return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); -+} -+ -+static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits) -+{ -+ return canonical_address(vaddr, vaddr_bits) == vaddr; -+} -+ -+/* Clamp to a canonical address greater-than-or-equal-to the address given */ -+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits) -+{ -+ return is_canonical_address(vaddr, vaddr_bits) ? -+ vaddr : -+ -BIT_ULL(vaddr_bits - 1); -+} -+ -+/* Clamp to a canonical address less-than-or-equal-to the address given */ -+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits) - { -- return virt_addr_valid(ip) && kernel_ip(ip); -+ return is_canonical_address(vaddr, vaddr_bits) ? -+ vaddr : -+ BIT_ULL(vaddr_bits - 1) - 1; - } -+#else -+#define clamp_to_ge_canonical_addr(x, y) (x) -+#define clamp_to_le_canonical_addr(x, y) (x) -+#endif - - static int pt_event_addr_filters_validate(struct list_head *filters) - { -@@ -1366,14 +1404,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters) - filter->action == PERF_ADDR_FILTER_ACTION_START) - return -EOPNOTSUPP; - -- if (!filter->path.dentry) { -- if (!valid_kernel_ip(filter->offset)) -- return -EINVAL; -- -- if (!valid_kernel_ip(filter->offset + filter->size)) -- return -EINVAL; -- } -- - if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges)) - return -EOPNOTSUPP; - } -@@ -1397,9 +1427,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event) - if (filter->path.dentry && !fr[range].start) { - msr_a = msr_b = 0; - } else { -- /* apply the offset */ -- msr_a = fr[range].start; -- msr_b = msr_a + fr[range].size - 1; -+ unsigned long n = fr[range].size - 1; -+ unsigned long a = fr[range].start; -+ unsigned long b; -+ -+ if (a > ULONG_MAX - n) -+ b = ULONG_MAX; -+ else -+ b = a + n; -+ /* -+ * Apply the offset. 64-bit addresses written to the -+ * MSRs must be canonical, but the range can encompass -+ * non-canonical addresses. Since software cannot -+ * execute at non-canonical addresses, adjusting to -+ * canonical addresses does not affect the result of the -+ * address filter. 
-+ */ -+ msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits); -+ msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits); -+ if (msr_b < msr_a) -+ msr_a = msr_b = 0; - } - - filters->filter[range].msr_a = msr_a; -diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c -index c72e368dd1641..7e16c590f2593 100644 ---- a/arch/x86/events/intel/uncore.c -+++ b/arch/x86/events/intel/uncore.c -@@ -1829,6 +1829,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), -+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), - {}, - }; -diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h -index b9687980aab6d..d6f7c6c1a930a 100644 ---- a/arch/x86/events/intel/uncore.h -+++ b/arch/x86/events/intel/uncore.h -@@ -2,6 +2,7 @@ - #include - #include - #include -+#include - #include - - #include -diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h -index 7280c8a3c8310..6d735611c281c 100644 ---- a/arch/x86/events/intel/uncore_discovery.h -+++ b/arch/x86/events/intel/uncore_discovery.h -@@ -30,7 +30,7 @@ - - - #define uncore_discovery_invalid_unit(unit) \ -- (!unit.table1 || !unit.ctl || !unit.table3 || \ -+ (!unit.table1 || !unit.ctl || \ - unit.table1 == -1ULL || unit.ctl == -1ULL || \ - unit.table3 == -1ULL) - -diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c -index 0f63706cdadfc..912fb3821a6bb 100644 ---- a/arch/x86/events/intel/uncore_snb.c -+++ b/arch/x86/events/intel/uncore_snb.c -@@ -788,6 +788,22 @@ int snb_pci2phy_map_init(int devid) - return 0; - } - -+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event) -+{ -+ struct hw_perf_event *hwc = &event->hw; -+ -+ /* -+ * SNB IMC counters are 32-bit and are laid out back to back -+ * in MMIO space. Therefore we must use a 32-bit accessor function -+ * using readq() from uncore_mmio_read_counter() causes problems -+ * because it is reading 64-bit at a time. This is okay for the -+ * uncore_perf_event_update() function because it drops the upper -+ * 32-bits but not okay for plain uncore_read_counter() as invoked -+ * in uncore_pmu_event_start(). -+ */ -+ return (u64)readl(box->io_addr + hwc->event_base); -+} -+ - static struct pmu snb_uncore_imc_pmu = { - .task_ctx_nr = perf_invalid_context, - .event_init = snb_uncore_imc_event_init, -@@ -807,7 +823,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = { - .disable_event = snb_uncore_imc_disable_event, - .enable_event = snb_uncore_imc_enable_event, - .hw_config = snb_uncore_imc_hw_config, -- .read_counter = uncore_mmio_read_counter, -+ .read_counter = snb_uncore_imc_read_counter, - }; - - static struct intel_uncore_type snb_uncore_imc = { -@@ -1407,6 +1423,7 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) - /* MCHBAR is disabled */ - if (!(mch_bar & BIT(0))) { - pr_warn("perf uncore: MCHBAR is disabled. 
Failed to map IMC free-running counters.\n"); -+ pci_dev_put(pdev); - return; - } - mch_bar &= ~BIT(0); -@@ -1420,6 +1437,8 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) - box->io_addr = ioremap(addr, type->mmio_map_size); - if (!box->io_addr) - pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); -+ -+ pci_dev_put(pdev); - } - - static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = { -diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c -index 5ddc0f30db6fc..9b5859812f4fb 100644 ---- a/arch/x86/events/intel/uncore_snbep.c -+++ b/arch/x86/events/intel/uncore_snbep.c -@@ -452,7 +452,7 @@ - #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0 - - /* ICX IMC */ --#define ICX_NUMBER_IMC_CHN 2 -+#define ICX_NUMBER_IMC_CHN 3 - #define ICX_IMC_MEM_STRIDE 0x4 - - /* SPR */ -@@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device) - return false; - - pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); -+ pci_dev_put(dev); - if (!hswep_get_chop(capid4)) - return true; - -@@ -3608,6 +3609,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct extra_reg *er; - int idx = 0; -+ /* Any of the CHA events may be filtered by Thread/Core-ID.*/ -+ if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN) -+ idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID; - - for (er = skx_uncore_cha_extra_regs; er->msr; er++) { - if (er->event != (event->hw.config & er->config_mask)) -@@ -3675,6 +3679,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), - UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), - UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), -+ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), - EVENT_CONSTRAINT_END - }; - -@@ -3799,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = { - NULL, - }; - -+static void pmu_clear_mapping_attr(const struct attribute_group **groups, -+ struct attribute_group *ag) -+{ -+ int i; -+ -+ for (i = 0; groups[i]; i++) { -+ if (groups[i] == ag) { -+ for (i++; groups[i]; i++) -+ groups[i - 1] = groups[i]; -+ groups[i - 1] = NULL; -+ break; -+ } -+ } -+} -+ - static int - pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) - { -@@ -3847,7 +3867,7 @@ clear_attrs: - clear_topology: - kfree(type->topology); - clear_attr_update: -- type->attr_update = NULL; -+ pmu_clear_mapping_attr(type->attr_update, ag); - return ret; - } - -@@ -4488,6 +4508,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map - type->topology = NULL; - } - -+ pci_dev_put(dev); -+ - return ret; - } - -@@ -4525,6 +4547,13 @@ static void snr_iio_cleanup_mapping(struct intel_uncore_type *type) - pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group); - } - -+static struct event_constraint snr_uncore_iio_constraints[] = { -+ UNCORE_EVENT_CONSTRAINT(0x83, 0x3), -+ UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), -+ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), -+ EVENT_CONSTRAINT_END -+}; -+ - static struct intel_uncore_type snr_uncore_iio = { - .name = "iio", - .num_counters = 4, -@@ -4536,6 +4565,7 @@ static struct intel_uncore_type snr_uncore_iio = { - .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, - .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, - .msr_offset = SNR_IIO_MSR_OFFSET, -+ .constraints = snr_uncore_iio_constraints, - .ops = &ivbep_uncore_msr_ops, - .format_group = &snr_uncore_iio_format_group, - .attr_update = snr_iio_attr_update, -@@ -4845,6 +4875,8 @@ 
static int snr_uncore_mmio_map(struct intel_uncore_box *box, - - addr += box_ctl; - -+ pci_dev_put(pdev); -+ - box->io_addr = ioremap(addr, type->mmio_map_size); - if (!box->io_addr) { - pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); -@@ -5076,8 +5108,10 @@ static struct event_constraint icx_uncore_iio_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x02, 0x3), - UNCORE_EVENT_CONSTRAINT(0x03, 0x3), - UNCORE_EVENT_CONSTRAINT(0x83, 0x3), -+ UNCORE_EVENT_CONSTRAINT(0x88, 0xc), - UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), - UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), -+ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), - EVENT_CONSTRAINT_END - }; - -@@ -5125,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) - - static int icx_iio_set_mapping(struct intel_uncore_type *type) - { -+ /* Detect ICX-D system. This case is not supported */ -+ if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { -+ pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); -+ return -EPERM; -+ } - return pmu_iio_set_mapping(type, &icx_iio_mapping_group); - } - -@@ -5463,12 +5502,12 @@ static struct intel_uncore_ops icx_uncore_mmio_ops = { - static struct intel_uncore_type icx_uncore_imc = { - .name = "imc", - .num_counters = 4, -- .num_boxes = 8, -+ .num_boxes = 12, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, - .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, -- .event_descs = hswep_uncore_imc_events, -+ .event_descs = snr_uncore_imc_events, - .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, - .event_ctl = SNR_IMC_MMIO_PMON_CTL0, - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, -@@ -5647,6 +5686,7 @@ static struct intel_uncore_type spr_uncore_chabox = { - .event_mask = SPR_CHA_PMON_EVENT_MASK, - .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, - .num_shared_regs = 1, -+ .constraints = skx_uncore_chabox_constraints, - .ops = &spr_uncore_chabox_ops, - .format_group = &spr_uncore_chabox_format_group, - .attr_update = uncore_alias_groups, -@@ -5658,6 +5698,7 @@ static struct intel_uncore_type spr_uncore_iio = { - .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, - .format_group = &snr_uncore_iio_format_group, - .attr_update = uncore_alias_groups, -+ .constraints = icx_uncore_iio_constraints, - }; - - static struct attribute *spr_uncore_raw_formats_attr[] = { -@@ -5686,9 +5727,16 @@ static struct intel_uncore_type spr_uncore_irp = { - - }; - -+static struct event_constraint spr_uncore_m2pcie_constraints[] = { -+ UNCORE_EVENT_CONSTRAINT(0x14, 0x3), -+ UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), -+ EVENT_CONSTRAINT_END -+}; -+ - static struct intel_uncore_type spr_uncore_m2pcie = { - SPR_UNCORE_COMMON_FORMAT(), - .name = "m2pcie", -+ .constraints = spr_uncore_m2pcie_constraints, - }; - - static struct intel_uncore_type spr_uncore_pcu = { -@@ -5765,6 +5813,7 @@ static struct intel_uncore_type spr_uncore_upi = { - static struct intel_uncore_type spr_uncore_m3upi = { - SPR_UNCORE_PCI_COMMON_FORMAT(), - .name = "m3upi", -+ .constraints = icx_uncore_m3upi_constraints, - }; - - static struct intel_uncore_type spr_uncore_mdf = { -@@ -5773,6 +5822,7 @@ static struct intel_uncore_type spr_uncore_mdf = { - }; - - #define UNCORE_SPR_NUM_UNCORE_TYPES 12 -+#define UNCORE_SPR_CHA 0 - #define UNCORE_SPR_IIO 1 - #define UNCORE_SPR_IMC 6 - -@@ -6015,12 +6065,32 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types, - return max + 1; - } - -+#define SPR_MSR_UNC_CBO_CONFIG 0x2FFE -+ - void spr_uncore_cpu_init(void) - { -+ struct intel_uncore_type *type; -+ u64 num_cbo; -+ - uncore_msr_uncores = 
uncore_get_uncores(UNCORE_ACCESS_MSR, - UNCORE_SPR_MSR_EXTRA_UNCORES, - spr_msr_uncores); - -+ type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); -+ if (type) { -+ /* -+ * The value from the discovery table (stored in the type->num_boxes -+ * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a -+ * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it. -+ */ -+ rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo); -+ /* -+ * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact -+ * the EMR XCC. Don't let the value from the MSR replace the existing value. -+ */ -+ if (num_cbo) -+ type->num_boxes = num_cbo; -+ } - spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); - } - -diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c -index 96c775abe31ff..d23b5523cdd3b 100644 ---- a/arch/x86/events/msr.c -+++ b/arch/x86/events/msr.c -@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data) - case INTEL_FAM6_BROADWELL_G: - case INTEL_FAM6_BROADWELL_X: - case INTEL_FAM6_SAPPHIRERAPIDS_X: -+ case INTEL_FAM6_EMERALDRAPIDS_X: - - case INTEL_FAM6_ATOM_SILVERMONT: - case INTEL_FAM6_ATOM_SILVERMONT_D: -diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c -index 85feafacc445d..840ee43e3e464 100644 ---- a/arch/x86/events/rapl.c -+++ b/arch/x86/events/rapl.c -@@ -536,11 +536,14 @@ static struct perf_msr intel_rapl_spr_msrs[] = { - * - perf_msr_probe(PERF_RAPL_MAX) - * - want to use same event codes across both architectures - */ --static struct perf_msr amd_rapl_msrs[PERF_RAPL_MAX] = { -- [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr }, -+static struct perf_msr amd_rapl_msrs[] = { -+ [PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, 0, false, 0 }, -+ [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK }, -+ [PERF_RAPL_RAM] = { 0, &rapl_events_ram_group, 0, false, 0 }, -+ [PERF_RAPL_PP1] = { 0, &rapl_events_gpu_group, 0, false, 0 }, -+ [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, 0, false, 0 }, - }; - -- - static int rapl_cpu_offline(unsigned int cpu) - { - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); -@@ -801,6 +804,8 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { - X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl), - X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl), -+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &model_skl), -+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr), -diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c -index 949d845c922b4..3e9acdaeed1ec 100644 ---- a/arch/x86/events/zhaoxin/core.c -+++ b/arch/x86/events/zhaoxin/core.c -@@ -541,7 +541,13 @@ __init int zhaoxin_pmu_init(void) - - switch (boot_cpu_data.x86) { - case 0x06: -- if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) { -+ /* -+ * Support Zhaoxin CPU from ZXC series, exclude Nano series through FMS. 
-+ * Nano FMS: Family=6, Model=F, Stepping=[0-A][C-D] -+ * ZXC FMS: Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3 -+ */ -+ if ((boot_cpu_data.x86_model == 0x0f && boot_cpu_data.x86_stepping >= 0x0e) || -+ boot_cpu_data.x86_model == 0x19) { - - x86_pmu.max_period = x86_pmu.cntval_mask >> 1; - -diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c -index 708a2712a516d..95f98af74fdca 100644 ---- a/arch/x86/hyperv/hv_init.c -+++ b/arch/x86/hyperv/hv_init.c -@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(hv_vp_assist_page); - static int hv_cpu_init(unsigned int cpu) - { - union hv_vp_assist_msr_contents msr = { 0 }; -- struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; -+ struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu]; - int ret; - - ret = hv_common_cpu_init(cpu); -@@ -55,34 +55,32 @@ static int hv_cpu_init(unsigned int cpu) - if (!hv_vp_assist_page) - return 0; - -- if (!*hvp) { -- if (hv_root_partition) { -- /* -- * For root partition we get the hypervisor provided VP assist -- * page, instead of allocating a new page. -- */ -- rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); -- *hvp = memremap(msr.pfn << -- HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, -- PAGE_SIZE, MEMREMAP_WB); -- } else { -- /* -- * The VP assist page is an "overlay" page (see Hyper-V TLFS's -- * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed -- * out to make sure we always write the EOI MSR in -- * hv_apic_eoi_write() *after* the EOI optimization is disabled -- * in hv_cpu_die(), otherwise a CPU may not be stopped in the -- * case of CPU offlining and the VM will hang. -- */ -+ if (hv_root_partition) { -+ /* -+ * For root partition we get the hypervisor provided VP assist -+ * page, instead of allocating a new page. -+ */ -+ rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); -+ *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT, -+ PAGE_SIZE, MEMREMAP_WB); -+ } else { -+ /* -+ * The VP assist page is an "overlay" page (see Hyper-V TLFS's -+ * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed -+ * out to make sure we always write the EOI MSR in -+ * hv_apic_eoi_write() *after* the EOI optimization is disabled -+ * in hv_cpu_die(), otherwise a CPU may not be stopped in the -+ * case of CPU offlining and the VM will hang. 
-+ */ -+ if (!*hvp) - *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); -- if (*hvp) -- msr.pfn = vmalloc_to_pfn(*hvp); -- } -- WARN_ON(!(*hvp)); -- if (*hvp) { -- msr.enable = 1; -- wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); -- } -+ if (*hvp) -+ msr.pfn = vmalloc_to_pfn(*hvp); -+ -+ } -+ if (!WARN_ON(!(*hvp))) { -+ msr.enable = 1; -+ wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64); - } - - return 0; -@@ -139,7 +137,6 @@ void set_hv_tscchange_cb(void (*cb)(void)) - struct hv_reenlightenment_control re_ctrl = { - .vector = HYPERV_REENLIGHTENMENT_VECTOR, - .enabled = 1, -- .target_vp = hv_vp_index[smp_processor_id()] - }; - struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1}; - -@@ -148,13 +145,20 @@ void set_hv_tscchange_cb(void (*cb)(void)) - return; - } - -+ if (!hv_vp_index) -+ return; -+ - hv_reenlightenment_cb = cb; - - /* Make sure callback is registered before we write to MSRs */ - wmb(); - -+ re_ctrl.target_vp = hv_vp_index[get_cpu()]; -+ - wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); - wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); -+ -+ put_cpu(); - } - EXPORT_SYMBOL_GPL(set_hv_tscchange_cb); - -@@ -342,20 +346,13 @@ static void __init hv_get_partition_id(void) - */ - void __init hyperv_init(void) - { -- u64 guest_id, required_msrs; -+ u64 guest_id; - union hv_x64_msr_hypercall_contents hypercall_msr; - int cpuhp; - - if (x86_hyper_type != X86_HYPER_MS_HYPERV) - return; - -- /* Absolutely required MSRs */ -- required_msrs = HV_MSR_HYPERCALL_AVAILABLE | -- HV_MSR_VP_INDEX_AVAILABLE; -- -- if ((ms_hyperv.features & required_msrs) != required_msrs) -- return; -- - if (hv_common_init()) - return; - -@@ -472,8 +469,6 @@ void hyperv_cleanup(void) - { - union hv_x64_msr_hypercall_contents hypercall_msr; - -- unregister_syscore_ops(&hv_syscore_ops); -- - /* Reset our OS id */ - wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); - -diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c -index bd13736d0c054..0ad2378fe6ad7 100644 ---- a/arch/x86/hyperv/mmu.c -+++ b/arch/x86/hyperv/mmu.c -@@ -68,15 +68,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, - - local_irq_save(flags); - -- /* -- * Only check the mask _after_ interrupt has been disabled to avoid the -- * mask changing under our feet. -- */ -- if (cpumask_empty(cpus)) { -- local_irq_restore(flags); -- return; -- } -- - flush_pcpu = (struct hv_tlb_flush **) - this_cpu_ptr(hyperv_pcpu_input_arg); - -@@ -115,7 +106,9 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, - * must. We will also check all VP numbers when walking the - * supplied CPU set to remain correct in all cases. - */ -- if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64) -+ cpu = cpumask_last(cpus); -+ -+ if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64) - goto do_ex_hypercall; - - for_each_cpu(cpu, cpus) { -@@ -131,6 +124,12 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus, - __set_bit(vcpu, (unsigned long *) - &flush->processor_mask); - } -+ -+ /* nothing to flush if 'processor_mask' ends up being empty */ -+ if (!flush->processor_mask) { -+ local_irq_restore(flags); -+ return; -+ } - } - - /* -diff --git a/arch/x86/include/asm/GEN-for-each-reg.h b/arch/x86/include/asm/GEN-for-each-reg.h -index 1b07fb102c4ed..07949102a08d0 100644 ---- a/arch/x86/include/asm/GEN-for-each-reg.h -+++ b/arch/x86/include/asm/GEN-for-each-reg.h -@@ -1,11 +1,16 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * These are in machine order; things rely on that. 
-+ */ - #ifdef CONFIG_64BIT - GEN(rax) --GEN(rbx) - GEN(rcx) - GEN(rdx) -+GEN(rbx) -+GEN(rsp) -+GEN(rbp) - GEN(rsi) - GEN(rdi) --GEN(rbp) - GEN(r8) - GEN(r9) - GEN(r10) -@@ -16,10 +21,11 @@ GEN(r14) - GEN(r15) - #else - GEN(eax) --GEN(ebx) - GEN(ecx) - GEN(edx) -+GEN(ebx) -+GEN(esp) -+GEN(ebp) - GEN(esi) - GEN(edi) --GEN(ebp) - #endif -diff --git a/arch/x86/include/asm/acenv.h b/arch/x86/include/asm/acenv.h -index 9aff97f0de7fd..d937c55e717e6 100644 ---- a/arch/x86/include/asm/acenv.h -+++ b/arch/x86/include/asm/acenv.h -@@ -13,7 +13,19 @@ - - /* Asm macros */ - --#define ACPI_FLUSH_CPU_CACHE() wbinvd() -+/* -+ * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states. -+ * It is required to prevent data loss. -+ * -+ * While running inside virtual machine, the kernel can bypass cache flushing. -+ * Changing sleep state in a virtual machine doesn't affect the host system -+ * sleep state and cannot lead to data loss. -+ */ -+#define ACPI_FLUSH_CPU_CACHE() \ -+do { \ -+ if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) \ -+ wbinvd(); \ -+} while (0) - - int __acpi_acquire_global_lock(unsigned int *lock); - int __acpi_release_global_lock(unsigned int *lock); -diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h -index a3c2315aca121..a364971967c40 100644 ---- a/arch/x86/include/asm/alternative.h -+++ b/arch/x86/include/asm/alternative.h -@@ -75,6 +75,8 @@ extern int alternatives_patched; - - extern void alternative_instructions(void); - extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); -+extern void apply_retpolines(s32 *start, s32 *end); -+extern void apply_returns(s32 *start, s32 *end); - - struct module; - -diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h -index 4cb726c71ed8c..8f80de627c60a 100644 ---- a/arch/x86/include/asm/asm-prototypes.h -+++ b/arch/x86/include/asm/asm-prototypes.h -@@ -17,21 +17,3 @@ - extern void cmpxchg8b_emu(void); - #endif - --#ifdef CONFIG_RETPOLINE -- --#undef GEN --#define GEN(reg) \ -- extern asmlinkage void __x86_indirect_thunk_ ## reg (void); --#include -- --#undef GEN --#define GEN(reg) \ -- extern asmlinkage void __x86_indirect_alt_call_ ## reg (void); --#include -- --#undef GEN --#define GEN(reg) \ -- extern asmlinkage void __x86_indirect_alt_jmp_ ## reg (void); --#include -- --#endif /* CONFIG_RETPOLINE */ -diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h -index 3ad3da9a7d974..6dd47c9ec788a 100644 ---- a/arch/x86/include/asm/asm.h -+++ b/arch/x86/include/asm/asm.h -@@ -122,28 +122,19 @@ - - #ifdef __KERNEL__ - -+# include -+ - /* Exception table entry */ - #ifdef __ASSEMBLY__ --# define _ASM_EXTABLE_HANDLE(from, to, handler) \ -+ -+# define _ASM_EXTABLE_TYPE(from, to, type) \ - .pushsection "__ex_table","a" ; \ - .balign 4 ; \ - .long (from) - . ; \ - .long (to) - . ; \ -- .long (handler) - . ; \ -+ .long type ; \ - .popsection - --# define _ASM_EXTABLE(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) -- --# define _ASM_EXTABLE_UA(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) -- --# define _ASM_EXTABLE_CPY(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) -- --# define _ASM_EXTABLE_FAULT(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) -- - # ifdef CONFIG_KPROBES - # define _ASM_NOKPROBE(entry) \ - .pushsection "_kprobe_blacklist","aw" ; \ -@@ -155,26 +146,51 @@ - # endif - - #else /* ! 
__ASSEMBLY__ */ --# define _EXPAND_EXTABLE_HANDLE(x) #x --# define _ASM_EXTABLE_HANDLE(from, to, handler) \ -+ -+# define DEFINE_EXTABLE_TYPE_REG \ -+ ".macro extable_type_reg type:req reg:req\n" \ -+ ".set .Lfound, 0\n" \ -+ ".set .Lregnr, 0\n" \ -+ ".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n" \ -+ ".ifc \\reg, %%\\rs\n" \ -+ ".set .Lfound, .Lfound+1\n" \ -+ ".long \\type + (.Lregnr << 8)\n" \ -+ ".endif\n" \ -+ ".set .Lregnr, .Lregnr+1\n" \ -+ ".endr\n" \ -+ ".set .Lregnr, 0\n" \ -+ ".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n" \ -+ ".ifc \\reg, %%\\rs\n" \ -+ ".set .Lfound, .Lfound+1\n" \ -+ ".long \\type + (.Lregnr << 8)\n" \ -+ ".endif\n" \ -+ ".set .Lregnr, .Lregnr+1\n" \ -+ ".endr\n" \ -+ ".if (.Lfound != 1)\n" \ -+ ".error \"extable_type_reg: bad register argument\"\n" \ -+ ".endif\n" \ -+ ".endm\n" -+ -+# define UNDEFINE_EXTABLE_TYPE_REG \ -+ ".purgem extable_type_reg\n" -+ -+# define _ASM_EXTABLE_TYPE(from, to, type) \ - " .pushsection \"__ex_table\",\"a\"\n" \ - " .balign 4\n" \ - " .long (" #from ") - .\n" \ - " .long (" #to ") - .\n" \ -- " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \ -+ " .long " __stringify(type) " \n" \ - " .popsection\n" - --# define _ASM_EXTABLE(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) -- --# define _ASM_EXTABLE_UA(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) -- --# define _ASM_EXTABLE_CPY(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy) -- --# define _ASM_EXTABLE_FAULT(from, to) \ -- _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) -+# define _ASM_EXTABLE_TYPE_REG(from, to, type, reg) \ -+ " .pushsection \"__ex_table\",\"a\"\n" \ -+ " .balign 4\n" \ -+ " .long (" #from ") - .\n" \ -+ " .long (" #to ") - .\n" \ -+ DEFINE_EXTABLE_TYPE_REG \ -+ "extable_type_reg reg=" __stringify(reg) ", type=" __stringify(type) " \n"\ -+ UNDEFINE_EXTABLE_TYPE_REG \ -+ " .popsection\n" - - /* For C file, we already have NOKPROBE_SYMBOL macro */ - -@@ -188,6 +204,17 @@ register unsigned long current_stack_pointer asm(_ASM_SP); - #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) - #endif /* __ASSEMBLY__ */ - --#endif /* __KERNEL__ */ -+#define _ASM_EXTABLE(from, to) \ -+ _ASM_EXTABLE_TYPE(from, to, EX_TYPE_DEFAULT) - -+#define _ASM_EXTABLE_UA(from, to) \ -+ _ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS) -+ -+#define _ASM_EXTABLE_CPY(from, to) \ -+ _ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY) -+ -+#define _ASM_EXTABLE_FAULT(from, to) \ -+ _ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT) -+ -+#endif /* __KERNEL__ */ - #endif /* _ASM_X86_ASM_H */ -diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h -index 84b87538a15de..66570e95af398 100644 ---- a/arch/x86/include/asm/bug.h -+++ b/arch/x86/include/asm/bug.h -@@ -22,7 +22,7 @@ - - #ifdef CONFIG_DEBUG_BUGVERBOSE - --#define _BUG_FLAGS(ins, flags) \ -+#define _BUG_FLAGS(ins, flags, extra) \ - do { \ - asm_inline volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"aw\"\n" \ -@@ -31,7 +31,8 @@ do { \ - "\t.word %c1" "\t# bug_entry::line\n" \ - "\t.word %c2" "\t# bug_entry::flags\n" \ - "\t.org 2b+%c3\n" \ -- ".popsection" \ -+ ".popsection\n" \ -+ extra \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (flags), \ - "i" (sizeof(struct bug_entry))); \ -@@ -39,14 +40,15 @@ do { \ - - #else /* !CONFIG_DEBUG_BUGVERBOSE */ - --#define _BUG_FLAGS(ins, flags) \ -+#define _BUG_FLAGS(ins, flags, extra) \ - do { \ - asm_inline volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"aw\"\n" \ - "2:\t" 
__BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ - "\t.word %c0" "\t# bug_entry::flags\n" \ - "\t.org 2b+%c1\n" \ -- ".popsection" \ -+ ".popsection\n" \ -+ extra \ - : : "i" (flags), \ - "i" (sizeof(struct bug_entry))); \ - } while (0) -@@ -55,7 +57,7 @@ do { \ - - #else - --#define _BUG_FLAGS(ins, flags) asm volatile(ins) -+#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins) - - #endif /* CONFIG_GENERIC_BUG */ - -@@ -63,8 +65,8 @@ do { \ - #define BUG() \ - do { \ - instrumentation_begin(); \ -- _BUG_FLAGS(ASM_UD2, 0); \ -- unreachable(); \ -+ _BUG_FLAGS(ASM_UD2, 0, ""); \ -+ __builtin_unreachable(); \ - } while (0) - - /* -@@ -75,9 +77,9 @@ do { \ - */ - #define __WARN_FLAGS(flags) \ - do { \ -+ __auto_type __flags = BUGFLAG_WARNING|(flags); \ - instrumentation_begin(); \ -- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ -- annotate_reachable(); \ -+ _BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE); \ - instrumentation_end(); \ - } while (0) - -diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h -index 92ae283899409..f25ca2d709d40 100644 ---- a/arch/x86/include/asm/bugs.h -+++ b/arch/x86/include/asm/bugs.h -@@ -4,8 +4,6 @@ - - #include - --extern void check_bugs(void); -- - #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) - int ppro_with_ram_bug(void); - #else -diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h -index 7516e4199b3c6..20fd0acd7d800 100644 ---- a/arch/x86/include/asm/compat.h -+++ b/arch/x86/include/asm/compat.h -@@ -28,15 +28,13 @@ typedef u16 compat_ipc_pid_t; - typedef __kernel_fsid_t compat_fsid_t; - - struct compat_stat { -- compat_dev_t st_dev; -- u16 __pad1; -+ u32 st_dev; - compat_ino_t st_ino; - compat_mode_t st_mode; - compat_nlink_t st_nlink; - __compat_uid_t st_uid; - __compat_gid_t st_gid; -- compat_dev_t st_rdev; -- u16 __pad2; -+ u32 st_rdev; - u32 st_size; - u32 st_blksize; - u32 st_blocks; -diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h -index 3d52b094850a9..75efc4c6f0766 100644 ---- a/arch/x86/include/asm/cpu_entry_area.h -+++ b/arch/x86/include/asm/cpu_entry_area.h -@@ -10,6 +10,12 @@ - - #ifdef CONFIG_X86_64 - -+#ifdef CONFIG_AMD_MEM_ENCRYPT -+#define VC_EXCEPTION_STKSZ EXCEPTION_STKSZ -+#else -+#define VC_EXCEPTION_STKSZ 0 -+#endif -+ - /* Macro to enforce the same ordering and stack sizes */ - #define ESTACKS_MEMBERS(guardsize, optional_stack_size) \ - char DF_stack_guard[guardsize]; \ -@@ -28,7 +34,7 @@ - - /* The exception stacks' physical storage. No guard pages required */ - struct exception_stacks { -- ESTACKS_MEMBERS(0, 0) -+ ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ) - }; - - /* The effective cpu entry area mapping with guard pages. 
*/ -@@ -137,7 +143,7 @@ extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); - - extern struct cpu_entry_area *get_cpu_entry_area(int cpu); - --static inline struct entry_stack *cpu_entry_stack(int cpu) -+static __always_inline struct entry_stack *cpu_entry_stack(int cpu) - { - return &get_cpu_entry_area(cpu)->entry_stack_page.stack; - } -diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h -index 16a51e7288d58..cc3f62f5d5515 100644 ---- a/arch/x86/include/asm/cpufeature.h -+++ b/arch/x86/include/asm/cpufeature.h -@@ -32,6 +32,7 @@ enum cpuid_leafs - CPUID_8000_0007_EBX, - CPUID_7_EDX, - CPUID_8000_001F_EAX, -+ CPUID_8000_0021_EAX, - }; - - #ifdef CONFIG_X86_FEATURE_NAMES -@@ -51,7 +52,7 @@ extern const char * const x86_power_flags[32]; - extern const char * const x86_bug_flags[NBUGINTS*32]; - - #define test_cpu_cap(c, bit) \ -- test_bit(bit, (unsigned long *)((c)->x86_capability)) -+ arch_test_bit(bit, (unsigned long *)((c)->x86_capability)) - - /* - * There are 32 bits/features in each mask word. The high bits -@@ -91,8 +92,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; - CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ -+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ - REQUIRED_MASK_CHECK || \ -- BUILD_BUG_ON_ZERO(NCAPINTS != 20)) -+ BUILD_BUG_ON_ZERO(NCAPINTS != 21)) - - #define DISABLED_MASK_BIT_SET(feature_bit) \ - ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ -@@ -115,8 +117,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; - CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ -+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ - DISABLED_MASK_CHECK || \ -- BUILD_BUG_ON_ZERO(NCAPINTS != 20)) -+ BUILD_BUG_ON_ZERO(NCAPINTS != 21)) - - #define cpu_has(c, bit) \ - (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ -diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index d0ce5cfd3ac14..d6089072ee41f 100644 ---- a/arch/x86/include/asm/cpufeatures.h -+++ b/arch/x86/include/asm/cpufeatures.h -@@ -13,8 +13,8 @@ - /* - * Defines x86 CPU feature bits - */ --#define NCAPINTS 20 /* N 32-bit words worth of info */ --#define NBUGINTS 1 /* N 32-bit bug flags */ -+#define NCAPINTS 21 /* N 32-bit words worth of info */ -+#define NBUGINTS 2 /* N 32-bit bug flags */ - - /* - * Note: If the comment begins with a quoted string, that string is used -@@ -203,8 +203,8 @@ - #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ - /* FREE! 
( 7*32+10) */ - #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ --#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ --#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ -+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */ -+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */ - #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ - #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ - #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ -@@ -294,6 +294,21 @@ - #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ - #define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */ - #define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */ -+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */ -+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */ -+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ -+#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ -+#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ -+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */ -+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */ -+ -+ -+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ -+ -+#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ -+#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ -+#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ - - /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ - #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ -@@ -313,6 +328,7 @@ - #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ - #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ - #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. 
*/ -+#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ - - /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ - #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -@@ -400,6 +416,10 @@ - #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ - #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ - -+#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ -+#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ -+#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ -+ - /* - * BUG word(s) - */ -@@ -436,5 +456,14 @@ - #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ - #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ - #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ -+#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ -+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ -+#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ -+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ -+#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ -+#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ - -+/* BUG word 2 */ -+#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ -+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ - #endif /* _ASM_X86_CPUFEATURES_H */ -diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h -index cfdf307ddc012..9ed8343c9b3cb 100644 ---- a/arch/x86/include/asm/debugreg.h -+++ b/arch/x86/include/asm/debugreg.h -@@ -39,7 +39,20 @@ static __always_inline unsigned long native_get_debugreg(int regno) - asm("mov %%db6, %0" :"=r" (val)); - break; - case 7: -- asm("mov %%db7, %0" :"=r" (val)); -+ /* -+ * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them -+ * with other code. -+ * -+ * This is needed because a DR7 access can cause a #VC exception -+ * when running under SEV-ES. Taking a #VC exception is not a -+ * safe thing to do just anywhere in the entry code and -+ * re-ordering might place the access into an unsafe location. -+ * -+ * This happened in the NMI handler, where the DR7 read was -+ * re-ordered to happen before the call to sev_es_ist_enter(), -+ * causing stack recursion. -+ */ -+ asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); - break; - default: - BUG(); -@@ -66,7 +79,16 @@ static __always_inline void native_set_debugreg(int regno, unsigned long value) - asm("mov %0, %%db6" ::"r" (value)); - break; - case 7: -- asm("mov %0, %%db7" ::"r" (value)); -+ /* -+ * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them -+ * with other code. -+ * -+ * While is didn't happen with a DR7 write (see the DR7 read -+ * comment above which explains where it happened), add the -+ * __FORCE_ORDER here too to avoid similar problems in the -+ * future. 
-+ */ -+ asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); - break; - default: - BUG(); -diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h -index 8f28fafa98b32..99a12012c66ee 100644 ---- a/arch/x86/include/asm/disabled-features.h -+++ b/arch/x86/include/asm/disabled-features.h -@@ -56,6 +56,25 @@ - # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) - #endif - -+#ifdef CONFIG_RETPOLINE -+# define DISABLE_RETPOLINE 0 -+#else -+# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \ -+ (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31))) -+#endif -+ -+#ifdef CONFIG_RETHUNK -+# define DISABLE_RETHUNK 0 -+#else -+# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31)) -+#endif -+ -+#ifdef CONFIG_CPU_UNRET_ENTRY -+# define DISABLE_UNRET 0 -+#else -+# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31)) -+#endif -+ - /* Force disable because it's broken beyond repair */ - #define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) - -@@ -79,7 +98,7 @@ - #define DISABLED_MASK8 0 - #define DISABLED_MASK9 (DISABLE_SMAP|DISABLE_SGX) - #define DISABLED_MASK10 0 --#define DISABLED_MASK11 0 -+#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET) - #define DISABLED_MASK12 0 - #define DISABLED_MASK13 0 - #define DISABLED_MASK14 0 -@@ -89,6 +108,7 @@ - #define DISABLED_MASK17 0 - #define DISABLED_MASK18 0 - #define DISABLED_MASK19 0 --#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) -+#define DISABLED_MASK20 0 -+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) - - #endif /* _ASM_X86_DISABLED_FEATURES_H */ -diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h -index 4d0b126835b8a..63158fd558567 100644 ---- a/arch/x86/include/asm/efi.h -+++ b/arch/x86/include/asm/efi.h -@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void) - - extern void parse_efi_setup(u64 phys_addr, u32 data_len); - --extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); -- - extern void efi_thunk_runtime_setup(void); - efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size, - unsigned long descriptor_size, -diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h -index 43184640b579a..a12fdf01dc260 100644 ---- a/arch/x86/include/asm/entry-common.h -+++ b/arch/x86/include/asm/entry-common.h -@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, - static __always_inline void arch_exit_to_user_mode(void) - { - mds_user_clear_cpu_buffers(); -+ amd_clear_divider(); - } - #define arch_exit_to_user_mode arch_exit_to_user_mode - -diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h -index 1f0cbc52937ca..155c991ba95e2 100644 ---- a/arch/x86/include/asm/extable.h -+++ b/arch/x86/include/asm/extable.h -@@ -1,12 +1,18 @@ - /* SPDX-License-Identifier: GPL-2.0 */ - #ifndef _ASM_X86_EXTABLE_H - #define _ASM_X86_EXTABLE_H -+ -+#include -+ - /* -- * The exception table consists of triples of addresses relative to the -- * exception table entry itself. The first address is of an instruction -- * that is allowed to fault, the second is the target at which the program -- * should continue. The third is a handler function to deal with the fault -- * caused by the instruction in the first field. -+ * The exception table consists of two addresses relative to the -+ * exception table entry itself and a type selector field. 
-+ * -+ * The first address is of an instruction that is allowed to fault, the -+ * second is the target at which the program should continue. -+ * -+ * The type entry is used by fixup_exception() to select the handler to -+ * deal with the fault caused by the instruction in the first field. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, -@@ -15,7 +21,7 @@ - */ - - struct exception_table_entry { -- int insn, fixup, handler; -+ int insn, fixup, data; - }; - struct pt_regs; - -@@ -25,21 +31,27 @@ struct pt_regs; - do { \ - (a)->fixup = (b)->fixup + (delta); \ - (b)->fixup = (tmp).fixup - (delta); \ -- (a)->handler = (b)->handler + (delta); \ -- (b)->handler = (tmp).handler - (delta); \ -+ (a)->data = (b)->data; \ -+ (b)->data = (tmp).data; \ - } while (0) - --enum handler_type { -- EX_HANDLER_NONE, -- EX_HANDLER_FAULT, -- EX_HANDLER_UACCESS, -- EX_HANDLER_OTHER --}; -- - extern int fixup_exception(struct pt_regs *regs, int trapnr, - unsigned long error_code, unsigned long fault_addr); - extern int fixup_bug(struct pt_regs *regs, int trapnr); --extern enum handler_type ex_get_fault_handler_type(unsigned long ip); -+extern int ex_get_fixup_type(unsigned long ip); - extern void early_fixup_exception(struct pt_regs *regs, int trapnr); - -+#ifdef CONFIG_X86_MCE -+extern void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr); -+#else -+static inline void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) { } -+#endif -+ -+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_X86_64) -+bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs); -+#else -+static inline bool ex_handler_bpf(const struct exception_table_entry *x, -+ struct pt_regs *regs) { return false; } -+#endif -+ - #endif -diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h -new file mode 100644 -index 0000000000000..b3b785b9bb14c ---- /dev/null -+++ b/arch/x86/include/asm/extable_fixup_types.h -@@ -0,0 +1,58 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef _ASM_X86_EXTABLE_FIXUP_TYPES_H -+#define _ASM_X86_EXTABLE_FIXUP_TYPES_H -+ -+/* -+ * Our IMM is signed, as such it must live at the top end of the word. Also, -+ * since C99 hex constants are of ambigious type, force cast the mask to 'int' -+ * so that FIELD_GET() will DTRT and sign extend the value when it extracts it. 
-+ */ -+#define EX_DATA_TYPE_MASK ((int)0x000000FF) -+#define EX_DATA_REG_MASK ((int)0x00000F00) -+#define EX_DATA_FLAG_MASK ((int)0x0000F000) -+#define EX_DATA_IMM_MASK ((int)0xFFFF0000) -+ -+#define EX_DATA_REG_SHIFT 8 -+#define EX_DATA_FLAG_SHIFT 12 -+#define EX_DATA_IMM_SHIFT 16 -+ -+#define EX_DATA_REG(reg) ((reg) << EX_DATA_REG_SHIFT) -+#define EX_DATA_FLAG(flag) ((flag) << EX_DATA_FLAG_SHIFT) -+#define EX_DATA_IMM(imm) ((imm) << EX_DATA_IMM_SHIFT) -+ -+/* segment regs */ -+#define EX_REG_DS EX_DATA_REG(8) -+#define EX_REG_ES EX_DATA_REG(9) -+#define EX_REG_FS EX_DATA_REG(10) -+#define EX_REG_GS EX_DATA_REG(11) -+ -+/* flags */ -+#define EX_FLAG_CLEAR_AX EX_DATA_FLAG(1) -+#define EX_FLAG_CLEAR_DX EX_DATA_FLAG(2) -+#define EX_FLAG_CLEAR_AX_DX EX_DATA_FLAG(3) -+ -+/* types */ -+#define EX_TYPE_NONE 0 -+#define EX_TYPE_DEFAULT 1 -+#define EX_TYPE_FAULT 2 -+#define EX_TYPE_UACCESS 3 -+#define EX_TYPE_COPY 4 -+#define EX_TYPE_CLEAR_FS 5 -+#define EX_TYPE_FPU_RESTORE 6 -+#define EX_TYPE_BPF 7 -+#define EX_TYPE_WRMSR 8 -+#define EX_TYPE_RDMSR 9 -+#define EX_TYPE_WRMSR_SAFE 10 /* reg := -EIO */ -+#define EX_TYPE_RDMSR_SAFE 11 /* reg := -EIO */ -+#define EX_TYPE_WRMSR_IN_MCE 12 -+#define EX_TYPE_RDMSR_IN_MCE 13 -+#define EX_TYPE_DEFAULT_MCE_SAFE 14 -+#define EX_TYPE_FAULT_MCE_SAFE 15 -+ -+#define EX_TYPE_POP_REG 16 /* sp += sizeof(long) */ -+#define EX_TYPE_POP_ZERO (EX_TYPE_POP_REG | EX_DATA_IMM(0)) -+ -+#define EX_TYPE_IMM_REG 17 /* reg := (long)imm */ -+#define EX_TYPE_EFAULT_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(-EFAULT)) -+ -+#endif -diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h -index 5a18694a89b24..6a6f741edda36 100644 ---- a/arch/x86/include/asm/fpu/internal.h -+++ b/arch/x86/include/asm/fpu/internal.h -@@ -43,7 +43,7 @@ extern void fpu_flush_thread(void); - extern void fpu__init_cpu(void); - extern void fpu__init_system_xstate(void); - extern void fpu__init_cpu_xstate(void); --extern void fpu__init_system(struct cpuinfo_x86 *c); -+extern void fpu__init_system(void); - extern void fpu__init_check_bugs(void); - extern void fpu__resume_cpu(void); - -@@ -126,7 +126,7 @@ extern void save_fpregs_to_fpstate(struct fpu *fpu); - #define kernel_insn(insn, output, input...) \ - asm volatile("1:" #insn "\n\t" \ - "2:\n" \ -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \ -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FPU_RESTORE) \ - : output : input) - - static inline int fnsave_to_user_sigframe(struct fregs_state __user *fx) -@@ -253,7 +253,7 @@ static inline void fxsave(struct fxregs_state *fx) - XRSTORS, X86_FEATURE_XSAVES) \ - "\n" \ - "3:\n" \ -- _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\ -+ _ASM_EXTABLE_TYPE(661b, 3b, EX_TYPE_FPU_RESTORE) \ - : \ - : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ - : "memory") -@@ -416,8 +416,7 @@ DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); - * FPU state for a task MUST let the rest of the kernel know that the - * FPU registers are no longer valid for this task. - * -- * Either one of these invalidation functions is enough. Invalidate -- * a resource you control: CPU if using the CPU for something else -+ * Invalidate a resource you control: CPU if using the CPU for something else - * (with preemption disabled), FPU for the current task, or a task that - * is prevented from running by the current task. 
- */ -diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h -index 109dfcc75299d..d91df71f60fb1 100644 ---- a/arch/x86/include/asm/fpu/xstate.h -+++ b/arch/x86/include/asm/fpu/xstate.h -@@ -136,8 +136,8 @@ extern void __init update_regset_xstate_info(unsigned int size, - - void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr); - int xfeature_size(int xfeature_nr); --int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf); --int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf); -+int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf, u32 *pkru); -+int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf); - - void xsaves(struct xregs_state *xsave, u64 mask); - void xrstors(struct xregs_state *xsave, u64 mask); -diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h -index f9c00110a69ad..99d345b686fa2 100644 ---- a/arch/x86/include/asm/futex.h -+++ b/arch/x86/include/asm/futex.h -@@ -17,13 +17,9 @@ do { \ - int oldval = 0, ret; \ - asm volatile("1:\t" insn "\n" \ - "2:\n" \ -- "\t.section .fixup,\"ax\"\n" \ -- "3:\tmov\t%3, %1\n" \ -- "\tjmp\t2b\n" \ -- "\t.previous\n" \ -- _ASM_EXTABLE_UA(1b, 3b) \ -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ -- : "i" (-EFAULT), "0" (oparg), "1" (0)); \ -+ : "0" (oparg), "1" (0)); \ - if (ret) \ - goto label; \ - *oval = oldval; \ -@@ -39,15 +35,11 @@ do { \ - "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \ - "\tjnz\t2b\n" \ - "4:\n" \ -- "\t.section .fixup,\"ax\"\n" \ -- "5:\tmov\t%5, %1\n" \ -- "\tjmp\t4b\n" \ -- "\t.previous\n" \ -- _ASM_EXTABLE_UA(1b, 5b) \ -- _ASM_EXTABLE_UA(3b, 5b) \ -+ _ASM_EXTABLE_TYPE_REG(1b, 4b, EX_TYPE_EFAULT_REG, %1) \ -+ _ASM_EXTABLE_TYPE_REG(3b, 4b, EX_TYPE_EFAULT_REG, %1) \ - : "=&a" (oldval), "=&r" (ret), \ - "+m" (*uaddr), "=&r" (tem) \ -- : "r" (oparg), "i" (-EFAULT), "1" (0)); \ -+ : "r" (oparg), "1" (0)); \ - if (ret) \ - goto label; \ - *oval = oldval; \ -@@ -95,15 +87,11 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - if (!user_access_begin(uaddr, sizeof(u32))) - return -EFAULT; - asm volatile("\n" -- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" -+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" - "2:\n" -- "\t.section .fixup, \"ax\"\n" -- "3:\tmov %3, %0\n" -- "\tjmp 2b\n" -- "\t.previous\n" -- _ASM_EXTABLE_UA(1b, 3b) -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \ - : "+r" (ret), "=a" (oldval), "+m" (*uaddr) -- : "i" (-EFAULT), "r" (newval), "1" (oldval) -+ : "r" (newval), "1" (oldval) - : "memory" - ); - user_access_end(); -diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h -index 2322d6bd58833..b54b3e18d94ba 100644 ---- a/arch/x86/include/asm/hyperv-tlfs.h -+++ b/arch/x86/include/asm/hyperv-tlfs.h -@@ -529,7 +529,7 @@ struct hv_enlightened_vmcs { - u64 guest_rip; - - u32 hv_clean_fields; -- u32 hv_padding_32; -+ u32 padding32_1; - u32 hv_synthetic_controls; - struct { - u32 nested_flush_hypercall:1; -@@ -537,7 +537,7 @@ struct hv_enlightened_vmcs { - u32 reserved:30; - } __packed hv_enlightenments_control; - u32 hv_vp_id; -- -+ u32 padding32_2; - u64 hv_vm_id; - u64 partition_assist_page; - u64 padding64_4[4]; -diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h -index 91d7182ad2d6e..3df123f437c96 100644 ---- a/arch/x86/include/asm/insn-eval.h -+++ b/arch/x86/include/asm/insn-eval.h -@@ -15,12 
+15,15 @@ - #define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf) - #define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4)) - -+int pt_regs_offset(struct pt_regs *regs, int regno); -+ - bool insn_has_rep_prefix(struct insn *insn); - void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); - int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); - int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs); - unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); - int insn_get_code_seg_params(struct pt_regs *regs); -+int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip); - int insn_fetch_from_user(struct pt_regs *regs, - unsigned char buf[MAX_INSN_SIZE]); - int insn_fetch_from_user_inatomic(struct pt_regs *regs, -diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h -index 27158436f322d..d975c60f863a2 100644 ---- a/arch/x86/include/asm/intel-family.h -+++ b/arch/x86/include/asm/intel-family.h -@@ -105,10 +105,24 @@ - - #define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */ - -+#define INTEL_FAM6_EMERALDRAPIDS_X 0xCF -+ -+#define INTEL_FAM6_GRANITERAPIDS_X 0xAD -+#define INTEL_FAM6_GRANITERAPIDS_D 0xAE -+ - #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ - #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ -+#define INTEL_FAM6_ALDERLAKE_N 0xBE -+ -+#define INTEL_FAM6_RAPTORLAKE 0xB7 -+#define INTEL_FAM6_RAPTORLAKE_P 0xBA -+#define INTEL_FAM6_RAPTORLAKE_S 0xBF -+ -+#define INTEL_FAM6_LUNARLAKE_M 0xBD - --/* "Small Core" Processors (Atom) */ -+#define INTEL_FAM6_ARROWLAKE 0xC6 -+ -+/* "Small Core" Processors (Atom/E-Core) */ - - #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ - #define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ -@@ -135,6 +149,10 @@ - #define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ - #define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ - -+#define INTEL_FAM6_SIERRAFOREST_X 0xAF -+ -+#define INTEL_FAM6_GRANDRIDGE 0xB6 -+ - /* Xeon Phi */ - - #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ -diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h -index bf1ed2ddc74bd..7a983119bc403 100644 ---- a/arch/x86/include/asm/iommu.h -+++ b/arch/x86/include/asm/iommu.h -@@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) - { - u64 start = rmrr->base_address; - u64 end = rmrr->end_address + 1; -+ int entry_type; - -- if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) -+ entry_type = e820__get_entry_type(start, end); -+ if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS) - return 0; - - pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", -diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h -index 562854c608082..e087cd7837c31 100644 ---- a/arch/x86/include/asm/irq_stack.h -+++ b/arch/x86/include/asm/irq_stack.h -@@ -77,11 +77,11 @@ - * Function calls can clobber anything except the callee-saved - * registers. Tell the compiler. - */ --#define call_on_irqstack(func, asm_call, argconstr...) \ -+#define call_on_stack(stack, func, asm_call, argconstr...) 
\ - { \ - register void *tos asm("r11"); \ - \ -- tos = ((void *)__this_cpu_read(hardirq_stack_ptr)); \ -+ tos = ((void *)(stack)); \ - \ - asm_inline volatile( \ - "movq %%rsp, (%[tos]) \n" \ -@@ -98,6 +98,26 @@ - ); \ - } - -+#define ASM_CALL_ARG0 \ -+ "call %P[__func] \n" \ -+ ASM_REACHABLE -+ -+#define ASM_CALL_ARG1 \ -+ "movq %[arg1], %%rdi \n" \ -+ ASM_CALL_ARG0 -+ -+#define ASM_CALL_ARG2 \ -+ "movq %[arg2], %%rsi \n" \ -+ ASM_CALL_ARG1 -+ -+#define ASM_CALL_ARG3 \ -+ "movq %[arg3], %%rdx \n" \ -+ ASM_CALL_ARG2 -+ -+#define call_on_irqstack(func, asm_call, argconstr...) \ -+ call_on_stack(__this_cpu_read(hardirq_stack_ptr), \ -+ func, asm_call, argconstr) -+ - /* Macros to assert type correctness for run_*_on_irqstack macros */ - #define assert_function_type(func, proto) \ - static_assert(__builtin_types_compatible_p(typeof(&func), proto)) -@@ -147,8 +167,7 @@ - */ - #define ASM_CALL_SYSVEC \ - "call irq_enter_rcu \n" \ -- "movq %[arg1], %%rdi \n" \ -- "call %P[__func] \n" \ -+ ASM_CALL_ARG1 \ - "call irq_exit_rcu \n" - - #define SYSVEC_CONSTRAINTS , [arg1] "r" (regs) -@@ -168,12 +187,10 @@ - */ - #define ASM_CALL_IRQ \ - "call irq_enter_rcu \n" \ -- "movq %[arg1], %%rdi \n" \ -- "movl %[arg2], %%esi \n" \ -- "call %P[__func] \n" \ -+ ASM_CALL_ARG2 \ - "call irq_exit_rcu \n" - --#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" (vector) -+#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector) - - #define run_irq_on_irqstack_cond(func, regs, vector) \ - { \ -@@ -185,9 +202,6 @@ - IRQ_CONSTRAINTS, regs, vector); \ - } - --#define ASM_CALL_SOFTIRQ \ -- "call %P[__func] \n" -- - /* - * Macro to invoke __do_softirq on the irq stack. This is only called from - * task context when bottom halves are about to be reenabled and soft -@@ -197,7 +211,7 @@ - #define do_softirq_own_stack() \ - { \ - __this_cpu_write(hardirq_stack_inuse, true); \ -- call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \ -+ call_on_irqstack(__do_softirq, ASM_CALL_ARG0); \ - __this_cpu_write(hardirq_stack_inuse, false); \ - } - -diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h -index 0a6e34b070175..c7c924e15011d 100644 ---- a/arch/x86/include/asm/kexec.h -+++ b/arch/x86/include/asm/kexec.h -@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, - extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages); - #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages - -+#ifdef CONFIG_KEXEC_FILE -+struct purgatory_info; -+int arch_kexec_apply_relocations_add(struct purgatory_info *pi, -+ Elf_Shdr *section, -+ const Elf_Shdr *relsec, -+ const Elf_Shdr *symtab); -+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add -+#endif - #endif - - typedef void crash_vmclear_fn(void); -diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h -index cefe1d81e2e8b..4bdcb91478a51 100644 ---- a/arch/x86/include/asm/kvm-x86-ops.h -+++ b/arch/x86/include/asm/kvm-x86-ops.h -@@ -34,6 +34,7 @@ KVM_X86_OP(get_segment) - KVM_X86_OP(get_cpl) - KVM_X86_OP(set_segment) - KVM_X86_OP_NULL(get_cs_db_l_bits) -+KVM_X86_OP(is_valid_cr0) - KVM_X86_OP(set_cr0) - KVM_X86_OP(is_valid_cr4) - KVM_X86_OP(set_cr4) -@@ -47,6 +48,7 @@ KVM_X86_OP(set_dr7) - KVM_X86_OP(cache_reg) - KVM_X86_OP(get_rflags) - KVM_X86_OP(set_rflags) -+KVM_X86_OP(get_if_flag) - KVM_X86_OP(tlb_flush_all) - KVM_X86_OP(tlb_flush_current) - KVM_X86_OP_NULL(tlb_remote_flush) -@@ -114,6 +116,7 @@ KVM_X86_OP(enable_smi_window) - 
KVM_X86_OP_NULL(mem_enc_op) - KVM_X86_OP_NULL(mem_enc_reg_region) - KVM_X86_OP_NULL(mem_enc_unreg_region) -+KVM_X86_OP_NULL(guest_memory_reclaimed) - KVM_X86_OP(get_msr_feature) - KVM_X86_OP(can_emulate_instruction) - KVM_X86_OP(apic_init_signal_blocked) -diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 13f64654dfff8..08cfc26ee7c67 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -98,7 +98,7 @@ - KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) - #define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26) - #define KVM_REQ_TLB_FLUSH_GUEST \ -- KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP) -+ KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) - #define KVM_REQ_APF_READY KVM_ARCH_REQ(28) - #define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29) - #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \ -@@ -364,6 +364,7 @@ union kvm_mmu_extended_role { - unsigned int cr4_smap:1; - unsigned int cr4_smep:1; - unsigned int cr4_la57:1; -+ unsigned int efer_lma:1; - }; - }; - -@@ -497,6 +498,7 @@ struct kvm_pmu { - unsigned nr_arch_fixed_counters; - unsigned available_event_types; - u64 fixed_ctr_ctrl; -+ u64 fixed_ctr_ctrl_mask; - u64 global_ctrl; - u64 global_status; - u64 global_ovf_ctrl; -@@ -504,6 +506,7 @@ struct kvm_pmu { - u64 global_ctrl_mask; - u64 global_ovf_ctrl_mask; - u64 reserved_bits; -+ u64 raw_event_mask; - u8 version; - struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; - struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; -@@ -640,6 +643,7 @@ struct kvm_vcpu_arch { - u64 ia32_misc_enable_msr; - u64 smbase; - u64 smi_count; -+ bool at_instruction_boundary; - bool tpr_access_reporting; - bool xsaves_enabled; - u64 ia32_xss; -@@ -751,7 +755,7 @@ struct kvm_vcpu_arch { - u8 preempted; - u64 msr_val; - u64 last_steal; -- struct gfn_to_pfn_cache cache; -+ struct gfn_to_hva_cache cache; - } st; - - u64 l1_tsc_offset; -@@ -1269,6 +1273,8 @@ struct kvm_vcpu_stat { - u64 nested_run; - u64 directed_yield_attempted; - u64 directed_yield_successful; -+ u64 preemption_reported; -+ u64 preemption_other; - u64 guest_mode; - }; - -@@ -1327,8 +1333,9 @@ struct kvm_x86_ops { - void (*set_segment)(struct kvm_vcpu *vcpu, - struct kvm_segment *var, int seg); - void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); -+ bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); - void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); -- bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0); -+ bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); - void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); - int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); - void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); -@@ -1340,6 +1347,7 @@ struct kvm_x86_ops { - void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); - unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); - void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); -+ bool (*get_if_flag)(struct kvm_vcpu *vcpu); - - void (*tlb_flush_all)(struct kvm_vcpu *vcpu); - void (*tlb_flush_current)(struct kvm_vcpu *vcpu); -@@ -1469,6 +1477,7 @@ struct kvm_x86_ops { - int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); - int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); - int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd); -+ void (*guest_memory_reclaimed)(struct kvm *kvm); - - int (*get_msr_feature)(struct kvm_msr_entry *entry); - -@@ -1485,6 +1494,7 
@@ struct kvm_x86_ops { - }; - - struct kvm_x86_nested_ops { -+ void (*leave_nested)(struct kvm_vcpu *vcpu); - int (*check_events)(struct kvm_vcpu *vcpu); - bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); - void (*triple_fault)(struct kvm_vcpu *vcpu); -@@ -1507,6 +1517,7 @@ struct kvm_x86_init_ops { - int (*disabled_by_bios)(void); - int (*check_processor_compatibility)(void); - int (*hardware_setup)(void); -+ bool (*intel_pt_intr_in_guest)(void); - - struct kvm_x86_ops *runtime_ops; - }; -@@ -1554,8 +1565,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) - return -ENOTSUPP; - } - --int kvm_mmu_module_init(void); --void kvm_mmu_module_exit(void); -+void __init kvm_mmu_x86_module_init(void); -+int kvm_mmu_vendor_module_init(void); -+void kvm_mmu_vendor_module_exit(void); - - void kvm_mmu_destroy(struct kvm_vcpu *vcpu); - int kvm_mmu_create(struct kvm_vcpu *vcpu); -diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h -index 365111789cc68..5000cf59bdf5b 100644 ---- a/arch/x86/include/asm/linkage.h -+++ b/arch/x86/include/asm/linkage.h -@@ -18,6 +18,28 @@ - #define __ALIGN_STR __stringify(__ALIGN) - #endif - -+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) -+#define RET jmp __x86_return_thunk -+#else /* CONFIG_RETPOLINE */ -+#ifdef CONFIG_SLS -+#define RET ret; int3 -+#else -+#define RET ret -+#endif -+#endif /* CONFIG_RETPOLINE */ -+ -+#else /* __ASSEMBLY__ */ -+ -+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) -+#define ASM_RET "jmp __x86_return_thunk\n\t" -+#else /* CONFIG_RETPOLINE */ -+#ifdef CONFIG_SLS -+#define ASM_RET "ret; int3\n\t" -+#else -+#define ASM_RET "ret\n\t" -+#endif -+#endif /* CONFIG_RETPOLINE */ -+ - #endif /* __ASSEMBLY__ */ - - #endif /* _ASM_X86_LINKAGE_H */ -diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h -index 9c80c68d75b54..2356fdddd3e61 100644 ---- a/arch/x86/include/asm/mem_encrypt.h -+++ b/arch/x86/include/asm/mem_encrypt.h -@@ -13,6 +13,7 @@ - #ifndef __ASSEMBLY__ - - #include -+#include - - #include - -@@ -46,14 +47,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); - - void __init mem_encrypt_free_decrypted_mem(void); - --/* Architecture __weak replacement functions */ --void __init mem_encrypt_init(void); -- - void __init sev_es_init_vc_handling(void); - bool sme_active(void); - bool sev_active(void); - bool sev_es_active(void); - -+void __init mem_encrypt_init(void); -+ - #define __bss_decrypted __section(".bss..decrypted") - - #else /* !CONFIG_AMD_MEM_ENCRYPT */ -@@ -86,6 +86,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; - - static inline void mem_encrypt_free_decrypted_mem(void) { } - -+static inline void mem_encrypt_init(void) { } -+ - #define __bss_decrypted - - #endif /* CONFIG_AMD_MEM_ENCRYPT */ -diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h -index ab45a220fac47..4ca377efc9869 100644 ---- a/arch/x86/include/asm/microcode.h -+++ b/arch/x86/include/asm/microcode.h -@@ -5,10 +5,12 @@ - #include - #include - #include -+#include - - struct ucode_patch { - struct list_head plist; - void *data; /* Intel uses only this one */ -+ unsigned int size; - u32 patch_id; - u16 equiv_cpu; - }; -@@ -129,13 +131,15 @@ static inline unsigned int x86_cpuid_family(void) - #ifdef CONFIG_MICROCODE - extern void __init load_ucode_bsp(void); - extern void load_ucode_ap(void); --void reload_early_microcode(void); -+void 
reload_early_microcode(unsigned int cpu); - extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); - extern bool initrd_gone; -+void microcode_bsp_resume(void); - #else - static inline void __init load_ucode_bsp(void) { } - static inline void load_ucode_ap(void) { } --static inline void reload_early_microcode(void) { } -+static inline void reload_early_microcode(unsigned int cpu) { } -+static inline void microcode_bsp_resume(void) { } - static inline bool - get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; } - #endif -diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h -index 7063b5a43220a..403a8e76b310c 100644 ---- a/arch/x86/include/asm/microcode_amd.h -+++ b/arch/x86/include/asm/microcode_amd.h -@@ -47,12 +47,14 @@ struct microcode_amd { - extern void __init load_ucode_amd_bsp(unsigned int family); - extern void load_ucode_amd_ap(unsigned int family); - extern int __init save_microcode_in_initrd_amd(unsigned int family); --void reload_ucode_amd(void); -+void reload_ucode_amd(unsigned int cpu); -+extern void amd_check_microcode(void); - #else - static inline void __init load_ucode_amd_bsp(unsigned int family) {} - static inline void load_ucode_amd_ap(unsigned int family) {} - static inline int __init - save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } --static inline void reload_ucode_amd(void) {} -+static inline void reload_ucode_amd(unsigned int cpu) {} -+static inline void amd_check_microcode(void) {} - #endif - #endif /* _ASM_X86_MICROCODE_AMD_H */ -diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h -index adccbc209169a..c2b9ab94408e6 100644 ---- a/arch/x86/include/asm/mshyperv.h -+++ b/arch/x86/include/asm/mshyperv.h -@@ -176,13 +176,6 @@ bool hv_vcpu_is_preempted(int vcpu); - static inline void hv_apic_init(void) {} - #endif - --static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, -- struct msi_desc *msi_desc) --{ -- msi_entry->address.as_uint32 = msi_desc->msg.address_lo; -- msi_entry->data.as_uint32 = msi_desc->msg.data; --} -- - struct irq_domain *hv_create_pci_msi_domain(void); - - int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector, -diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h -index b85147d75626e..d71c7e8b738d2 100644 ---- a/arch/x86/include/asm/msi.h -+++ b/arch/x86/include/asm/msi.h -@@ -12,14 +12,17 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, - /* Structs and defines for the X86 specific MSI message format */ - - typedef struct x86_msi_data { -- u32 vector : 8, -- delivery_mode : 3, -- dest_mode_logical : 1, -- reserved : 2, -- active_low : 1, -- is_level : 1; -- -- u32 dmar_subhandle; -+ union { -+ struct { -+ u32 vector : 8, -+ delivery_mode : 3, -+ dest_mode_logical : 1, -+ reserved : 2, -+ active_low : 1, -+ is_level : 1; -+ }; -+ u32 dmar_subhandle; -+ }; - } __attribute__ ((packed)) arch_msi_msg_data_t; - #define arch_msi_msg_data x86_msi_data - -diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h -index a7c413432b33d..91d8322af4139 100644 ---- a/arch/x86/include/asm/msr-index.h -+++ b/arch/x86/include/asm/msr-index.h -@@ -51,9 +51,16 @@ - #define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ - #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ - #define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ -+#define 
SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ -+#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) -+ -+/* A mask for bits which the kernel toggles when controlling mitigations */ -+#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \ -+ | SPEC_CTRL_RRSBA_DIS_S) - - #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ - #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ -+#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ - - #define MSR_PPIN_CTL 0x0000004e - #define MSR_PPIN 0x0000004f -@@ -91,6 +98,7 @@ - #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a - #define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ - #define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ -+#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */ - #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ - #define ARCH_CAP_SSB_NO BIT(4) /* - * Not susceptible to Speculative Store Bypass -@@ -114,6 +122,50 @@ - * Not susceptible to - * TSX Async Abort (TAA) vulnerabilities. - */ -+#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* -+ * Not susceptible to SBDR and SSDP -+ * variants of Processor MMIO stale data -+ * vulnerabilities. -+ */ -+#define ARCH_CAP_FBSDP_NO BIT(14) /* -+ * Not susceptible to FBSDP variant of -+ * Processor MMIO stale data -+ * vulnerabilities. -+ */ -+#define ARCH_CAP_PSDP_NO BIT(15) /* -+ * Not susceptible to PSDP variant of -+ * Processor MMIO stale data -+ * vulnerabilities. -+ */ -+#define ARCH_CAP_FB_CLEAR BIT(17) /* -+ * VERW clears CPU fill buffer -+ * even on MDS_NO CPUs. -+ */ -+#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* -+ * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] -+ * bit available to control VERW -+ * behavior. -+ */ -+#define ARCH_CAP_RRSBA BIT(19) /* -+ * Indicates RET may use predictors -+ * other than the RSB. With eIBRS -+ * enabled predictions in kernel mode -+ * are restricted to targets in -+ * kernel. -+ */ -+#define ARCH_CAP_PBRSB_NO BIT(24) /* -+ * Not susceptible to Post-Barrier -+ * Return Stack Buffer Predictions. -+ */ -+#define ARCH_CAP_GDS_CTRL BIT(25) /* -+ * CPU is vulnerable to Gather -+ * Data Sampling (GDS) and -+ * has controls for mitigation. -+ */ -+#define ARCH_CAP_GDS_NO BIT(26) /* -+ * CPU is not vulnerable to Gather -+ * Data Sampling (GDS). 
-+ */ - - #define MSR_IA32_FLUSH_CMD 0x0000010b - #define L1D_FLUSH BIT(0) /* -@@ -128,9 +180,12 @@ - #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ - #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ - --/* SRBDS support */ - #define MSR_IA32_MCU_OPT_CTRL 0x00000123 --#define RNGDS_MITG_DIS BIT(0) -+#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ -+#define RTM_ALLOW BIT(1) /* TSX development mode */ -+#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ -+#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ -+#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ - - #define MSR_IA32_SYSENTER_CS 0x00000174 - #define MSR_IA32_SYSENTER_ESP 0x00000175 -@@ -456,6 +511,12 @@ - #define MSR_AMD64_CPUID_FN_1 0xc0011004 - #define MSR_AMD64_LS_CFG 0xc0011020 - #define MSR_AMD64_DC_CFG 0xc0011022 -+ -+#define MSR_AMD64_DE_CFG 0xc0011029 -+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 -+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT) -+#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9 -+ - #define MSR_AMD64_BU_CFG2 0xc001102a - #define MSR_AMD64_IBSFETCHCTL 0xc0011030 - #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 -@@ -489,6 +550,9 @@ - /* Fam 17h MSRs */ - #define MSR_F17H_IRPERF 0xc00000e9 - -+#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3 -+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1) -+ - /* Fam 16h MSRs */ - #define MSR_F16H_L2I_PERF_CTL 0xc0010230 - #define MSR_F16H_L2I_PERF_CTR 0xc0010231 -@@ -530,9 +594,6 @@ - #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL - #define FAM10H_MMIO_CONF_BASE_SHIFT 20 - #define MSR_FAM10H_NODE_ID 0xc001100c --#define MSR_F10H_DECFG 0xc0011029 --#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 --#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) - - /* K8 MSRs */ - #define MSR_K8_TOP_MEM1 0xc001001a -diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h -index a3f87f1015d3d..d42e6c6b47b1e 100644 ---- a/arch/x86/include/asm/msr.h -+++ b/arch/x86/include/asm/msr.h -@@ -92,7 +92,7 @@ static __always_inline unsigned long long __rdmsr(unsigned int msr) - - asm volatile("1: rdmsr\n" - "2:\n" -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe) -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR) - : EAX_EDX_RET(val, low, high) : "c" (msr)); - - return EAX_EDX_VAL(val, low, high); -@@ -102,7 +102,7 @@ static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high) - { - asm volatile("1: wrmsr\n" - "2:\n" -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR) - : : "c" (msr), "a"(low), "d" (high) : "memory"); - } - -@@ -137,17 +137,11 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, - { - DECLARE_ARGS(val, low, high); - -- asm volatile("2: rdmsr ; xor %[err],%[err]\n" -- "1:\n\t" -- ".section .fixup,\"ax\"\n\t" -- "3: mov %[fault],%[err]\n\t" -- "xorl %%eax, %%eax\n\t" -- "xorl %%edx, %%edx\n\t" -- "jmp 1b\n\t" -- ".previous\n\t" -- _ASM_EXTABLE(2b, 3b) -+ asm volatile("1: rdmsr ; xor %[err],%[err]\n" -+ "2:\n\t" -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err]) - : [err] "=r" (*err), EAX_EDX_RET(val, low, high) -- : "c" (msr), [fault] "i" (-EIO)); -+ : "c" (msr)); - if (tracepoint_enabled(read_msr)) - do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err); - return EAX_EDX_VAL(val, low, high); -@@ -169,15 +163,11 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high) - { - int err; - -- asm volatile("2: wrmsr ; xor %[err],%[err]\n" -- 
"1:\n\t" -- ".section .fixup,\"ax\"\n\t" -- "3: mov %[fault],%[err] ; jmp 1b\n\t" -- ".previous\n\t" -- _ASM_EXTABLE(2b, 3b) -+ asm volatile("1: wrmsr ; xor %[err],%[err]\n" -+ "2:\n\t" -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err]) - : [err] "=a" (err) -- : "c" (msr), "0" (low), "d" (high), -- [fault] "i" (-EIO) -+ : "c" (msr), "0" (low), "d" (high) - : "memory"); - if (tracepoint_enabled(write_msr)) - do_trace_write_msr(msr, ((u64)high << 32 | low), err); -diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h -index ec2d5c8c66947..940c15ee5650f 100644 ---- a/arch/x86/include/asm/nospec-branch.h -+++ b/arch/x86/include/asm/nospec-branch.h -@@ -5,11 +5,15 @@ - - #include - #include -+#include - - #include - #include - #include - #include -+#include -+ -+#define RETPOLINE_THUNK_SIZE 32 - - /* - * Fill the CPU return stack buffer. -@@ -31,32 +35,57 @@ - #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ - - /* -+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN. -+ */ -+#define __FILL_RETURN_SLOT \ -+ ANNOTATE_INTRA_FUNCTION_CALL; \ -+ call 772f; \ -+ int3; \ -+772: -+ -+/* -+ * Stuff the entire RSB. -+ * - * Google experimented with loop-unrolling and this turned out to be - * the optimal version - two calls, each with their own speculation - * trap should their return address end up getting used, in a loop. - */ --#define __FILL_RETURN_BUFFER(reg, nr, sp) \ -- mov $(nr/2), reg; \ --771: \ -- ANNOTATE_INTRA_FUNCTION_CALL; \ -- call 772f; \ --773: /* speculation trap */ \ -- UNWIND_HINT_EMPTY; \ -- pause; \ -- lfence; \ -- jmp 773b; \ --772: \ -- ANNOTATE_INTRA_FUNCTION_CALL; \ -- call 774f; \ --775: /* speculation trap */ \ -- UNWIND_HINT_EMPTY; \ -- pause; \ -- lfence; \ -- jmp 775b; \ --774: \ -- add $(BITS_PER_LONG/8) * 2, sp; \ -- dec reg; \ -- jnz 771b; -+#ifdef CONFIG_X86_64 -+#define __FILL_RETURN_BUFFER(reg, nr) \ -+ mov $(nr/2), reg; \ -+771: \ -+ __FILL_RETURN_SLOT \ -+ __FILL_RETURN_SLOT \ -+ add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \ -+ dec reg; \ -+ jnz 771b; \ -+ /* barrier for jnz misprediction */ \ -+ lfence; -+#else -+/* -+ * i386 doesn't unconditionally have LFENCE, as such it can't -+ * do a loop. -+ */ -+#define __FILL_RETURN_BUFFER(reg, nr) \ -+ .rept nr; \ -+ __FILL_RETURN_SLOT; \ -+ .endr; \ -+ add $(BITS_PER_LONG/8) * nr, %_ASM_SP; -+#endif -+ -+/* -+ * Stuff a single RSB slot. -+ * -+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be -+ * forced to retire before letting a RET instruction execute. -+ * -+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed -+ * before this point. -+ */ -+#define __FILL_ONE_RETURN \ -+ __FILL_RETURN_SLOT \ -+ add $(BITS_PER_LONG/8), %_ASM_SP; \ -+ lfence; - - #ifdef __ASSEMBLY__ - -@@ -72,6 +101,23 @@ - .popsection - .endm - -+/* -+ * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions -+ * vs RETBleed validation. -+ */ -+#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE -+ -+/* -+ * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should -+ * eventually turn into it's own annotation. 
-+ */ -+.macro ANNOTATE_UNRET_END -+#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) -+ ANNOTATE_RETPOLINE_SAFE -+ nop -+#endif -+.endm -+ - /* - * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple - * indirect jmp/call which may be susceptible to the Spectre variant 2 -@@ -81,7 +127,7 @@ - #ifdef CONFIG_RETPOLINE - ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ - __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ -- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD -+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE - #else - jmp *%\reg - #endif -@@ -91,7 +137,7 @@ - #ifdef CONFIG_RETPOLINE - ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \ - __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \ -- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD -+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE - #else - call *%\reg - #endif -@@ -101,11 +147,38 @@ - * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP - * monstrosity above, manually. - */ --.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req --#ifdef CONFIG_RETPOLINE -- ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr -- __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP) -+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS) -+ ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \ -+ __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \ -+ __stringify(__FILL_ONE_RETURN), \ftr2 -+ - .Lskip_rsb_\@: -+.endm -+ -+#ifdef CONFIG_CPU_UNRET_ENTRY -+#define CALL_UNTRAIN_RET "call entry_untrain_ret" -+#else -+#define CALL_UNTRAIN_RET "" -+#endif -+ -+/* -+ * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the -+ * return thunk isn't mapped into the userspace tables (then again, AMD -+ * typically has NO_MELTDOWN). -+ * -+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack, -+ * entry_ibpb() will clobber AX, CX, DX. -+ * -+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point -+ * where we have a stack but before any RET instruction. 
-+ */ -+.macro UNTRAIN_RET -+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ -+ defined(CONFIG_CPU_SRSO) -+ ANNOTATE_UNRET_END -+ ALTERNATIVE_2 "", \ -+ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ -+ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB - #endif - .endm - -@@ -117,7 +190,34 @@ - _ASM_PTR " 999b\n\t" \ - ".popsection\n\t" - -+#ifdef CONFIG_RETHUNK -+extern void __x86_return_thunk(void); -+#else -+static inline void __x86_return_thunk(void) {} -+#endif -+ -+extern void retbleed_return_thunk(void); -+extern void srso_return_thunk(void); -+extern void srso_alias_return_thunk(void); -+ -+extern void retbleed_untrain_ret(void); -+extern void srso_untrain_ret(void); -+extern void srso_alias_untrain_ret(void); -+ -+extern void entry_untrain_ret(void); -+extern void entry_ibpb(void); -+ - #ifdef CONFIG_RETPOLINE -+ -+typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; -+ -+#define GEN(reg) \ -+ extern retpoline_thunk_t __x86_indirect_thunk_ ## reg; -+#include -+#undef GEN -+ -+extern retpoline_thunk_t __x86_indirect_thunk_array[]; -+ - #ifdef CONFIG_X86_64 - - /* -@@ -133,7 +233,7 @@ - "lfence;\n" \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ -- X86_FEATURE_RETPOLINE_AMD) -+ X86_FEATURE_RETPOLINE_LFENCE) - - # define THUNK_TARGET(addr) [thunk_target] "r" (addr) - -@@ -163,7 +263,7 @@ - "lfence;\n" \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ -- X86_FEATURE_RETPOLINE_AMD) -+ X86_FEATURE_RETPOLINE_LFENCE) - - # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) - #endif -@@ -175,9 +275,12 @@ - /* The Spectre V2 mitigation variants */ - enum spectre_v2_mitigation { - SPECTRE_V2_NONE, -- SPECTRE_V2_RETPOLINE_GENERIC, -- SPECTRE_V2_RETPOLINE_AMD, -- SPECTRE_V2_IBRS_ENHANCED, -+ SPECTRE_V2_RETPOLINE, -+ SPECTRE_V2_LFENCE, -+ SPECTRE_V2_EIBRS, -+ SPECTRE_V2_EIBRS_RETPOLINE, -+ SPECTRE_V2_EIBRS_LFENCE, -+ SPECTRE_V2_IBRS, - }; - - /* The indirect branch speculation control variants */ -@@ -211,15 +314,18 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) - : "memory"); - } - -+extern u64 x86_pred_cmd; -+ - static inline void indirect_branch_prediction_barrier(void) - { -- u64 val = PRED_CMD_IBPB; -- -- alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); -+ alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); - } - - /* The Intel SPEC CTRL MSR base value cache */ - extern u64 x86_spec_ctrl_base; -+DECLARE_PER_CPU(u64, x86_spec_ctrl_current); -+extern void update_spec_ctrl_cond(u64 val); -+extern u64 spec_ctrl_current(void); - - /* - * With retpoline, we must use IBRS to restrict branch prediction -@@ -229,18 +335,18 @@ extern u64 x86_spec_ctrl_base; - */ - #define firmware_restrict_branch_speculation_start() \ - do { \ -- u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ -- \ - preempt_disable(); \ -- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ -+ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ -+ spec_ctrl_current() | SPEC_CTRL_IBRS, \ - X86_FEATURE_USE_IBRS_FW); \ -+ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \ -+ X86_FEATURE_USE_IBPB_FW); \ - } while (0) - - #define firmware_restrict_branch_speculation_end() \ - do { \ -- u64 val = x86_spec_ctrl_base; \ -- \ -- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ -+ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ -+ spec_ctrl_current(), \ - X86_FEATURE_USE_IBRS_FW); \ - preempt_enable(); \ - } while (0) -@@ -254,6 +360,8 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); - - DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); - 
-+DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); -+ - #include - - /** -@@ -303,63 +411,4 @@ static inline void mds_idle_clear_cpu_buffers(void) - - #endif /* __ASSEMBLY__ */ - --/* -- * Below is used in the eBPF JIT compiler and emits the byte sequence -- * for the following assembly: -- * -- * With retpolines configured: -- * -- * callq do_rop -- * spec_trap: -- * pause -- * lfence -- * jmp spec_trap -- * do_rop: -- * mov %rcx,(%rsp) for x86_64 -- * mov %edx,(%esp) for x86_32 -- * retq -- * -- * Without retpolines configured: -- * -- * jmp *%rcx for x86_64 -- * jmp *%edx for x86_32 -- */ --#ifdef CONFIG_RETPOLINE --# ifdef CONFIG_X86_64 --# define RETPOLINE_RCX_BPF_JIT_SIZE 17 --# define RETPOLINE_RCX_BPF_JIT() \ --do { \ -- EMIT1_off32(0xE8, 7); /* callq do_rop */ \ -- /* spec_trap: */ \ -- EMIT2(0xF3, 0x90); /* pause */ \ -- EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ -- EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ -- /* do_rop: */ \ -- EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \ -- EMIT1(0xC3); /* retq */ \ --} while (0) --# else /* !CONFIG_X86_64 */ --# define RETPOLINE_EDX_BPF_JIT() \ --do { \ -- EMIT1_off32(0xE8, 7); /* call do_rop */ \ -- /* spec_trap: */ \ -- EMIT2(0xF3, 0x90); /* pause */ \ -- EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ -- EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ -- /* do_rop: */ \ -- EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */ \ -- EMIT1(0xC3); /* ret */ \ --} while (0) --# endif --#else /* !CONFIG_RETPOLINE */ --# ifdef CONFIG_X86_64 --# define RETPOLINE_RCX_BPF_JIT_SIZE 2 --# define RETPOLINE_RCX_BPF_JIT() \ -- EMIT2(0xFF, 0xE1); /* jmp *%rcx */ --# else /* !CONFIG_X86_64 */ --# define RETPOLINE_EDX_BPF_JIT() \ -- EMIT2(0xFF, 0xE2) /* jmp *%edx */ --# endif --#endif -- - #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ -diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h -index 4bde0dc66100c..56891399fa2a6 100644 ---- a/arch/x86/include/asm/page_64.h -+++ b/arch/x86/include/asm/page_64.h -@@ -15,7 +15,7 @@ extern unsigned long page_offset_base; - extern unsigned long vmalloc_base; - extern unsigned long vmemmap_base; - --static inline unsigned long __phys_addr_nodebug(unsigned long x) -+static __always_inline unsigned long __phys_addr_nodebug(unsigned long x) - { - unsigned long y = x - __START_KERNEL_map; - -diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h -index a8d4ad8565681..e9e2c3ba59239 100644 ---- a/arch/x86/include/asm/page_64_types.h -+++ b/arch/x86/include/asm/page_64_types.h -@@ -15,7 +15,7 @@ - #define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) - #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) - --#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER) -+#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER) - #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) - - #define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER) -diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h -index da3a1ac82be58..4d8b2731f4f85 100644 ---- a/arch/x86/include/asm/paravirt.h -+++ b/arch/x86/include/asm/paravirt.h -@@ -665,7 +665,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu); - "call " #func ";" \ - PV_RESTORE_ALL_CALLER_REGS \ - FRAME_END \ -- "ret;" \ -+ ASM_RET \ - ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \ - ".popsection") - -diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h -index 8fc1b5003713f..0e4efcde07831 100644 ---- a/arch/x86/include/asm/perf_event.h -+++ b/arch/x86/include/asm/perf_event.h -@@ -241,6 
+241,11 @@ struct x86_pmu_capability { - #define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3) - #define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS) - -+static inline bool use_fixed_pseudo_encoding(u64 code) -+{ -+ return !(code & 0xff); -+} -+ - /* - * We model BTS tracing as another fixed-mode PMC. - * -@@ -422,8 +427,10 @@ struct pebs_xmm { - - #ifdef CONFIG_X86_LOCAL_APIC - extern u32 get_ibs_caps(void); -+extern int forward_event_to_ibs(struct perf_event *event); - #else - static inline u32 get_ibs_caps(void) { return 0; } -+static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; } - #endif - - #ifdef CONFIG_PERF_EVENTS -diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h -index 56d0399a0cd16..dd520b44e89cc 100644 ---- a/arch/x86/include/asm/pgtable_64.h -+++ b/arch/x86/include/asm/pgtable_64.h -@@ -235,8 +235,8 @@ static inline void native_pgd_clear(pgd_t *pgd) - - #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) - #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) --#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) --#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val }) -+#define __swp_entry_to_pte(x) (__pte((x).val)) -+#define __swp_entry_to_pmd(x) (__pmd((x).val)) - - extern int kern_addr_valid(unsigned long addr); - extern void cleanup_highmap(void); -diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h -index 40497a9020c6e..28e59576c75be 100644 ---- a/arch/x86/include/asm/pgtable_types.h -+++ b/arch/x86/include/asm/pgtable_types.h -@@ -123,11 +123,12 @@ - * instance, and is *not* included in this mask since - * pte_modify() does modify it. - */ --#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ -- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ -- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \ -- _PAGE_UFFD_WP) --#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) -+#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ -+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\ -+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \ -+ _PAGE_UFFD_WP) -+#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT) -+#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE) - - /* - * The cache modes defined here are used to translate between pure SW usage -diff --git a/arch/x86/include/asm/pkru.h b/arch/x86/include/asm/pkru.h -index ccc539faa5bbc..4d8b9448fe8d2 100644 ---- a/arch/x86/include/asm/pkru.h -+++ b/arch/x86/include/asm/pkru.h -@@ -4,8 +4,8 @@ - - #include - --#define PKRU_AD_BIT 0x1 --#define PKRU_WD_BIT 0x2 -+#define PKRU_AD_BIT 0x1u -+#define PKRU_WD_BIT 0x2u - #define PKRU_BITS_PER_PKEY 2 - - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS -diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h -index 9ad2acaaae9b8..bbbf27cfe7015 100644 ---- a/arch/x86/include/asm/processor.h -+++ b/arch/x86/include/asm/processor.h -@@ -518,6 +518,7 @@ struct thread_struct { - */ - unsigned long iopl_emul; - -+ unsigned int iopl_warn:1; - unsigned int sig_on_uaccess_err:1; - - /* -@@ -802,9 +803,13 @@ extern u16 get_llc_id(unsigned int cpu); - #ifdef CONFIG_CPU_SUP_AMD - extern u32 amd_get_nodes_per_socket(void); - extern u32 amd_get_highest_perf(void); -+extern bool cpu_has_ibpb_brtype_microcode(void); -+extern void amd_clear_divider(void); - #else - static inline u32 amd_get_nodes_per_socket(void) { return 0; } - static inline u32 
amd_get_highest_perf(void) { return 0; } -+static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } -+static inline void amd_clear_divider(void) { } - #endif - - static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) -@@ -833,8 +838,9 @@ bool xen_set_default_idle(void); - #define xen_set_default_idle 0 - #endif - --void stop_this_cpu(void *dummy); --void microcode_check(void); -+void __noreturn stop_this_cpu(void *dummy); -+void microcode_check(struct cpuinfo_x86 *prev_info); -+void store_cpu_caps(struct cpuinfo_x86 *info); - - enum l1tf_mitigations { - L1TF_MITIGATION_OFF, -@@ -853,4 +859,6 @@ enum mds_mitigations { - MDS_MITIGATION_VMWERV, - }; - -+extern bool gds_ucode_mitigated(void); -+ - #endif /* _ASM_X86_PROCESSOR_H */ -diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h -index 159622ee06748..1474cf96251dd 100644 ---- a/arch/x86/include/asm/qspinlock_paravirt.h -+++ b/arch/x86/include/asm/qspinlock_paravirt.h -@@ -48,7 +48,7 @@ asm (".pushsection .text;" - "jne .slowpath;" - "pop %rdx;" - FRAME_END -- "ret;" -+ ASM_RET - ".slowpath: " - "push %rsi;" - "movzbl %al,%esi;" -@@ -56,7 +56,7 @@ asm (".pushsection .text;" - "pop %rsi;" - "pop %rdx;" - FRAME_END -- "ret;" -+ ASM_RET - ".size " PV_UNLOCK ", .-" PV_UNLOCK ";" - ".popsection"); - -diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h -index 5db5d083c8732..331474b150f16 100644 ---- a/arch/x86/include/asm/realmode.h -+++ b/arch/x86/include/asm/realmode.h -@@ -89,6 +89,7 @@ static inline void set_real_mode_mem(phys_addr_t mem) - } - - void reserve_real_mode(void); -+void load_trampoline_pgtable(void); - - #endif /* __ASSEMBLY__ */ - -diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h -index 04c17be9b5fda..bc5b4d788c08d 100644 ---- a/arch/x86/include/asm/reboot.h -+++ b/arch/x86/include/asm/reboot.h -@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type); - #define MRR_BIOS 0 - #define MRR_APM 1 - -+void cpu_emergency_disable_virtualization(void); -+ - typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); - void nmi_panic_self_stop(struct pt_regs *regs); - void nmi_shootdown_cpus(nmi_shootdown_cb callback); -diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h -index b2d504f119370..9bf60a8b9e9c2 100644 ---- a/arch/x86/include/asm/required-features.h -+++ b/arch/x86/include/asm/required-features.h -@@ -102,6 +102,7 @@ - #define REQUIRED_MASK17 0 - #define REQUIRED_MASK18 0 - #define REQUIRED_MASK19 0 --#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) -+#define REQUIRED_MASK20 0 -+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) - - #endif /* _ASM_X86_REQUIRED_FEATURES_H */ -diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h -index d60ed0668a593..b9ccdf5ea98ba 100644 ---- a/arch/x86/include/asm/resctrl.h -+++ b/arch/x86/include/asm/resctrl.h -@@ -51,7 +51,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); - * simple as possible. - * Must be called with preemption disabled. - */ --static void __resctrl_sched_in(void) -+static inline void __resctrl_sched_in(struct task_struct *tsk) - { - struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - u32 closid = state->default_closid; -@@ -63,13 +63,13 @@ static void __resctrl_sched_in(void) - * Else use the closid/rmid assigned to this cpu. 
- */ - if (static_branch_likely(&rdt_alloc_enable_key)) { -- tmp = READ_ONCE(current->closid); -+ tmp = READ_ONCE(tsk->closid); - if (tmp) - closid = tmp; - } - - if (static_branch_likely(&rdt_mon_enable_key)) { -- tmp = READ_ONCE(current->rmid); -+ tmp = READ_ONCE(tsk->rmid); - if (tmp) - rmid = tmp; - } -@@ -81,17 +81,17 @@ static void __resctrl_sched_in(void) - } - } - --static inline void resctrl_sched_in(void) -+static inline void resctrl_sched_in(struct task_struct *tsk) - { - if (static_branch_likely(&rdt_enable_key)) -- __resctrl_sched_in(); -+ __resctrl_sched_in(tsk); - } - - void resctrl_cpu_detect(struct cpuinfo_x86 *c); - - #else - --static inline void resctrl_sched_in(void) {} -+static inline void resctrl_sched_in(struct task_struct *tsk) {} - static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} - - #endif /* CONFIG_X86_CPU_RESCTRL */ -diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h -index 72044026eb3c2..8dd8e8ec9fa55 100644 ---- a/arch/x86/include/asm/segment.h -+++ b/arch/x86/include/asm/segment.h -@@ -339,7 +339,7 @@ static inline void __loadsegment_fs(unsigned short value) - "1: movw %0, %%fs \n" - "2: \n" - -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs) -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_CLEAR_FS) - - : : "rm" (value) : "memory"); - } -diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h -index 5b1ed650b1248..84eab27248754 100644 ---- a/arch/x86/include/asm/sigframe.h -+++ b/arch/x86/include/asm/sigframe.h -@@ -85,6 +85,4 @@ struct rt_sigframe_x32 { - - #endif /* CONFIG_X86_64 */ - --void __init init_sigframe_size(void); -- - #endif /* _ASM_X86_SIGFRAME_H */ -diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h -index f248eb2ac2d4a..3881b5333eb81 100644 ---- a/arch/x86/include/asm/stacktrace.h -+++ b/arch/x86/include/asm/stacktrace.h -@@ -38,6 +38,16 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, - bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, - struct stack_info *info); - -+static __always_inline -+bool get_stack_guard_info(unsigned long *stack, struct stack_info *info) -+{ -+ /* make sure it's not in the stack proper */ -+ if (get_stack_info_noinstr(stack, current, info)) -+ return false; -+ /* but if it is in the page below it, we hit a guard */ -+ return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info); -+} -+ - const char *stack_type_name(enum stack_type type); - - static inline bool on_stack(struct stack_info *info, void *addr, size_t len) -diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h -index cbb67b6030f97..491aadfac6117 100644 ---- a/arch/x86/include/asm/static_call.h -+++ b/arch/x86/include/asm/static_call.h -@@ -21,6 +21,16 @@ - * relative displacement across sections. - */ - -+/* -+ * The trampoline is 8 bytes and of the general form: -+ * -+ * jmp.d32 \func -+ * ud1 %esp, %ecx -+ * -+ * That trailing #UD provides both a speculation stop and serves as a unique -+ * 3 byte signature identifying static call trampolines. Also see tramp_ud[] -+ * and __static_call_fixup(). -+ */ - #define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \ - asm(".pushsection .static_call.text, \"ax\" \n" \ - ".align 4 \n" \ -@@ -34,8 +44,13 @@ - #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \ - __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. 
+ 4)") - -+#ifdef CONFIG_RETHUNK -+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ -+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk") -+#else - #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ -- __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop") -+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop") -+#endif - - - #define ARCH_ADD_TRAMP_KEY(name) \ -@@ -44,4 +59,6 @@ - ".long " STATIC_CALL_KEY_STR(name) " - . \n" \ - ".popsection \n") - -+extern bool __static_call_fixup(void *tramp, u8 op, void *dest); -+ - #endif /* _ASM_STATIC_CALL_H */ -diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h -index 7b132d0312ebf..a800abb1a9925 100644 ---- a/arch/x86/include/asm/suspend_32.h -+++ b/arch/x86/include/asm/suspend_32.h -@@ -19,7 +19,6 @@ struct saved_context { - u16 gs; - unsigned long cr0, cr2, cr3, cr4; - u64 misc_enable; -- bool misc_enable_saved; - struct saved_msrs saved_msrs; - struct desc_ptr gdt_desc; - struct desc_ptr idt; -@@ -28,6 +27,7 @@ struct saved_context { - unsigned long tr; - unsigned long safety; - unsigned long return_address; -+ bool misc_enable_saved; - } __attribute__((packed)); - - /* routines for saving/restoring kernel state */ -diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h -index 35bb35d28733e..54df06687d834 100644 ---- a/arch/x86/include/asm/suspend_64.h -+++ b/arch/x86/include/asm/suspend_64.h -@@ -14,9 +14,13 @@ - * Image of the saved processor state, used by the low level ACPI suspend to - * RAM code and by the low level hibernation code. - * -- * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that -- * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, -- * still work as required. -+ * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S -+ * and make sure that __save/__restore_processor_state(), defined in -+ * arch/x86/power/cpu.c, still work as required. -+ * -+ * Because the structure is packed, make sure to avoid unaligned members. For -+ * optimisation purposes but also because tools like kmemleak only search for -+ * pointers that are aligned. 
- */ - struct saved_context { - struct pt_regs regs; -@@ -36,7 +40,6 @@ struct saved_context { - - unsigned long cr0, cr2, cr3, cr4; - u64 misc_enable; -- bool misc_enable_saved; - struct saved_msrs saved_msrs; - unsigned long efer; - u16 gdt_pad; /* Unused */ -@@ -48,6 +51,7 @@ struct saved_context { - unsigned long tr; - unsigned long safety; - unsigned long return_address; -+ bool misc_enable_saved; - } __attribute__((packed)); - - #define loaddebug(thread,register) \ -diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h -index 6a2827d0681fc..e8ab7c1f1080a 100644 ---- a/arch/x86/include/asm/syscall_wrapper.h -+++ b/arch/x86/include/asm/syscall_wrapper.h -@@ -6,7 +6,7 @@ - #ifndef _ASM_X86_SYSCALL_WRAPPER_H - #define _ASM_X86_SYSCALL_WRAPPER_H - --struct pt_regs; -+#include - - extern long __x64_sys_ni_syscall(const struct pt_regs *regs); - extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); -diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h -index a4a8b1b16c0c1..956e4145311b1 100644 ---- a/arch/x86/include/asm/timex.h -+++ b/arch/x86/include/asm/timex.h -@@ -5,6 +5,15 @@ - #include - #include - -+static inline unsigned long random_get_entropy(void) -+{ -+ if (!IS_ENABLED(CONFIG_X86_TSC) && -+ !cpu_feature_enabled(X86_FEATURE_TSC)) -+ return random_get_entropy_fallback(); -+ return rdtsc(); -+} -+#define random_get_entropy random_get_entropy -+ - /* Assume we use the PIT time source for the clock tick */ - #define CLOCK_TICK_RATE PIT_TICK_RATE - -diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h -index 9239399e54914..55160445ea78b 100644 ---- a/arch/x86/include/asm/topology.h -+++ b/arch/x86/include/asm/topology.h -@@ -218,7 +218,7 @@ static inline void arch_set_max_freq_ratio(bool turbo_disabled) - } - #endif - --#ifdef CONFIG_ACPI_CPPC_LIB -+#if defined(CONFIG_ACPI_CPPC_LIB) && defined(CONFIG_SMP) - void init_freq_invariance_cppc(void); - #define init_freq_invariance_cppc init_freq_invariance_cppc - #endif -diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h -index 7f7200021bd13..1cdd7e8bcba78 100644 ---- a/arch/x86/include/asm/traps.h -+++ b/arch/x86/include/asm/traps.h -@@ -13,7 +13,7 @@ - #ifdef CONFIG_X86_64 - asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); - asmlinkage __visible notrace --struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s); -+struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs); - void __init trap_init(void); - asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs); - #endif -@@ -40,9 +40,9 @@ void math_emulate(struct math_emu_info *); - bool fault_in_kernel_space(unsigned long address); - - #ifdef CONFIG_VMAP_STACK --void __noreturn handle_stack_overflow(const char *message, -- struct pt_regs *regs, -- unsigned long fault_address); -+void __noreturn handle_stack_overflow(struct pt_regs *regs, -+ unsigned long fault_address, -+ struct stack_info *info); - #endif - - #endif /* _ASM_X86_TRAPS_H */ -diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h -index 01a300a9700b9..fbdc3d9514943 100644 ---- a/arch/x86/include/asm/tsc.h -+++ b/arch/x86/include/asm/tsc.h -@@ -20,13 +20,12 @@ extern void disable_TSC(void); - - static inline cycles_t get_cycles(void) - { --#ifndef CONFIG_X86_TSC -- if (!boot_cpu_has(X86_FEATURE_TSC)) -+ if (!IS_ENABLED(CONFIG_X86_TSC) && -+ !cpu_feature_enabled(X86_FEATURE_TSC)) - return 0; --#endif -- - return rdtsc(); - } 
-+#define get_cycles get_cycles - - extern struct system_counterval_t convert_art_to_tsc(u64 art); - extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); -diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h -index 5c95d242f38d7..ab5e577373093 100644 ---- a/arch/x86/include/asm/uaccess.h -+++ b/arch/x86/include/asm/uaccess.h -@@ -314,11 +314,12 @@ do { \ - do { \ - __chk_user_ptr(ptr); \ - switch (size) { \ -- unsigned char x_u8__; \ -- case 1: \ -+ case 1: { \ -+ unsigned char x_u8__; \ - __get_user_asm(x_u8__, ptr, "b", "=q", label); \ - (x) = x_u8__; \ - break; \ -+ } \ - case 2: \ - __get_user_asm(x, ptr, "w", "=r", label); \ - break; \ -@@ -413,6 +414,103 @@ do { \ - - #endif // CONFIG_CC_ASM_GOTO_OUTPUT - -+#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT -+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \ -+ bool success; \ -+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ -+ __typeof__(*(_ptr)) __old = *_old; \ -+ __typeof__(*(_ptr)) __new = (_new); \ -+ asm_volatile_goto("\n" \ -+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ -+ _ASM_EXTABLE_UA(1b, %l[label]) \ -+ : CC_OUT(z) (success), \ -+ [ptr] "+m" (*_ptr), \ -+ [old] "+a" (__old) \ -+ : [new] ltype (__new) \ -+ : "memory" \ -+ : label); \ -+ if (unlikely(!success)) \ -+ *_old = __old; \ -+ likely(success); }) -+ -+#ifdef CONFIG_X86_32 -+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \ -+ bool success; \ -+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ -+ __typeof__(*(_ptr)) __old = *_old; \ -+ __typeof__(*(_ptr)) __new = (_new); \ -+ asm_volatile_goto("\n" \ -+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ -+ _ASM_EXTABLE_UA(1b, %l[label]) \ -+ : CC_OUT(z) (success), \ -+ "+A" (__old), \ -+ [ptr] "+m" (*_ptr) \ -+ : "b" ((u32)__new), \ -+ "c" ((u32)((u64)__new >> 32)) \ -+ : "memory" \ -+ : label); \ -+ if (unlikely(!success)) \ -+ *_old = __old; \ -+ likely(success); }) -+#endif // CONFIG_X86_32 -+#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT -+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \ -+ int __err = 0; \ -+ bool success; \ -+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ -+ __typeof__(*(_ptr)) __old = *_old; \ -+ __typeof__(*(_ptr)) __new = (_new); \ -+ asm volatile("\n" \ -+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ -+ CC_SET(z) \ -+ "2:\n" \ -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \ -+ %[errout]) \ -+ : CC_OUT(z) (success), \ -+ [errout] "+r" (__err), \ -+ [ptr] "+m" (*_ptr), \ -+ [old] "+a" (__old) \ -+ : [new] ltype (__new) \ -+ : "memory"); \ -+ if (unlikely(__err)) \ -+ goto label; \ -+ if (unlikely(!success)) \ -+ *_old = __old; \ -+ likely(success); }) -+ -+#ifdef CONFIG_X86_32 -+/* -+ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error. -+ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are -+ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses -+ * both ESI and EDI for the memory operand, compilation will fail if the error -+ * is an input+output as there will be no register available for input. 
-+ */ -+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \ -+ int __result; \ -+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ -+ __typeof__(*(_ptr)) __old = *_old; \ -+ __typeof__(*(_ptr)) __new = (_new); \ -+ asm volatile("\n" \ -+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ -+ "mov $0, %%ecx\n\t" \ -+ "setz %%cl\n" \ -+ "2:\n" \ -+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \ -+ : [result]"=c" (__result), \ -+ "+A" (__old), \ -+ [ptr] "+m" (*_ptr) \ -+ : "b" ((u32)__new), \ -+ "c" ((u32)((u64)__new >> 32)) \ -+ : "memory", "cc"); \ -+ if (unlikely(__result < 0)) \ -+ goto label; \ -+ if (unlikely(!__result)) \ -+ *_old = __old; \ -+ likely(__result); }) -+#endif // CONFIG_X86_32 -+#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT -+ - /* FIXME: this hack is definitely wrong -AK */ - struct __large_struct { unsigned long buf[100]; }; - #define __m(x) (*(struct __large_struct __user *)(x)) -@@ -505,6 +603,51 @@ do { \ - } while (0) - #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT - -+extern void __try_cmpxchg_user_wrong_size(void); -+ -+#ifndef CONFIG_X86_32 -+#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \ -+ __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label) -+#endif -+ -+/* -+ * Force the pointer to u to match the size expected by the asm helper. -+ * clang/LLVM compiles all cases and only discards the unused paths after -+ * processing errors, which breaks i386 if the pointer is an 8-byte value. -+ */ -+#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \ -+ bool __ret; \ -+ __chk_user_ptr(_ptr); \ -+ switch (sizeof(*(_ptr))) { \ -+ case 1: __ret = __try_cmpxchg_user_asm("b", "q", \ -+ (__force u8 *)(_ptr), (_oldp), \ -+ (_nval), _label); \ -+ break; \ -+ case 2: __ret = __try_cmpxchg_user_asm("w", "r", \ -+ (__force u16 *)(_ptr), (_oldp), \ -+ (_nval), _label); \ -+ break; \ -+ case 4: __ret = __try_cmpxchg_user_asm("l", "r", \ -+ (__force u32 *)(_ptr), (_oldp), \ -+ (_nval), _label); \ -+ break; \ -+ case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\ -+ (_nval), _label); \ -+ break; \ -+ default: __try_cmpxchg_user_wrong_size(); \ -+ } \ -+ __ret; }) -+ -+/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */ -+#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \ -+ int __ret = -EFAULT; \ -+ __uaccess_begin_nospec(); \ -+ __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \ -+_label: \ -+ __uaccess_end(); \ -+ __ret; \ -+ }) -+ - /* - * We want the unsafe accessors to always be inlined and use - * the error labels - thus the macro games. 
-diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h -index 8e574c0afef80..56664b31b6dad 100644 ---- a/arch/x86/include/asm/unwind_hints.h -+++ b/arch/x86/include/asm/unwind_hints.h -@@ -8,7 +8,11 @@ - #ifdef __ASSEMBLY__ - - .macro UNWIND_HINT_EMPTY -- UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1 -+ UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1 -+.endm -+ -+.macro UNWIND_HINT_ENTRY -+ UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1 - .endm - - .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 -@@ -52,6 +56,14 @@ - UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC - .endm - -+.macro UNWIND_HINT_SAVE -+ UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE -+.endm -+ -+.macro UNWIND_HINT_RESTORE -+ UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE -+.endm -+ - #endif /* __ASSEMBLY__ */ - - #endif /* _ASM_X86_UNWIND_HINTS_H */ -diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h -index 8757078d4442a..6c2e3ff3cb28f 100644 ---- a/arch/x86/include/asm/virtext.h -+++ b/arch/x86/include/asm/virtext.h -@@ -101,12 +101,6 @@ static inline int cpu_has_svm(const char **msg) - return 0; - } - -- if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) { -- if (msg) -- *msg = "can't execute cpuid_8000000a"; -- return 0; -- } -- - if (!boot_cpu_has(X86_FEATURE_SVM)) { - if (msg) - *msg = "svm not available"; -@@ -126,7 +120,21 @@ static inline void cpu_svm_disable(void) - - wrmsrl(MSR_VM_HSAVE_PA, 0); - rdmsrl(MSR_EFER, efer); -- wrmsrl(MSR_EFER, efer & ~EFER_SVME); -+ if (efer & EFER_SVME) { -+ /* -+ * Force GIF=1 prior to disabling SVM to ensure INIT and NMI -+ * aren't blocked, e.g. if a fatal error occurred between CLGI -+ * and STGI. Note, STGI may #UD if SVM is disabled from NMI -+ * context between reading EFER and executing STGI. In that -+ * case, GIF must already be set, otherwise the NMI would have -+ * been blocked, so just eat the fault. -+ */ -+ asm_volatile_goto("1: stgi\n\t" -+ _ASM_EXTABLE(1b, %l[fault]) -+ ::: "memory" : fault); -+fault: -+ wrmsrl(MSR_EFER, efer & ~EFER_SVME); -+ } - } - - /** Makes sure SVM is disabled, if it is supported on the CPU -diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h -index ff4b52e37e60d..5adab895127e1 100644 ---- a/arch/x86/include/asm/xen/hypervisor.h -+++ b/arch/x86/include/asm/xen/hypervisor.h -@@ -62,4 +62,9 @@ void xen_arch_register_cpu(int num); - void xen_arch_unregister_cpu(int num); - #endif - -+#ifdef CONFIG_PVH -+void __init xen_pvh_init(struct boot_params *boot_params); -+void __init mem_map_via_hcall(struct boot_params *boot_params_p); -+#endif -+ - #endif /* _ASM_X86_XEN_HYPERVISOR_H */ -diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile -index 8f4e8fa6ed759..2ff3e600f4269 100644 ---- a/arch/x86/kernel/Makefile -+++ b/arch/x86/kernel/Makefile -@@ -21,6 +21,7 @@ CFLAGS_REMOVE_ftrace.o = -pg - CFLAGS_REMOVE_early_printk.o = -pg - CFLAGS_REMOVE_head64.o = -pg - CFLAGS_REMOVE_sev.o = -pg -+CFLAGS_REMOVE_cc_platform.o = -pg - endif - - KASAN_SANITIZE_head$(BITS).o := n -@@ -29,6 +30,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n - KASAN_SANITIZE_stacktrace.o := n - KASAN_SANITIZE_paravirt.o := n - KASAN_SANITIZE_sev.o := n -+KASAN_SANITIZE_cc_platform.o := n - - # With some compiler versions the generated code results in boot hangs, caused - # by several compilation units. To be safe, disable all instrumentation. 
-@@ -47,6 +49,7 @@ endif - KCOV_INSTRUMENT := n - - CFLAGS_head$(BITS).o += -fno-stack-protector -+CFLAGS_cc_platform.o += -fno-stack-protector - - CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace - -@@ -147,6 +150,9 @@ obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o - obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o - - obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o -+ -+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += cc_platform.o -+ - ### - # 64 bit specific files - ifeq ($(CONFIG_X86_64),y) -diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c -index 14bcd59bcdee2..94ac7402c1ac2 100644 ---- a/arch/x86/kernel/acpi/boot.c -+++ b/arch/x86/kernel/acpi/boot.c -@@ -1319,6 +1319,17 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d) - return 0; - } - -+static int __init disable_acpi_xsdt(const struct dmi_system_id *d) -+{ -+ if (!acpi_force) { -+ pr_notice("%s detected: force use of acpi=rsdt\n", d->ident); -+ acpi_gbl_do_not_use_xsdt = TRUE; -+ } else { -+ pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n"); -+ } -+ return 0; -+} -+ - static int __init dmi_disable_acpi(const struct dmi_system_id *d) - { - if (!acpi_force) { -@@ -1442,6 +1453,19 @@ static const struct dmi_system_id acpi_dmi_table[] __initconst = { - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), - }, - }, -+ /* -+ * Boxes that need ACPI XSDT use disabled due to corrupted tables -+ */ -+ { -+ .callback = disable_acpi_xsdt, -+ .ident = "Advantech DAC-BJ01", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "NEC"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"), -+ DMI_MATCH(DMI_BIOS_VERSION, "V1.12"), -+ DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"), -+ }, -+ }, - {} - }; - -diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c -index 7de599eba7f04..7945eae5b315f 100644 ---- a/arch/x86/kernel/acpi/cstate.c -+++ b/arch/x86/kernel/acpi/cstate.c -@@ -79,6 +79,21 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, - */ - flags->bm_control = 0; - } -+ if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) { -+ /* -+ * For all AMD Zen or newer CPUs that support C3, caches -+ * should not be flushed by software while entering C3 -+ * type state. Set bm->check to 1 so that kernel doesn't -+ * need to execute cache flush operation. -+ */ -+ flags->bm_check = 1; -+ /* -+ * In current AMD C state implementation ARB_DIS is no longer -+ * used. So set bm_control to zero to indicate ARB_DIS is not -+ * required while entering C3 type state. 
-+ */ -+ flags->bm_control = 0; -+ } - } - EXPORT_SYMBOL(acpi_processor_power_init_bm_check); - -diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S -index daf88f8143c5f..cf69081073b54 100644 ---- a/arch/x86/kernel/acpi/wakeup_32.S -+++ b/arch/x86/kernel/acpi/wakeup_32.S -@@ -60,7 +60,7 @@ save_registers: - popl saved_context_eflags - - movl $ret_point, saved_eip -- ret -+ RET - - - restore_registers: -@@ -70,7 +70,7 @@ restore_registers: - movl saved_context_edi, %edi - pushl saved_context_eflags - popfl -- ret -+ RET - - SYM_CODE_START(do_suspend_lowlevel) - call save_processor_state -@@ -86,7 +86,7 @@ SYM_CODE_START(do_suspend_lowlevel) - ret_point: - call restore_registers - call restore_processor_state -- ret -+ RET - SYM_CODE_END(do_suspend_lowlevel) - - .data -diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c -index e9da3dc712541..43dd7f281a216 100644 ---- a/arch/x86/kernel/alternative.c -+++ b/arch/x86/kernel/alternative.c -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - - int __read_mostly alternatives_patched; - -@@ -113,6 +114,8 @@ static void __init_or_module add_nops(void *insns, unsigned int len) - } - } - -+extern s32 __retpoline_sites[], __retpoline_sites_end[]; -+extern s32 __return_sites[], __return_sites_end[]; - extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; - extern s32 __smp_locks[], __smp_locks_end[]; - void text_poke_early(void *addr, const void *opcode, size_t len); -@@ -221,7 +224,7 @@ static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off) - * "noinline" to cause control flow change and thus invalidate I$ and - * cause refetch after modification. - */ --static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) -+static void __init_or_module noinline optimize_nops(u8 *instr, size_t len) - { - struct insn insn; - int i = 0; -@@ -239,11 +242,11 @@ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *ins - * optimized. - */ - if (insn.length == 1 && insn.opcode.bytes[0] == 0x90) -- i += optimize_nops_range(instr, a->instrlen, i); -+ i += optimize_nops_range(instr, len, i); - else - i += insn.length; - -- if (i >= a->instrlen) -+ if (i >= len) - return; - } - } -@@ -331,10 +334,254 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, - text_poke_early(instr, insn_buff, insn_buff_sz); - - next: -- optimize_nops(a, instr); -+ optimize_nops(instr, a->instrlen); - } - } - -+#if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION) -+ -+/* -+ * CALL/JMP *%\reg -+ */ -+static int emit_indirect(int op, int reg, u8 *bytes) -+{ -+ int i = 0; -+ u8 modrm; -+ -+ switch (op) { -+ case CALL_INSN_OPCODE: -+ modrm = 0x10; /* Reg = 2; CALL r/m */ -+ break; -+ -+ case JMP32_INSN_OPCODE: -+ modrm = 0x20; /* Reg = 4; JMP r/m */ -+ break; -+ -+ default: -+ WARN_ON_ONCE(1); -+ return -1; -+ } -+ -+ if (reg >= 8) { -+ bytes[i++] = 0x41; /* REX.B prefix */ -+ reg -= 8; -+ } -+ -+ modrm |= 0xc0; /* Mod = 3 */ -+ modrm += reg; -+ -+ bytes[i++] = 0xff; /* opcode */ -+ bytes[i++] = modrm; -+ -+ return i; -+} -+ -+/* -+ * Rewrite the compiler generated retpoline thunk calls. -+ * -+ * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate -+ * indirect instructions, avoiding the extra indirection. 
-+ * -+ * For example, convert: -+ * -+ * CALL __x86_indirect_thunk_\reg -+ * -+ * into: -+ * -+ * CALL *%\reg -+ * -+ * It also tries to inline spectre_v2=retpoline,amd when size permits. -+ */ -+static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) -+{ -+ retpoline_thunk_t *target; -+ int reg, ret, i = 0; -+ u8 op, cc; -+ -+ target = addr + insn->length + insn->immediate.value; -+ reg = target - __x86_indirect_thunk_array; -+ -+ if (WARN_ON_ONCE(reg & ~0xf)) -+ return -1; -+ -+ /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */ -+ BUG_ON(reg == 4); -+ -+ if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && -+ !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) -+ return -1; -+ -+ op = insn->opcode.bytes[0]; -+ -+ /* -+ * Convert: -+ * -+ * Jcc.d32 __x86_indirect_thunk_\reg -+ * -+ * into: -+ * -+ * Jncc.d8 1f -+ * [ LFENCE ] -+ * JMP *%\reg -+ * [ NOP ] -+ * 1: -+ */ -+ /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ -+ if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) { -+ cc = insn->opcode.bytes[1] & 0xf; -+ cc ^= 1; /* invert condition */ -+ -+ bytes[i++] = 0x70 + cc; /* Jcc.d8 */ -+ bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */ -+ -+ /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */ -+ op = JMP32_INSN_OPCODE; -+ } -+ -+ /* -+ * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE. -+ */ -+ if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { -+ bytes[i++] = 0x0f; -+ bytes[i++] = 0xae; -+ bytes[i++] = 0xe8; /* LFENCE */ -+ } -+ -+ ret = emit_indirect(op, reg, bytes + i); -+ if (ret < 0) -+ return ret; -+ i += ret; -+ -+ for (; i < insn->length;) -+ bytes[i++] = BYTES_NOP1; -+ -+ return i; -+} -+ -+/* -+ * Generated by 'objtool --retpoline'. -+ */ -+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) -+{ -+ s32 *s; -+ -+ for (s = start; s < end; s++) { -+ void *addr = (void *)s + *s; -+ struct insn insn; -+ int len, ret; -+ u8 bytes[16]; -+ u8 op1, op2; -+ -+ ret = insn_decode_kernel(&insn, addr); -+ if (WARN_ON_ONCE(ret < 0)) -+ continue; -+ -+ op1 = insn.opcode.bytes[0]; -+ op2 = insn.opcode.bytes[1]; -+ -+ switch (op1) { -+ case CALL_INSN_OPCODE: -+ case JMP32_INSN_OPCODE: -+ break; -+ -+ case 0x0f: /* escape */ -+ if (op2 >= 0x80 && op2 <= 0x8f) -+ break; -+ fallthrough; -+ default: -+ WARN_ON_ONCE(1); -+ continue; -+ } -+ -+ DPRINTK("retpoline at: %pS (%px) len: %d to: %pS", -+ addr, addr, insn.length, -+ addr + insn.length + insn.immediate.value); -+ -+ len = patch_retpoline(addr, &insn, bytes); -+ if (len == insn.length) { -+ optimize_nops(bytes, len); -+ DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr); -+ DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr); -+ text_poke_early(addr, bytes, len); -+ } -+ } -+} -+ -+#ifdef CONFIG_RETHUNK -+/* -+ * Rewrite the compiler generated return thunk tail-calls. 
-+ * -+ * For example, convert: -+ * -+ * JMP __x86_return_thunk -+ * -+ * into: -+ * -+ * RET -+ */ -+static int patch_return(void *addr, struct insn *insn, u8 *bytes) -+{ -+ int i = 0; -+ -+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) -+ return -1; -+ -+ bytes[i++] = RET_INSN_OPCODE; -+ -+ for (; i < insn->length;) -+ bytes[i++] = INT3_INSN_OPCODE; -+ -+ return i; -+} -+ -+void __init_or_module noinline apply_returns(s32 *start, s32 *end) -+{ -+ s32 *s; -+ -+ for (s = start; s < end; s++) { -+ void *dest = NULL, *addr = (void *)s + *s; -+ struct insn insn; -+ int len, ret; -+ u8 bytes[16]; -+ u8 op; -+ -+ ret = insn_decode_kernel(&insn, addr); -+ if (WARN_ON_ONCE(ret < 0)) -+ continue; -+ -+ op = insn.opcode.bytes[0]; -+ if (op == JMP32_INSN_OPCODE) -+ dest = addr + insn.length + insn.immediate.value; -+ -+ if (__static_call_fixup(addr, op, dest) || -+ WARN_ONCE(dest != &__x86_return_thunk, -+ "missing return thunk: %pS-%pS: %*ph", -+ addr, dest, 5, addr)) -+ continue; -+ -+ DPRINTK("return thunk at: %pS (%px) len: %d to: %pS", -+ addr, addr, insn.length, -+ addr + insn.length + insn.immediate.value); -+ -+ len = patch_return(addr, &insn, bytes); -+ if (len == insn.length) { -+ DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr); -+ DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr); -+ text_poke_early(addr, bytes, len); -+ } -+ } -+} -+#else -+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } -+#endif /* CONFIG_RETHUNK */ -+ -+#else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */ -+ -+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { } -+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } -+ -+#endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */ -+ - #ifdef CONFIG_SMP - static void alternatives_smp_lock(const s32 *start, const s32 *end, - u8 *text, u8 *text_end) -@@ -537,7 +784,7 @@ asm ( - " .type int3_magic, @function\n" - "int3_magic:\n" - " movl $1, (%" _ASM_ARG1 ")\n" --" ret\n" -+ ASM_RET - " .size int3_magic, .-int3_magic\n" - " .popsection\n" - ); -@@ -642,6 +889,13 @@ void __init alternative_instructions(void) - */ - apply_paravirt(__parainstructions, __parainstructions_end); - -+ /* -+ * Rewrite the retpolines, must be done before alternatives since -+ * those can rewrite the retpoline thunks. -+ */ -+ apply_retpolines(__retpoline_sites, __retpoline_sites_end); -+ apply_returns(__return_sites, __return_sites_end); -+ - /* - * Then patch alternatives, such that those paravirt calls that are in - * alternatives can be overwritten by their immediate fragments. 
-@@ -930,10 +1184,13 @@ void text_poke_sync(void) - } - - struct text_poke_loc { -- s32 rel_addr; /* addr := _stext + rel_addr */ -- s32 rel32; -+ /* addr := _stext + rel_addr */ -+ s32 rel_addr; -+ s32 disp; -+ u8 len; - u8 opcode; - const u8 text[POKE_MAX_OPCODE_SIZE]; -+ /* see text_poke_bp_batch() */ - u8 old; - }; - -@@ -943,21 +1200,23 @@ struct bp_patching_desc { - atomic_t refs; - }; - --static struct bp_patching_desc *bp_desc; -+static struct bp_patching_desc bp_desc; - - static __always_inline --struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp) -+struct bp_patching_desc *try_get_desc(void) - { -- struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */ -+ struct bp_patching_desc *desc = &bp_desc; - -- if (!desc || !arch_atomic_inc_not_zero(&desc->refs)) -+ if (!arch_atomic_inc_not_zero(&desc->refs)) - return NULL; - - return desc; - } - --static __always_inline void put_desc(struct bp_patching_desc *desc) -+static __always_inline void put_desc(void) - { -+ struct bp_patching_desc *desc = &bp_desc; -+ - smp_mb__before_atomic(); - arch_atomic_dec(&desc->refs); - } -@@ -982,7 +1241,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) - { - struct bp_patching_desc *desc; - struct text_poke_loc *tp; -- int len, ret = 0; -+ int ret = 0; - void *ip; - - if (user_mode(regs)) -@@ -990,15 +1249,15 @@ noinstr int poke_int3_handler(struct pt_regs *regs) - - /* - * Having observed our INT3 instruction, we now must observe -- * bp_desc: -+ * bp_desc with non-zero refcount: - * -- * bp_desc = desc INT3 -+ * bp_desc.refs = 1 INT3 - * WMB RMB -- * write INT3 if (desc) -+ * write INT3 if (bp_desc.refs != 0) - */ - smp_rmb(); - -- desc = try_get_desc(&bp_desc); -+ desc = try_get_desc(); - if (!desc) - return 0; - -@@ -1022,8 +1281,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) - goto out_put; - } - -- len = text_opcode_size(tp->opcode); -- ip += len; -+ ip += tp->len; - - switch (tp->opcode) { - case INT3_INSN_OPCODE: -@@ -1038,12 +1296,12 @@ noinstr int poke_int3_handler(struct pt_regs *regs) - break; - - case CALL_INSN_OPCODE: -- int3_emulate_call(regs, (long)ip + tp->rel32); -+ int3_emulate_call(regs, (long)ip + tp->disp); - break; - - case JMP32_INSN_OPCODE: - case JMP8_INSN_OPCODE: -- int3_emulate_jmp(regs, (long)ip + tp->rel32); -+ int3_emulate_jmp(regs, (long)ip + tp->disp); - break; - - default: -@@ -1053,7 +1311,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs) - ret = 1; - - out_put: -- put_desc(desc); -+ put_desc(); - return ret; - } - -@@ -1084,18 +1342,20 @@ static int tp_vec_nr; - */ - static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) - { -- struct bp_patching_desc desc = { -- .vec = tp, -- .nr_entries = nr_entries, -- .refs = ATOMIC_INIT(1), -- }; - unsigned char int3 = INT3_INSN_OPCODE; - unsigned int i; - int do_sync; - - lockdep_assert_held(&text_mutex); - -- smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */ -+ bp_desc.vec = tp; -+ bp_desc.nr_entries = nr_entries; -+ -+ /* -+ * Corresponds to the implicit memory barrier in try_get_desc() to -+ * ensure reading a non-zero refcount provides up to date bp_desc data. 
-+ */ -+ atomic_set_release(&bp_desc.refs, 1); - - /* - * Corresponding read barrier in int3 notifier for making sure the -@@ -1118,7 +1378,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries - */ - for (do_sync = 0, i = 0; i < nr_entries; i++) { - u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, }; -- int len = text_opcode_size(tp[i].opcode); -+ int len = tp[i].len; - - if (len - INT3_INSN_SIZE > 0) { - memcpy(old + INT3_INSN_SIZE, -@@ -1183,32 +1443,46 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries - text_poke_sync(); - - /* -- * Remove and synchronize_rcu(), except we have a very primitive -- * refcount based completion. -+ * Remove and wait for refs to be zero. - */ -- WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */ -- if (!atomic_dec_and_test(&desc.refs)) -- atomic_cond_read_acquire(&desc.refs, !VAL); -+ if (!atomic_dec_and_test(&bp_desc.refs)) -+ atomic_cond_read_acquire(&bp_desc.refs, !VAL); - } - - static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, - const void *opcode, size_t len, const void *emulate) - { - struct insn insn; -- int ret; -+ int ret, i; - - memcpy((void *)tp->text, opcode, len); - if (!emulate) - emulate = opcode; - - ret = insn_decode_kernel(&insn, emulate); -- - BUG_ON(ret < 0); -- BUG_ON(len != insn.length); - - tp->rel_addr = addr - (void *)_stext; -+ tp->len = len; - tp->opcode = insn.opcode.bytes[0]; - -+ switch (tp->opcode) { -+ case RET_INSN_OPCODE: -+ case JMP32_INSN_OPCODE: -+ case JMP8_INSN_OPCODE: -+ /* -+ * Control flow instructions without implied execution of the -+ * next instruction can be padded with INT3. -+ */ -+ for (i = insn.length; i < len; i++) -+ BUG_ON(tp->text[i] != INT3_INSN_OPCODE); -+ break; -+ -+ default: -+ BUG_ON(len != insn.length); -+ }; -+ -+ - switch (tp->opcode) { - case INT3_INSN_OPCODE: - case RET_INSN_OPCODE: -@@ -1217,7 +1491,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, - case CALL_INSN_OPCODE: - case JMP32_INSN_OPCODE: - case JMP8_INSN_OPCODE: -- tp->rel32 = insn.immediate.value; -+ tp->disp = insn.immediate.value; - break; - - default: /* assume NOP */ -@@ -1225,13 +1499,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, - case 2: /* NOP2 -- emulate as JMP8+0 */ - BUG_ON(memcmp(emulate, x86_nops[len], len)); - tp->opcode = JMP8_INSN_OPCODE; -- tp->rel32 = 0; -+ tp->disp = 0; - break; - - case 5: /* NOP5 -- emulate as JMP32+0 */ - BUG_ON(memcmp(emulate, x86_nops[len], len)); - tp->opcode = JMP32_INSN_OPCODE; -- tp->rel32 = 0; -+ tp->disp = 0; - break; - - default: /* unknown instruction */ -diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c -index b70344bf66008..4df7d694369a5 100644 ---- a/arch/x86/kernel/apic/apic.c -+++ b/arch/x86/kernel/apic/apic.c -@@ -170,7 +170,7 @@ static __init int setup_apicpmtimer(char *s) - { - apic_calibrate_pmtmr = 1; - notsc_setup(NULL); -- return 0; -+ return 1; - } - __setup("apicpmtimer", setup_apicpmtimer); - #endif -@@ -412,10 +412,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new) - if (vector && !eilvt_entry_is_changeable(vector, new)) - /* may not change if vectors are different */ - return rsvd; -- rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); -- } while (rsvd != new); -+ } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new)); - -- rsvd &= ~APIC_EILVT_MASKED; -+ rsvd = new & ~APIC_EILVT_MASKED; - if (rsvd && rsvd != vector) - pr_info("LVT offset %d assigned for vector 0x%02x\n", - 
offset, rsvd); -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index c1bb384935b05..bb71b628edcb4 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -2479,17 +2479,21 @@ static int io_apic_get_redir_entries(int ioapic) - - unsigned int arch_dynirq_lower_bound(unsigned int from) - { -+ unsigned int ret; -+ - /* - * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use - * gsi_top if ioapic_dynirq_base hasn't been initialized yet. - */ -- if (!ioapic_initialized) -- return gsi_top; -+ ret = ioapic_dynirq_base ? : gsi_top; -+ - /* -- * For DT enabled machines ioapic_dynirq_base is irrelevant and not -- * updated. So simply return @from if ioapic_dynirq_base == 0. -+ * For DT enabled machines ioapic_dynirq_base is irrelevant and -+ * always 0. gsi_top can be 0 if there is no IO/APIC registered. -+ * 0 is an invalid interrupt number for dynamic allocations. Return -+ * @from instead. - */ -- return ioapic_dynirq_base ? : from; -+ return ret ? : from; - } - - #ifdef CONFIG_X86_32 -diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c -index 6bde05a86b4ed..896bc41cb2ba7 100644 ---- a/arch/x86/kernel/apic/x2apic_phys.c -+++ b/arch/x86/kernel/apic/x2apic_phys.c -@@ -97,7 +97,10 @@ static void init_x2apic_ldr(void) - - static int x2apic_phys_probe(void) - { -- if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) -+ if (!x2apic_mode) -+ return 0; -+ -+ if (x2apic_phys || x2apic_fadt_phys()) - return 1; - - return apic == &apic_x2apic_phys; -diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c -index f5a48e66e4f54..a6e9c2794ef56 100644 ---- a/arch/x86/kernel/apic/x2apic_uv_x.c -+++ b/arch/x86/kernel/apic/x2apic_uv_x.c -@@ -199,7 +199,13 @@ static void __init uv_tsc_check_sync(void) - int mmr_shift; - char *state; - -- /* Different returns from different UV BIOS versions */ -+ /* UV5 guarantees synced TSCs; do not zero TSC_ADJUST */ -+ if (!is_uv(UV2|UV3|UV4)) { -+ mark_tsc_async_resets("UV5+"); -+ return; -+ } -+ -+ /* UV2,3,4, UV BIOS TSC sync state available */ - mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR); - mmr_shift = - is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT; -diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c -index 241dda687eb9f..06978a1194f24 100644 ---- a/arch/x86/kernel/apm_32.c -+++ b/arch/x86/kernel/apm_32.c -@@ -237,12 +237,6 @@ - extern int (*console_blank_hook)(int); - #endif - --/* -- * The apm_bios device is one of the misc char devices. -- * This is its minor number. -- */ --#define APM_MINOR_DEV 134 -- - /* - * Various options can be changed at boot time as follows: - * (We allow underscores for compatibility with the modules code) -diff --git a/arch/x86/kernel/cc_platform.c b/arch/x86/kernel/cc_platform.c -new file mode 100644 -index 0000000000000..03bb2f343ddb7 ---- /dev/null -+++ b/arch/x86/kernel/cc_platform.c -@@ -0,0 +1,69 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Confidential Computing Platform Capability checks -+ * -+ * Copyright (C) 2021 Advanced Micro Devices, Inc. -+ * -+ * Author: Tom Lendacky -+ */ -+ -+#include -+#include -+#include -+ -+#include -+ -+static bool __maybe_unused intel_cc_platform_has(enum cc_attr attr) -+{ -+#ifdef CONFIG_INTEL_TDX_GUEST -+ return false; -+#else -+ return false; -+#endif -+} -+ -+/* -+ * SME and SEV are very similar but they are not the same, so there are -+ * times that the kernel will need to distinguish between SME and SEV. 
The -+ * cc_platform_has() function is used for this. When a distinction isn't -+ * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used. -+ * -+ * The trampoline code is a good example for this requirement. Before -+ * paging is activated, SME will access all memory as decrypted, but SEV -+ * will access all memory as encrypted. So, when APs are being brought -+ * up under SME the trampoline area cannot be encrypted, whereas under SEV -+ * the trampoline area must be encrypted. -+ */ -+static bool amd_cc_platform_has(enum cc_attr attr) -+{ -+#ifdef CONFIG_AMD_MEM_ENCRYPT -+ switch (attr) { -+ case CC_ATTR_MEM_ENCRYPT: -+ return sme_me_mask; -+ -+ case CC_ATTR_HOST_MEM_ENCRYPT: -+ return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED); -+ -+ case CC_ATTR_GUEST_MEM_ENCRYPT: -+ return sev_status & MSR_AMD64_SEV_ENABLED; -+ -+ case CC_ATTR_GUEST_STATE_ENCRYPT: -+ return sev_status & MSR_AMD64_SEV_ES_ENABLED; -+ -+ default: -+ return false; -+ } -+#else -+ return false; -+#endif -+} -+ -+ -+bool cc_platform_has(enum cc_attr attr) -+{ -+ if (sme_me_mask) -+ return amd_cc_platform_has(attr); -+ -+ return false; -+} -+EXPORT_SYMBOL_GPL(cc_platform_has); -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 2131af9f2fa23..0a0230bd5089a 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -27,11 +27,6 @@ - - #include "cpu.h" - --static const int amd_erratum_383[]; --static const int amd_erratum_400[]; --static const int amd_erratum_1054[]; --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); -- - /* - * nodes_per_socket: Stores the number of nodes per socket. - * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX -@@ -39,6 +34,83 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); - */ - static u32 nodes_per_socket = 1; - -+/* -+ * AMD errata checking -+ * -+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that -+ * have an OSVW id assigned, which it takes as first argument. Both take a -+ * variable number of family-specific model-stepping ranges created by -+ * AMD_MODEL_RANGE(). -+ * -+ * Example: -+ * -+ * const int amd_erratum_319[] = -+ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -+ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -+ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -+ */ -+ -+#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -+#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } -+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -+ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -+#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -+#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -+#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -+ -+static const int amd_erratum_400[] = -+ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -+ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -+ -+static const int amd_erratum_383[] = -+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -+ -+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ -+static const int amd_erratum_1054[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -+ -+static const int amd_zenbleed[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), -+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); -+ -+static const int amd_div0[] = -+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), -+ AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); -+ -+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -+{ -+ int osvw_id = *erratum++; -+ u32 range; -+ u32 ms; -+ -+ if (osvw_id >= 0 && osvw_id < 65536 && -+ cpu_has(cpu, X86_FEATURE_OSVW)) { -+ u64 osvw_len; -+ -+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -+ if (osvw_id < osvw_len) { -+ u64 osvw_bits; -+ -+ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -+ osvw_bits); -+ return osvw_bits & (1ULL << (osvw_id & 0x3f)); -+ } -+ } -+ -+ /* OSVW unavailable or ID unknown, match family-model-stepping range */ -+ ms = (cpu->x86_model << 4) | cpu->x86_stepping; -+ while ((range = *erratum++)) -+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -+ (ms >= AMD_MODEL_RANGE_START(range)) && -+ (ms <= AMD_MODEL_RANGE_END(range))) -+ return true; -+ -+ return false; -+} -+ - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) - { - u32 gprs[8] = { 0 }; -@@ -794,8 +866,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c) - set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); - } - --#define MSR_AMD64_DE_CFG 0xC0011029 -- - static void init_amd_ln(struct cpuinfo_x86 *c) - { - /* -@@ -886,6 +956,37 @@ static void init_amd_bd(struct cpuinfo_x86 *c) - clear_rdrand_cpuid_bit(c); - } - -+void init_spectral_chicken(struct cpuinfo_x86 *c) -+{ -+#ifdef CONFIG_CPU_UNRET_ENTRY -+ u64 value; -+ -+ /* -+ * On Zen2 we offer this chicken (bit) on the altar of Speculation. -+ * -+ * This suppresses speculation from the middle of a basic block, i.e. it -+ * suppresses non-branch predictions. -+ * -+ * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H -+ */ -+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { -+ if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { -+ value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; -+ wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); -+ } -+ } -+#endif -+ /* -+ * Work around Erratum 1386. The XSAVES instruction malfunctions in -+ * certain circumstances on Zen1/2 uarch, and not all parts have had -+ * updated microcode at the time of writing (March 2023). -+ * -+ * Affected parts all have no supervisor XSAVE states, meaning that -+ * the XSAVEC instruction (which works fine) is equivalent. 
-+ */ -+ clear_cpu_cap(c, X86_FEATURE_XSAVES); -+} -+ - static void init_amd_zn(struct cpuinfo_x86 *c) - { - set_cpu_cap(c, X86_FEATURE_ZEN); -@@ -894,12 +995,62 @@ static void init_amd_zn(struct cpuinfo_x86 *c) - node_reclaim_distance = 32; - #endif - -- /* -- * Fix erratum 1076: CPB feature bit not being set in CPUID. -- * Always set it, except when running under a hypervisor. -- */ -- if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) -- set_cpu_cap(c, X86_FEATURE_CPB); -+ /* Fix up CPUID bits, but only if not virtualised. */ -+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { -+ -+ /* Erratum 1076: CPB feature bit not being set in CPUID. */ -+ if (!cpu_has(c, X86_FEATURE_CPB)) -+ set_cpu_cap(c, X86_FEATURE_CPB); -+ -+ /* -+ * Zen3 (Fam19 model < 0x10) parts are not susceptible to -+ * Branch Type Confusion, but predate the allocation of the -+ * BTC_NO bit. -+ */ -+ if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) -+ set_cpu_cap(c, X86_FEATURE_BTC_NO); -+ } -+} -+ -+static bool cpu_has_zenbleed_microcode(void) -+{ -+ u32 good_rev = 0; -+ -+ switch (boot_cpu_data.x86_model) { -+ case 0x30 ... 0x3f: good_rev = 0x0830107a; break; -+ case 0x60 ... 0x67: good_rev = 0x0860010b; break; -+ case 0x68 ... 0x6f: good_rev = 0x08608105; break; -+ case 0x70 ... 0x7f: good_rev = 0x08701032; break; -+ case 0xa0 ... 0xaf: good_rev = 0x08a00008; break; -+ -+ default: -+ return false; -+ break; -+ } -+ -+ if (boot_cpu_data.microcode < good_rev) -+ return false; -+ -+ return true; -+} -+ -+static void zenbleed_check(struct cpuinfo_x86 *c) -+{ -+ if (!cpu_has_amd_erratum(c, amd_zenbleed)) -+ return; -+ -+ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) -+ return; -+ -+ if (!cpu_has(c, X86_FEATURE_AVX)) -+ return; -+ -+ if (!cpu_has_zenbleed_microcode()) { -+ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); -+ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } else { -+ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); -+ } - } - - static void init_amd(struct cpuinfo_x86 *c) -@@ -931,7 +1082,8 @@ static void init_amd(struct cpuinfo_x86 *c) - case 0x12: init_amd_ln(c); break; - case 0x15: init_amd_bd(c); break; - case 0x16: init_amd_jg(c); break; -- case 0x17: fallthrough; -+ case 0x17: init_spectral_chicken(c); -+ fallthrough; - case 0x19: init_amd_zn(c); break; - } - -@@ -958,8 +1110,8 @@ static void init_amd(struct cpuinfo_x86 *c) - * msr_set_bit() uses the safe accessors, too, even if the MSR - * is not present. - */ -- msr_set_bit(MSR_F10H_DECFG, -- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); -+ msr_set_bit(MSR_AMD64_DE_CFG, -+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); - - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); -@@ -989,6 +1141,15 @@ static void init_amd(struct cpuinfo_x86 *c) - if (cpu_has(c, X86_FEATURE_IRPERF) && - !cpu_has_amd_erratum(c, amd_erratum_1054)) - msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); -+ -+ check_null_seg_clears_base(c); -+ -+ zenbleed_check(c); -+ -+ if (cpu_has_amd_erratum(c, amd_div0)) { -+ pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); -+ setup_force_cpu_bug(X86_BUG_DIV0); -+ } - } - - #ifdef CONFIG_X86_32 -@@ -1084,73 +1245,6 @@ static const struct cpu_dev amd_cpu_dev = { - - cpu_dev_register(amd_cpu_dev); - --/* -- * AMD errata checking -- * -- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or -- * AMD_OSVW_ERRATUM() macros. 
The latter is intended for newer errata that -- * have an OSVW id assigned, which it takes as first argument. Both take a -- * variable number of family-specific model-stepping ranges created by -- * AMD_MODEL_RANGE(). -- * -- * Example: -- * -- * const int amd_erratum_319[] = -- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), -- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), -- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); -- */ -- --#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } --#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } --#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ -- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) --#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) --#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) --#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -- --static const int amd_erratum_400[] = -- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), -- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -- --static const int amd_erratum_383[] = -- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -- --/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ --static const int amd_erratum_1054[] = -- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); -- --static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) --{ -- int osvw_id = *erratum++; -- u32 range; -- u32 ms; -- -- if (osvw_id >= 0 && osvw_id < 65536 && -- cpu_has(cpu, X86_FEATURE_OSVW)) { -- u64 osvw_len; -- -- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); -- if (osvw_id < osvw_len) { -- u64 osvw_bits; -- -- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), -- osvw_bits); -- return osvw_bits & (1ULL << (osvw_id & 0x3f)); -- } -- } -- -- /* OSVW unavailable or ID unknown, match family-model-stepping range */ -- ms = (cpu->x86_model << 4) | cpu->x86_stepping; -- while ((range = *erratum++)) -- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && -- (ms >= AMD_MODEL_RANGE_START(range)) && -- (ms <= AMD_MODEL_RANGE_END(range))) -- return true; -- -- return false; --} -- - void set_dr_addr_mask(unsigned long mask, int dr) - { - if (!boot_cpu_has(X86_FEATURE_BPEXT)) -@@ -1185,3 +1279,45 @@ u32 amd_get_highest_perf(void) - return 255; - } - EXPORT_SYMBOL_GPL(amd_get_highest_perf); -+ -+bool cpu_has_ibpb_brtype_microcode(void) -+{ -+ switch (boot_cpu_data.x86) { -+ /* Zen1/2 IBPB flushes branch type predictions too. */ -+ case 0x17: -+ return boot_cpu_has(X86_FEATURE_AMD_IBPB); -+ case 0x19: -+ /* Poke the MSR bit on Zen3/4 to check its presence. */ -+ if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { -+ setup_force_cpu_cap(X86_FEATURE_SBPB); -+ return true; -+ } else { -+ return false; -+ } -+ default: -+ return false; -+ } -+} -+ -+static void zenbleed_check_cpu(void *unused) -+{ -+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); -+ -+ zenbleed_check(c); -+} -+ -+void amd_check_microcode(void) -+{ -+ on_each_cpu(zenbleed_check_cpu, NULL, 1); -+} -+ -+/* -+ * Issue a DIV 0/1 insn to clear any division data from previous DIV -+ * operations. -+ */ -+void noinstr amd_clear_divider(void) -+{ -+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) -+ :: "a" (0), "d" (0), "r" (1)); -+} -+EXPORT_SYMBOL_GPL(amd_clear_divider); -diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c -index ecfca3bbcd968..0d2c5fe841414 100644 ---- a/arch/x86/kernel/cpu/bugs.c -+++ b/arch/x86/kernel/cpu/bugs.c -@@ -9,13 +9,13 @@ - * - Andrew D. 
Balsa (code cleanup). - */ - #include --#include - #include - #include - #include - #include - #include - #include -+#include - - #include - #include -@@ -26,8 +26,6 @@ - #include - #include - #include --#include --#include - #include - #include - #include -@@ -37,24 +35,66 @@ - - static void __init spectre_v1_select_mitigation(void); - static void __init spectre_v2_select_mitigation(void); -+static void __init retbleed_select_mitigation(void); -+static void __init spectre_v2_user_select_mitigation(void); - static void __init ssb_select_mitigation(void); - static void __init l1tf_select_mitigation(void); - static void __init mds_select_mitigation(void); --static void __init mds_print_mitigation(void); -+static void __init md_clear_update_mitigation(void); -+static void __init md_clear_select_mitigation(void); - static void __init taa_select_mitigation(void); -+static void __init mmio_select_mitigation(void); - static void __init srbds_select_mitigation(void); - static void __init l1d_flush_select_mitigation(void); -+static void __init gds_select_mitigation(void); -+static void __init srso_select_mitigation(void); - --/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ -+/* The base value of the SPEC_CTRL MSR without task-specific bits set */ - u64 x86_spec_ctrl_base; - EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); -+ -+/* The current value of the SPEC_CTRL MSR with task-specific bits set */ -+DEFINE_PER_CPU(u64, x86_spec_ctrl_current); -+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); -+ -+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; -+EXPORT_SYMBOL_GPL(x86_pred_cmd); -+ - static DEFINE_MUTEX(spec_ctrl_mutex); - -+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -+ -+/* Update SPEC_CTRL MSR and its cached copy unconditionally */ -+static void update_spec_ctrl(u64 val) -+{ -+ this_cpu_write(x86_spec_ctrl_current, val); -+ wrmsrl(MSR_IA32_SPEC_CTRL, val); -+} -+ - /* -- * The vendor and possibly platform specific bits which can be modified in -- * x86_spec_ctrl_base. -+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ -+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). - */ --static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; -+void update_spec_ctrl_cond(u64 val) -+{ -+ if (this_cpu_read(x86_spec_ctrl_current) == val) -+ return; -+ -+ this_cpu_write(x86_spec_ctrl_current, val); -+ -+ /* -+ * When KERNEL_IBRS this MSR is written on return-to-user, unless -+ * forced the update can be delayed until that time. -+ */ -+ if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) -+ wrmsrl(MSR_IA32_SPEC_CTRL, val); -+} -+ -+u64 spec_ctrl_current(void) -+{ -+ return this_cpu_read(x86_spec_ctrl_current); -+} -+EXPORT_SYMBOL_GPL(spec_ctrl_current); - - /* - * AMD specific MSR info for Speculative Store Bypass control. -@@ -84,108 +124,68 @@ EXPORT_SYMBOL_GPL(mds_idle_clear); - */ - DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); - --void __init check_bugs(void) --{ -- identify_boot_cpu(); -- -- /* -- * identify_boot_cpu() initialized SMT support information, let the -- * core code know. 
-- */ -- cpu_smt_check_topology(); -- -- if (!IS_ENABLED(CONFIG_SMP)) { -- pr_info("CPU: "); -- print_cpu_info(&boot_cpu_data); -- } -+/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ -+DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); -+EXPORT_SYMBOL_GPL(mmio_stale_data_clear); - -+void __init cpu_select_mitigations(void) -+{ - /* - * Read the SPEC_CTRL MSR to account for reserved bits which may - * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD - * init code as it is not enumerated and depends on the family. - */ -- if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) -+ if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { - rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - -- /* Allow STIBP in MSR_SPEC_CTRL if supported */ -- if (boot_cpu_has(X86_FEATURE_STIBP)) -- x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; -+ /* -+ * Previously running kernel (kexec), may have some controls -+ * turned ON. Clear them and let the mitigations setup below -+ * rediscover them based on configuration. -+ */ -+ x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; -+ } - - /* Select the proper CPU mitigations before patching alternatives: */ - spectre_v1_select_mitigation(); - spectre_v2_select_mitigation(); -- ssb_select_mitigation(); -- l1tf_select_mitigation(); -- mds_select_mitigation(); -- taa_select_mitigation(); -- srbds_select_mitigation(); -- l1d_flush_select_mitigation(); -- - /* -- * As MDS and TAA mitigations are inter-related, print MDS -- * mitigation until after TAA mitigation selection is done. -+ * retbleed_select_mitigation() relies on the state set by -+ * spectre_v2_select_mitigation(); specifically it wants to know about -+ * spectre_v2=ibrs. - */ -- mds_print_mitigation(); -- -- arch_smt_update(); -- --#ifdef CONFIG_X86_32 -+ retbleed_select_mitigation(); - /* -- * Check whether we are able to run this kernel safely on SMP. -- * -- * - i386 is no longer supported. -- * - In order to run on anything without a TSC, we need to be -- * compiled for a i486. -+ * spectre_v2_user_select_mitigation() relies on the state set by -+ * retbleed_select_mitigation(); specifically the STIBP selection is -+ * forced for UNRET or IBPB. - */ -- if (boot_cpu_data.x86 < 4) -- panic("Kernel requires i486+ for 'invlpg' and other features"); -- -- init_utsname()->machine[1] = -- '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); -- alternative_instructions(); -- -- fpu__init_check_bugs(); --#else /* CONFIG_X86_64 */ -- alternative_instructions(); -+ spectre_v2_user_select_mitigation(); -+ ssb_select_mitigation(); -+ l1tf_select_mitigation(); -+ md_clear_select_mitigation(); -+ srbds_select_mitigation(); -+ l1d_flush_select_mitigation(); - - /* -- * Make sure the first 2MB area is not mapped by huge pages -- * There are typically fixed size MTRRs in there and overlapping -- * MTRRs into large pages causes slow downs. -- * -- * Right now we don't do that with gbpages because there seems -- * very little benefit for that case. -+ * srso_select_mitigation() depends and must run after -+ * retbleed_select_mitigation(). - */ -- if (!direct_gbpages) -- set_memory_4k((unsigned long)__va(0), 1); --#endif -+ srso_select_mitigation(); -+ gds_select_mitigation(); - } - -+/* -+ * NOTE: For VMX, this function is not called in the vmexit path. -+ * It uses vmx_spec_ctrl_restore_host() instead. 
-+ */ - void - x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) - { -- u64 msrval, guestval, hostval = x86_spec_ctrl_base; -+ u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); - struct thread_info *ti = current_thread_info(); - -- /* Is MSR_SPEC_CTRL implemented ? */ - if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { -- /* -- * Restrict guest_spec_ctrl to supported values. Clear the -- * modifiable bits in the host base value and or the -- * modifiable bits from the guest value. -- */ -- guestval = hostval & ~x86_spec_ctrl_mask; -- guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; -- -- /* SSBD controlled in MSR_SPEC_CTRL */ -- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || -- static_cpu_has(X86_FEATURE_AMD_SSBD)) -- hostval |= ssbd_tif_to_spec_ctrl(ti->flags); -- -- /* Conditional STIBP enabled? */ -- if (static_branch_unlikely(&switch_to_cond_stibp)) -- hostval |= stibp_tif_to_spec_ctrl(ti->flags); -- - if (hostval != guestval) { - msrval = setguest ? guestval : hostval; - wrmsrl(MSR_IA32_SPEC_CTRL, msrval); -@@ -266,14 +266,6 @@ static void __init mds_select_mitigation(void) - } - } - --static void __init mds_print_mitigation(void) --{ -- if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) -- return; -- -- pr_info("%s\n", mds_strings[mds_mitigation]); --} -- - static int __init mds_cmdline(char *str) - { - if (!boot_cpu_has_bug(X86_BUG_MDS)) -@@ -328,7 +320,7 @@ static void __init taa_select_mitigation(void) - /* TSX previously disabled by tsx=off */ - if (!boot_cpu_has(X86_FEATURE_RTM)) { - taa_mitigation = TAA_MITIGATION_TSX_DISABLED; -- goto out; -+ return; - } - - if (cpu_mitigations_off()) { -@@ -342,7 +334,7 @@ static void __init taa_select_mitigation(void) - */ - if (taa_mitigation == TAA_MITIGATION_OFF && - mds_mitigation == MDS_MITIGATION_OFF) -- goto out; -+ return; - - if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) - taa_mitigation = TAA_MITIGATION_VERW; -@@ -374,18 +366,6 @@ static void __init taa_select_mitigation(void) - - if (taa_nosmt || cpu_mitigations_auto_nosmt()) - cpu_smt_disable(false); -- -- /* -- * Update MDS mitigation, if necessary, as the mds_user_clear is -- * now enabled for TAA mitigation. 
-- */ -- if (mds_mitigation == MDS_MITIGATION_OFF && -- boot_cpu_has_bug(X86_BUG_MDS)) { -- mds_mitigation = MDS_MITIGATION_FULL; -- mds_select_mitigation(); -- } --out: -- pr_info("%s\n", taa_strings[taa_mitigation]); - } - - static int __init tsx_async_abort_parse_cmdline(char *str) -@@ -409,6 +389,154 @@ static int __init tsx_async_abort_parse_cmdline(char *str) - } - early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); - -+#undef pr_fmt -+#define pr_fmt(fmt) "MMIO Stale Data: " fmt -+ -+enum mmio_mitigations { -+ MMIO_MITIGATION_OFF, -+ MMIO_MITIGATION_UCODE_NEEDED, -+ MMIO_MITIGATION_VERW, -+}; -+ -+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ -+static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; -+static bool mmio_nosmt __ro_after_init = false; -+ -+static const char * const mmio_strings[] = { -+ [MMIO_MITIGATION_OFF] = "Vulnerable", -+ [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", -+ [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", -+}; -+ -+static void __init mmio_select_mitigation(void) -+{ -+ u64 ia32_cap; -+ -+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || -+ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || -+ cpu_mitigations_off()) { -+ mmio_mitigation = MMIO_MITIGATION_OFF; -+ return; -+ } -+ -+ if (mmio_mitigation == MMIO_MITIGATION_OFF) -+ return; -+ -+ ia32_cap = x86_read_arch_cap_msr(); -+ -+ /* -+ * Enable CPU buffer clear mitigation for host and VMM, if also affected -+ * by MDS or TAA. Otherwise, enable mitigation for VMM only. -+ */ -+ if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && -+ boot_cpu_has(X86_FEATURE_RTM))) -+ static_branch_enable(&mds_user_clear); -+ else -+ static_branch_enable(&mmio_stale_data_clear); -+ -+ /* -+ * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can -+ * be propagated to uncore buffers, clearing the Fill buffers on idle -+ * is required irrespective of SMT state. -+ */ -+ if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) -+ static_branch_enable(&mds_idle_clear); -+ -+ /* -+ * Check if the system has the right microcode. -+ * -+ * CPU Fill buffer clear mitigation is enumerated by either an explicit -+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS -+ * affected systems. -+ */ -+ if ((ia32_cap & ARCH_CAP_FB_CLEAR) || -+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) && -+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) && -+ !(ia32_cap & ARCH_CAP_MDS_NO))) -+ mmio_mitigation = MMIO_MITIGATION_VERW; -+ else -+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; -+ -+ if (mmio_nosmt || cpu_mitigations_auto_nosmt()) -+ cpu_smt_disable(false); -+} -+ -+static int __init mmio_stale_data_parse_cmdline(char *str) -+{ -+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) -+ return 0; -+ -+ if (!str) -+ return -EINVAL; -+ -+ if (!strcmp(str, "off")) { -+ mmio_mitigation = MMIO_MITIGATION_OFF; -+ } else if (!strcmp(str, "full")) { -+ mmio_mitigation = MMIO_MITIGATION_VERW; -+ } else if (!strcmp(str, "full,nosmt")) { -+ mmio_mitigation = MMIO_MITIGATION_VERW; -+ mmio_nosmt = true; -+ } -+ -+ return 0; -+} -+early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "" fmt -+ -+static void __init md_clear_update_mitigation(void) -+{ -+ if (cpu_mitigations_off()) -+ return; -+ -+ if (!static_key_enabled(&mds_user_clear)) -+ goto out; -+ -+ /* -+ * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data -+ * mitigation, if necessary. 
-+ */ -+ if (mds_mitigation == MDS_MITIGATION_OFF && -+ boot_cpu_has_bug(X86_BUG_MDS)) { -+ mds_mitigation = MDS_MITIGATION_FULL; -+ mds_select_mitigation(); -+ } -+ if (taa_mitigation == TAA_MITIGATION_OFF && -+ boot_cpu_has_bug(X86_BUG_TAA)) { -+ taa_mitigation = TAA_MITIGATION_VERW; -+ taa_select_mitigation(); -+ } -+ if (mmio_mitigation == MMIO_MITIGATION_OFF && -+ boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { -+ mmio_mitigation = MMIO_MITIGATION_VERW; -+ mmio_select_mitigation(); -+ } -+out: -+ if (boot_cpu_has_bug(X86_BUG_MDS)) -+ pr_info("MDS: %s\n", mds_strings[mds_mitigation]); -+ if (boot_cpu_has_bug(X86_BUG_TAA)) -+ pr_info("TAA: %s\n", taa_strings[taa_mitigation]); -+ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) -+ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); -+ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) -+ pr_info("MMIO Stale Data: Unknown: No mitigations\n"); -+} -+ -+static void __init md_clear_select_mitigation(void) -+{ -+ mds_select_mitigation(); -+ taa_select_mitigation(); -+ mmio_select_mitigation(); -+ -+ /* -+ * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update -+ * and print their mitigation after MDS, TAA and MMIO Stale Data -+ * mitigation selection is done. -+ */ -+ md_clear_update_mitigation(); -+} -+ - #undef pr_fmt - #define pr_fmt(fmt) "SRBDS: " fmt - -@@ -470,11 +598,13 @@ static void __init srbds_select_mitigation(void) - return; - - /* -- * Check to see if this is one of the MDS_NO systems supporting -- * TSX that are only exposed to SRBDS when TSX is enabled. -+ * Check to see if this is one of the MDS_NO systems supporting TSX that -+ * are only exposed to SRBDS when TSX is enabled or when CPU is affected -+ * by Processor MMIO Stale Data vulnerability. - */ - ia32_cap = x86_read_arch_cap_msr(); -- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) -+ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && -+ !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) - srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; - else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) - srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; -@@ -528,6 +658,149 @@ static int __init l1d_flush_parse_cmdline(char *str) - } - early_param("l1d_flush", l1d_flush_parse_cmdline); - -+#undef pr_fmt -+#define pr_fmt(fmt) "GDS: " fmt -+ -+enum gds_mitigations { -+ GDS_MITIGATION_OFF, -+ GDS_MITIGATION_UCODE_NEEDED, -+ GDS_MITIGATION_FORCE, -+ GDS_MITIGATION_FULL, -+ GDS_MITIGATION_FULL_LOCKED, -+ GDS_MITIGATION_HYPERVISOR, -+}; -+ -+#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) -+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; -+#else -+static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; -+#endif -+ -+static const char * const gds_strings[] = { -+ [GDS_MITIGATION_OFF] = "Vulnerable", -+ [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", -+ [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", -+ [GDS_MITIGATION_FULL] = "Mitigation: Microcode", -+ [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", -+ [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", -+}; -+ -+bool gds_ucode_mitigated(void) -+{ -+ return (gds_mitigation == GDS_MITIGATION_FULL || -+ gds_mitigation == GDS_MITIGATION_FULL_LOCKED); -+} -+EXPORT_SYMBOL_GPL(gds_ucode_mitigated); -+ -+void update_gds_msr(void) -+{ -+ u64 mcu_ctrl_after; -+ u64 mcu_ctrl; -+ -+ switch (gds_mitigation) { -+ case GDS_MITIGATION_OFF: -+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); -+ 
mcu_ctrl |= GDS_MITG_DIS; -+ break; -+ case GDS_MITIGATION_FULL_LOCKED: -+ /* -+ * The LOCKED state comes from the boot CPU. APs might not have -+ * the same state. Make sure the mitigation is enabled on all -+ * CPUs. -+ */ -+ case GDS_MITIGATION_FULL: -+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); -+ mcu_ctrl &= ~GDS_MITG_DIS; -+ break; -+ case GDS_MITIGATION_FORCE: -+ case GDS_MITIGATION_UCODE_NEEDED: -+ case GDS_MITIGATION_HYPERVISOR: -+ return; -+ }; -+ -+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); -+ -+ /* -+ * Check to make sure that the WRMSR value was not ignored. Writes to -+ * GDS_MITG_DIS will be ignored if this processor is locked but the boot -+ * processor was not. -+ */ -+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); -+ WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); -+} -+ -+static void __init gds_select_mitigation(void) -+{ -+ u64 mcu_ctrl; -+ -+ if (!boot_cpu_has_bug(X86_BUG_GDS)) -+ return; -+ -+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { -+ gds_mitigation = GDS_MITIGATION_HYPERVISOR; -+ goto out; -+ } -+ -+ if (cpu_mitigations_off()) -+ gds_mitigation = GDS_MITIGATION_OFF; -+ /* Will verify below that mitigation _can_ be disabled */ -+ -+ /* No microcode */ -+ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { -+ if (gds_mitigation == GDS_MITIGATION_FORCE) { -+ /* -+ * This only needs to be done on the boot CPU so do it -+ * here rather than in update_gds_msr() -+ */ -+ setup_clear_cpu_cap(X86_FEATURE_AVX); -+ pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); -+ } else { -+ gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; -+ } -+ goto out; -+ } -+ -+ /* Microcode has mitigation, use it */ -+ if (gds_mitigation == GDS_MITIGATION_FORCE) -+ gds_mitigation = GDS_MITIGATION_FULL; -+ -+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); -+ if (mcu_ctrl & GDS_MITG_LOCKED) { -+ if (gds_mitigation == GDS_MITIGATION_OFF) -+ pr_warn("Mitigation locked. Disable failed.\n"); -+ -+ /* -+ * The mitigation is selected from the boot CPU. All other CPUs -+ * _should_ have the same state. If the boot CPU isn't locked -+ * but others are then update_gds_msr() will WARN() of the state -+ * mismatch. If the boot CPU is locked update_gds_msr() will -+ * ensure the other CPUs have the mitigation enabled. 
-+ */ -+ gds_mitigation = GDS_MITIGATION_FULL_LOCKED; -+ } -+ -+ update_gds_msr(); -+out: -+ pr_info("%s\n", gds_strings[gds_mitigation]); -+} -+ -+static int __init gds_parse_cmdline(char *str) -+{ -+ if (!str) -+ return -EINVAL; -+ -+ if (!boot_cpu_has_bug(X86_BUG_GDS)) -+ return 0; -+ -+ if (!strcmp(str, "off")) -+ gds_mitigation = GDS_MITIGATION_OFF; -+ else if (!strcmp(str, "force")) -+ gds_mitigation = GDS_MITIGATION_FORCE; -+ -+ return 0; -+} -+early_param("gather_data_sampling", gds_parse_cmdline); -+ - #undef pr_fmt - #define pr_fmt(fmt) "Spectre V1 : " fmt - -@@ -608,22 +881,193 @@ static void __init spectre_v1_select_mitigation(void) - } - } - -- pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); --} -- --static int __init nospectre_v1_cmdline(char *str) --{ -- spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; -- return 0; -+ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); -+} -+ -+static int __init nospectre_v1_cmdline(char *str) -+{ -+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; -+ return 0; -+} -+early_param("nospectre_v1", nospectre_v1_cmdline); -+ -+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = -+ SPECTRE_V2_NONE; -+ -+#undef pr_fmt -+#define pr_fmt(fmt) "RETBleed: " fmt -+ -+enum retbleed_mitigation { -+ RETBLEED_MITIGATION_NONE, -+ RETBLEED_MITIGATION_UNRET, -+ RETBLEED_MITIGATION_IBPB, -+ RETBLEED_MITIGATION_IBRS, -+ RETBLEED_MITIGATION_EIBRS, -+}; -+ -+enum retbleed_mitigation_cmd { -+ RETBLEED_CMD_OFF, -+ RETBLEED_CMD_AUTO, -+ RETBLEED_CMD_UNRET, -+ RETBLEED_CMD_IBPB, -+}; -+ -+const char * const retbleed_strings[] = { -+ [RETBLEED_MITIGATION_NONE] = "Vulnerable", -+ [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", -+ [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", -+ [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", -+ [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", -+}; -+ -+static enum retbleed_mitigation retbleed_mitigation __ro_after_init = -+ RETBLEED_MITIGATION_NONE; -+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = -+ RETBLEED_CMD_AUTO; -+ -+static int __ro_after_init retbleed_nosmt = false; -+ -+static int __init retbleed_parse_cmdline(char *str) -+{ -+ if (!str) -+ return -EINVAL; -+ -+ while (str) { -+ char *next = strchr(str, ','); -+ if (next) { -+ *next = 0; -+ next++; -+ } -+ -+ if (!strcmp(str, "off")) { -+ retbleed_cmd = RETBLEED_CMD_OFF; -+ } else if (!strcmp(str, "auto")) { -+ retbleed_cmd = RETBLEED_CMD_AUTO; -+ } else if (!strcmp(str, "unret")) { -+ retbleed_cmd = RETBLEED_CMD_UNRET; -+ } else if (!strcmp(str, "ibpb")) { -+ retbleed_cmd = RETBLEED_CMD_IBPB; -+ } else if (!strcmp(str, "nosmt")) { -+ retbleed_nosmt = true; -+ } else { -+ pr_err("Ignoring unknown retbleed option (%s).", str); -+ } -+ -+ str = next; -+ } -+ -+ return 0; -+} -+early_param("retbleed", retbleed_parse_cmdline); -+ -+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" -+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" -+ -+static void __init retbleed_select_mitigation(void) -+{ -+ bool mitigate_smt = false; -+ -+ if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) -+ return; -+ -+ switch (retbleed_cmd) { -+ case RETBLEED_CMD_OFF: -+ return; -+ -+ case RETBLEED_CMD_UNRET: -+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { -+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; -+ } else { -+ pr_err("WARNING: kernel not compiled with 
CPU_UNRET_ENTRY.\n"); -+ goto do_cmd_auto; -+ } -+ break; -+ -+ case RETBLEED_CMD_IBPB: -+ if (!boot_cpu_has(X86_FEATURE_IBPB)) { -+ pr_err("WARNING: CPU does not support IBPB.\n"); -+ goto do_cmd_auto; -+ } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { -+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; -+ } else { -+ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); -+ goto do_cmd_auto; -+ } -+ break; -+ -+do_cmd_auto: -+ case RETBLEED_CMD_AUTO: -+ default: -+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || -+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { -+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) -+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; -+ else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB)) -+ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; -+ } -+ -+ /* -+ * The Intel mitigation (IBRS or eIBRS) was already selected in -+ * spectre_v2_select_mitigation(). 'retbleed_mitigation' will -+ * be set accordingly below. -+ */ -+ -+ break; -+ } -+ -+ switch (retbleed_mitigation) { -+ case RETBLEED_MITIGATION_UNRET: -+ setup_force_cpu_cap(X86_FEATURE_RETHUNK); -+ setup_force_cpu_cap(X86_FEATURE_UNRET); -+ -+ if (IS_ENABLED(CONFIG_RETHUNK)) -+ x86_return_thunk = retbleed_return_thunk; -+ -+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && -+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) -+ pr_err(RETBLEED_UNTRAIN_MSG); -+ -+ mitigate_smt = true; -+ break; -+ -+ case RETBLEED_MITIGATION_IBPB: -+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); -+ mitigate_smt = true; -+ break; -+ -+ default: -+ break; -+ } -+ -+ if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && -+ (retbleed_nosmt || cpu_mitigations_auto_nosmt())) -+ cpu_smt_disable(false); -+ -+ /* -+ * Let IBRS trump all on Intel without affecting the effects of the -+ * retbleed= cmdline option. 
-+ */ -+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { -+ switch (spectre_v2_enabled) { -+ case SPECTRE_V2_IBRS: -+ retbleed_mitigation = RETBLEED_MITIGATION_IBRS; -+ break; -+ case SPECTRE_V2_EIBRS: -+ case SPECTRE_V2_EIBRS_RETPOLINE: -+ case SPECTRE_V2_EIBRS_LFENCE: -+ retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; -+ break; -+ default: -+ pr_err(RETBLEED_INTEL_MSG); -+ } -+ } -+ -+ pr_info("%s\n", retbleed_strings[retbleed_mitigation]); - } --early_param("nospectre_v1", nospectre_v1_cmdline); - - #undef pr_fmt - #define pr_fmt(fmt) "Spectre V2 : " fmt - --static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = -- SPECTRE_V2_NONE; -- - static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = - SPECTRE_V2_USER_NONE; - static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = -@@ -650,6 +1094,33 @@ static inline const char *spectre_v2_module_string(void) - static inline const char *spectre_v2_module_string(void) { return ""; } - #endif - -+#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" -+#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" -+#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" -+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" -+ -+#ifdef CONFIG_BPF_SYSCALL -+void unpriv_ebpf_notify(int new_state) -+{ -+ if (new_state) -+ return; -+ -+ /* Unprivileged eBPF is enabled */ -+ -+ switch (spectre_v2_enabled) { -+ case SPECTRE_V2_EIBRS: -+ pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); -+ break; -+ case SPECTRE_V2_EIBRS_LFENCE: -+ if (sched_smt_active()) -+ pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); -+ break; -+ default: -+ break; -+ } -+} -+#endif -+ - static inline bool match_option(const char *arg, int arglen, const char *opt) - { - int len = strlen(opt); -@@ -664,7 +1135,11 @@ enum spectre_v2_mitigation_cmd { - SPECTRE_V2_CMD_FORCE, - SPECTRE_V2_CMD_RETPOLINE, - SPECTRE_V2_CMD_RETPOLINE_GENERIC, -- SPECTRE_V2_CMD_RETPOLINE_AMD, -+ SPECTRE_V2_CMD_RETPOLINE_LFENCE, -+ SPECTRE_V2_CMD_EIBRS, -+ SPECTRE_V2_CMD_EIBRS_RETPOLINE, -+ SPECTRE_V2_CMD_EIBRS_LFENCE, -+ SPECTRE_V2_CMD_IBRS, - }; - - enum spectre_v2_user_cmd { -@@ -705,13 +1180,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure) - pr_info("spectre_v2_user=%s forced on command line.\n", reason); - } - -+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; -+ - static enum spectre_v2_user_cmd __init --spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) -+spectre_v2_parse_user_cmdline(void) - { - char arg[20]; - int ret, i; - -- switch (v2_cmd) { -+ switch (spectre_v2_cmd) { - case SPECTRE_V2_CMD_NONE: - return SPECTRE_V2_USER_CMD_NONE; - case SPECTRE_V2_CMD_FORCE: -@@ -737,8 +1214,20 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) - return SPECTRE_V2_USER_CMD_AUTO; - } - -+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) -+{ -+ return mode == SPECTRE_V2_EIBRS || -+ mode == SPECTRE_V2_EIBRS_RETPOLINE || -+ mode == SPECTRE_V2_EIBRS_LFENCE; -+} -+ -+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) -+{ -+ return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; -+} -+ - 
static void __init --spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) -+spectre_v2_user_select_mitigation(void) - { - enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; - bool smt_possible = IS_ENABLED(CONFIG_SMP); -@@ -751,7 +1240,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) - cpu_smt_control == CPU_SMT_NOT_SUPPORTED) - smt_possible = false; - -- cmd = spectre_v2_parse_user_cmdline(v2_cmd); -+ cmd = spectre_v2_parse_user_cmdline(); - switch (cmd) { - case SPECTRE_V2_USER_CMD_NONE: - goto set_mode; -@@ -799,12 +1288,19 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) - } - - /* -- * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not -- * required. -+ * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP -+ * is not required. -+ * -+ * Enhanced IBRS also protects against cross-thread branch target -+ * injection in user-mode as the IBRS bit remains always set which -+ * implicitly enables cross-thread protections. However, in legacy IBRS -+ * mode, the IBRS bit is set only on kernel entry and cleared on return -+ * to userspace. This disables the implicit cross-thread protection, -+ * so allow for STIBP to be selected in that case. - */ - if (!boot_cpu_has(X86_FEATURE_STIBP) || - !smt_possible || -- spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) -+ spectre_v2_in_eibrs_mode(spectre_v2_enabled)) - return; - - /* -@@ -816,6 +1312,14 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) - boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) - mode = SPECTRE_V2_USER_STRICT_PREFERRED; - -+ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || -+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { -+ if (mode != SPECTRE_V2_USER_STRICT && -+ mode != SPECTRE_V2_USER_STRICT_PREFERRED) -+ pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); -+ mode = SPECTRE_V2_USER_STRICT_PREFERRED; -+ } -+ - spectre_v2_user_stibp = mode; - - set_mode: -@@ -824,9 +1328,12 @@ set_mode: - - static const char * const spectre_v2_strings[] = { - [SPECTRE_V2_NONE] = "Vulnerable", -- [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", -- [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", -- [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", -+ [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", -+ [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", -+ [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", -+ [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", -+ [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", -+ [SPECTRE_V2_IBRS] = "Mitigation: IBRS", - }; - - static const struct { -@@ -837,9 +1344,14 @@ static const struct { - { "off", SPECTRE_V2_CMD_NONE, false }, - { "on", SPECTRE_V2_CMD_FORCE, true }, - { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, -- { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, -+ { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, -+ { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, - { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, -+ { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, -+ { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, -+ { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, - { "auto", SPECTRE_V2_CMD_AUTO, false }, -+ { "ibrs", SPECTRE_V2_CMD_IBRS, false }, - }; - - static void __init spec_v2_print_cond(const char *reason, bool secure) -@@ -875,17 +1387,54 @@ static enum 
spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) - } - - if ((cmd == SPECTRE_V2_CMD_RETPOLINE || -- cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || -- cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && -+ cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || -+ cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || -+ cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || -+ cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && - !IS_ENABLED(CONFIG_RETPOLINE)) { -- pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); -+ pr_err("%s selected but not compiled in. Switching to AUTO select\n", -+ mitigation_options[i].option); -+ return SPECTRE_V2_CMD_AUTO; -+ } -+ -+ if ((cmd == SPECTRE_V2_CMD_EIBRS || -+ cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || -+ cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && -+ !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { -+ pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n", -+ mitigation_options[i].option); -+ return SPECTRE_V2_CMD_AUTO; -+ } -+ -+ if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || -+ cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && -+ !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { -+ pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", -+ mitigation_options[i].option); -+ return SPECTRE_V2_CMD_AUTO; -+ } -+ -+ if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { -+ pr_err("%s selected but not compiled in. Switching to AUTO select\n", -+ mitigation_options[i].option); -+ return SPECTRE_V2_CMD_AUTO; -+ } -+ -+ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { -+ pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", -+ mitigation_options[i].option); -+ return SPECTRE_V2_CMD_AUTO; -+ } -+ -+ if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { -+ pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", -+ mitigation_options[i].option); - return SPECTRE_V2_CMD_AUTO; - } - -- if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && -- boot_cpu_data.x86_vendor != X86_VENDOR_HYGON && -- boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { -- pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); -+ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { -+ pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", -+ mitigation_options[i].option); - return SPECTRE_V2_CMD_AUTO; - } - -@@ -894,6 +1443,79 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) - return cmd; - } - -+static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) -+{ -+ if (!IS_ENABLED(CONFIG_RETPOLINE)) { -+ pr_err("Kernel not compiled with retpoline; no mitigation available!"); -+ return SPECTRE_V2_NONE; -+ } -+ -+ return SPECTRE_V2_RETPOLINE; -+} -+ -+/* Disable in-kernel use of non-RSB RET predictors */ -+static void __init spec_ctrl_disable_kernel_rrsba(void) -+{ -+ u64 ia32_cap; -+ -+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) -+ return; -+ -+ ia32_cap = x86_read_arch_cap_msr(); -+ -+ if (ia32_cap & ARCH_CAP_RRSBA) { -+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; -+ update_spec_ctrl(x86_spec_ctrl_base); -+ } -+} -+ -+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) -+{ -+ /* -+ * Similar to context switches, there are two types of RSB attacks -+ * after VM exit: -+ * -+ * 1) RSB underflow -+ * -+ * 2) Poisoned RSB entry -+ * -+ * When retpoline is enabled, both are mitigated by filling/clearing -+ * the RSB. 
-+ * -+ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch -+ * prediction isolation protections, RSB still needs to be cleared -+ * because of #2. Note that SMEP provides no protection here, unlike -+ * user-space-poisoned RSB entries. -+ * -+ * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB -+ * bug is present then a LITE version of RSB protection is required, -+ * just a single call needs to retire before a RET is executed. -+ */ -+ switch (mode) { -+ case SPECTRE_V2_NONE: -+ return; -+ -+ case SPECTRE_V2_EIBRS_LFENCE: -+ case SPECTRE_V2_EIBRS: -+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { -+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); -+ pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); -+ } -+ return; -+ -+ case SPECTRE_V2_EIBRS_RETPOLINE: -+ case SPECTRE_V2_RETPOLINE: -+ case SPECTRE_V2_LFENCE: -+ case SPECTRE_V2_IBRS: -+ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); -+ pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); -+ return; -+ } -+ -+ pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); -+ dump_stack(); -+} -+ - static void __init spectre_v2_select_mitigation(void) - { - enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); -@@ -914,86 +1536,172 @@ static void __init spectre_v2_select_mitigation(void) - case SPECTRE_V2_CMD_FORCE: - case SPECTRE_V2_CMD_AUTO: - if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { -- mode = SPECTRE_V2_IBRS_ENHANCED; -- /* Force it so VMEXIT will restore correctly */ -- x86_spec_ctrl_base |= SPEC_CTRL_IBRS; -- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); -- goto specv2_set_mode; -+ mode = SPECTRE_V2_EIBRS; -+ break; -+ } -+ -+ if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && -+ boot_cpu_has_bug(X86_BUG_RETBLEED) && -+ retbleed_cmd != RETBLEED_CMD_OFF && -+ boot_cpu_has(X86_FEATURE_IBRS) && -+ boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { -+ mode = SPECTRE_V2_IBRS; -+ break; - } -- if (IS_ENABLED(CONFIG_RETPOLINE)) -- goto retpoline_auto; -+ -+ mode = spectre_v2_select_retpoline(); - break; -- case SPECTRE_V2_CMD_RETPOLINE_AMD: -- if (IS_ENABLED(CONFIG_RETPOLINE)) -- goto retpoline_amd; -+ -+ case SPECTRE_V2_CMD_RETPOLINE_LFENCE: -+ pr_err(SPECTRE_V2_LFENCE_MSG); -+ mode = SPECTRE_V2_LFENCE; - break; -+ - case SPECTRE_V2_CMD_RETPOLINE_GENERIC: -- if (IS_ENABLED(CONFIG_RETPOLINE)) -- goto retpoline_generic; -+ mode = SPECTRE_V2_RETPOLINE; - break; -+ - case SPECTRE_V2_CMD_RETPOLINE: -- if (IS_ENABLED(CONFIG_RETPOLINE)) -- goto retpoline_auto; -+ mode = spectre_v2_select_retpoline(); -+ break; -+ -+ case SPECTRE_V2_CMD_IBRS: -+ mode = SPECTRE_V2_IBRS; -+ break; -+ -+ case SPECTRE_V2_CMD_EIBRS: -+ mode = SPECTRE_V2_EIBRS; -+ break; -+ -+ case SPECTRE_V2_CMD_EIBRS_LFENCE: -+ mode = SPECTRE_V2_EIBRS_LFENCE; -+ break; -+ -+ case SPECTRE_V2_CMD_EIBRS_RETPOLINE: -+ mode = SPECTRE_V2_EIBRS_RETPOLINE; - break; - } -- pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); -- return; - --retpoline_auto: -- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || -- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { -- retpoline_amd: -- if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { -- pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); -- goto retpoline_generic; -- } -- mode = SPECTRE_V2_RETPOLINE_AMD; -- setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); -- setup_force_cpu_cap(X86_FEATURE_RETPOLINE); -- } else { -- retpoline_generic: -- mode = SPECTRE_V2_RETPOLINE_GENERIC; -+ if (mode == 
SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) -+ pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); -+ -+ if (spectre_v2_in_ibrs_mode(mode)) { -+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS; -+ update_spec_ctrl(x86_spec_ctrl_base); -+ } -+ -+ switch (mode) { -+ case SPECTRE_V2_NONE: -+ case SPECTRE_V2_EIBRS: -+ break; -+ -+ case SPECTRE_V2_IBRS: -+ setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); -+ if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) -+ pr_warn(SPECTRE_V2_IBRS_PERF_MSG); -+ break; -+ -+ case SPECTRE_V2_LFENCE: -+ case SPECTRE_V2_EIBRS_LFENCE: -+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); -+ fallthrough; -+ -+ case SPECTRE_V2_RETPOLINE: -+ case SPECTRE_V2_EIBRS_RETPOLINE: - setup_force_cpu_cap(X86_FEATURE_RETPOLINE); -+ break; - } - --specv2_set_mode: -+ /* -+ * Disable alternate RSB predictions in kernel when indirect CALLs and -+ * JMPs gets protection against BHI and Intramode-BTI, but RET -+ * prediction from a non-RSB predictor is still a risk. -+ */ -+ if (mode == SPECTRE_V2_EIBRS_LFENCE || -+ mode == SPECTRE_V2_EIBRS_RETPOLINE || -+ mode == SPECTRE_V2_RETPOLINE) -+ spec_ctrl_disable_kernel_rrsba(); -+ - spectre_v2_enabled = mode; - pr_info("%s\n", spectre_v2_strings[mode]); - - /* -- * If spectre v2 protection has been enabled, unconditionally fill -- * RSB during a context switch; this protects against two independent -- * issues: -+ * If Spectre v2 protection has been enabled, fill the RSB during a -+ * context switch. In general there are two types of RSB attacks -+ * across context switches, for which the CALLs/RETs may be unbalanced. -+ * -+ * 1) RSB underflow -+ * -+ * Some Intel parts have "bottomless RSB". When the RSB is empty, -+ * speculated return targets may come from the branch predictor, -+ * which could have a user-poisoned BTB or BHB entry. -+ * -+ * AMD has it even worse: *all* returns are speculated from the BTB, -+ * regardless of the state of the RSB. -+ * -+ * When IBRS or eIBRS is enabled, the "user -> kernel" attack -+ * scenario is mitigated by the IBRS branch prediction isolation -+ * properties, so the RSB buffer filling wouldn't be necessary to -+ * protect against this type of attack. -+ * -+ * The "user -> user" attack scenario is mitigated by RSB filling. -+ * -+ * 2) Poisoned RSB entry -+ * -+ * If the 'next' in-kernel return stack is shorter than 'prev', -+ * 'next' could be tricked into speculating with a user-poisoned RSB -+ * entry. -+ * -+ * The "user -> kernel" attack scenario is mitigated by SMEP and -+ * eIBRS. -+ * -+ * The "user -> user" scenario, also known as SpectreBHB, requires -+ * RSB clearing. -+ * -+ * So to mitigate all cases, unconditionally fill RSB on context -+ * switches. - * -- * - RSB underflow (and switch to BTB) on Skylake+ -- * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs -+ * FIXME: Is this pointless for retbleed-affected AMD? - */ - setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); - pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); - -+ spectre_v2_determine_rsb_fill_type_at_vmexit(mode); -+ - /* -- * Retpoline means the kernel is safe because it has no indirect -- * branches. Enhanced IBRS protects firmware too, so, enable restricted -- * speculation around firmware calls only when Enhanced IBRS isn't -- * supported. -+ * Retpoline protects the kernel, but doesn't protect firmware. IBRS -+ * and Enhanced IBRS protect firmware too, so enable IBRS around -+ * firmware calls only when IBRS / Enhanced IBRS aren't otherwise -+ * enabled. 
- * - * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because - * the user might select retpoline on the kernel command line and if - * the CPU supports Enhanced IBRS, kernel might un-intentionally not - * enable IBRS around firmware calls. - */ -- if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) { -+ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && -+ boot_cpu_has(X86_FEATURE_IBPB) && -+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || -+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { -+ -+ if (retbleed_cmd != RETBLEED_CMD_IBPB) { -+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); -+ pr_info("Enabling Speculation Barrier for firmware calls\n"); -+ } -+ -+ } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { - setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); - pr_info("Enabling Restricted Speculation for firmware calls\n"); - } - - /* Set up IBPB and STIBP depending on the general spectre V2 command */ -- spectre_v2_user_select_mitigation(cmd); -+ spectre_v2_cmd = cmd; - } - - static void update_stibp_msr(void * __unused) - { -- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); -+ u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); -+ update_spec_ctrl(val); - } - - /* Update x86_spec_ctrl_base in case SMT state changed. */ -@@ -1028,6 +1736,8 @@ static void update_indir_branch_cond(void) - /* Update the static key controlling the MDS CPU buffer clear in idle */ - static void update_mds_branch_idle(void) - { -+ u64 ia32_cap = x86_read_arch_cap_msr(); -+ - /* - * Enable the idle clearing if SMT is active on CPUs which are - * affected only by MSBDS and not any other MDS variant. -@@ -1039,19 +1749,26 @@ static void update_mds_branch_idle(void) - if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) - return; - -- if (sched_smt_active()) -+ if (sched_smt_active()) { - static_branch_enable(&mds_idle_clear); -- else -+ } else if (mmio_mitigation == MMIO_MITIGATION_OFF || -+ (ia32_cap & ARCH_CAP_FBSDP_NO)) { - static_branch_disable(&mds_idle_clear); -+ } - } - - #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" - #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" -+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" - - void cpu_bugs_smt_update(void) - { - mutex_lock(&spec_ctrl_mutex); - -+ if (sched_smt_active() && unprivileged_ebpf_enabled() && -+ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) -+ pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); -+ - switch (spectre_v2_user_stibp) { - case SPECTRE_V2_USER_NONE: - break; -@@ -1087,6 +1804,16 @@ void cpu_bugs_smt_update(void) - break; - } - -+ switch (mmio_mitigation) { -+ case MMIO_MITIGATION_VERW: -+ case MMIO_MITIGATION_UCODE_NEEDED: -+ if (sched_smt_active()) -+ pr_warn_once(MMIO_MSG_SMT); -+ break; -+ case MMIO_MITIGATION_OFF: -+ break; -+ } -+ - mutex_unlock(&spec_ctrl_mutex); - } - -@@ -1190,16 +1917,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) - break; - } - -- /* -- * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper -- * bit in the mask to allow guests to use the mitigation even in the -- * case where the host does not enable it. 
-- */ -- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || -- static_cpu_has(X86_FEATURE_AMD_SSBD)) { -- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; -- } -- - /* - * We have three CPU feature flags that are in play here: - * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. -@@ -1217,7 +1934,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) - x86_amd_ssb_disable(); - } else { - x86_spec_ctrl_base |= SPEC_CTRL_SSBD; -- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); -+ update_spec_ctrl(x86_spec_ctrl_base); - } - } - -@@ -1364,6 +2081,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) - if (ctrl == PR_SPEC_FORCE_DISABLE) - task_set_spec_ib_force_disable(task); - task_update_spec_tif(task); -+ if (task == current) -+ indirect_branch_prediction_barrier(); - break; - default: - return -ERANGE; -@@ -1468,7 +2187,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) - void x86_spec_ctrl_setup_ap(void) - { - if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) -- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); -+ update_spec_ctrl(x86_spec_ctrl_base); - - if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) - x86_amd_ssb_disable(); -@@ -1599,6 +2318,170 @@ static int __init l1tf_cmdline(char *str) - } - early_param("l1tf", l1tf_cmdline); - -+#undef pr_fmt -+#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt -+ -+enum srso_mitigation { -+ SRSO_MITIGATION_NONE, -+ SRSO_MITIGATION_MICROCODE, -+ SRSO_MITIGATION_SAFE_RET, -+ SRSO_MITIGATION_IBPB, -+ SRSO_MITIGATION_IBPB_ON_VMEXIT, -+}; -+ -+enum srso_mitigation_cmd { -+ SRSO_CMD_OFF, -+ SRSO_CMD_MICROCODE, -+ SRSO_CMD_SAFE_RET, -+ SRSO_CMD_IBPB, -+ SRSO_CMD_IBPB_ON_VMEXIT, -+}; -+ -+static const char * const srso_strings[] = { -+ [SRSO_MITIGATION_NONE] = "Vulnerable", -+ [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", -+ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", -+ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", -+ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" -+}; -+ -+static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; -+static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; -+ -+static int __init srso_parse_cmdline(char *str) -+{ -+ if (!str) -+ return -EINVAL; -+ -+ if (!strcmp(str, "off")) -+ srso_cmd = SRSO_CMD_OFF; -+ else if (!strcmp(str, "microcode")) -+ srso_cmd = SRSO_CMD_MICROCODE; -+ else if (!strcmp(str, "safe-ret")) -+ srso_cmd = SRSO_CMD_SAFE_RET; -+ else if (!strcmp(str, "ibpb")) -+ srso_cmd = SRSO_CMD_IBPB; -+ else if (!strcmp(str, "ibpb-vmexit")) -+ srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; -+ else -+ pr_err("Ignoring unknown SRSO option (%s).", str); -+ -+ return 0; -+} -+early_param("spec_rstack_overflow", srso_parse_cmdline); -+ -+#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." -+ -+static void __init srso_select_mitigation(void) -+{ -+ bool has_microcode; -+ -+ if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) -+ goto pred_cmd; -+ -+ /* -+ * The first check is for the kernel running as a guest in order -+ * for guests to verify whether IBPB is a viable mitigation. -+ */ -+ has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode(); -+ if (!has_microcode) { -+ pr_warn("IBPB-extending microcode not applied!\n"); -+ pr_warn(SRSO_NOTICE); -+ } else { -+ /* -+ * Enable the synthetic (even if in a real CPUID leaf) -+ * flags for guests. 
-+ */ -+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); -+ -+ /* -+ * Zen1/2 with SMT off aren't vulnerable after the right -+ * IBPB microcode has been applied. -+ */ -+ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { -+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO); -+ return; -+ } -+ } -+ -+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { -+ if (has_microcode) { -+ pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); -+ srso_mitigation = SRSO_MITIGATION_IBPB; -+ goto pred_cmd; -+ } -+ } -+ -+ switch (srso_cmd) { -+ case SRSO_CMD_OFF: -+ return; -+ -+ case SRSO_CMD_MICROCODE: -+ if (has_microcode) { -+ srso_mitigation = SRSO_MITIGATION_MICROCODE; -+ pr_warn(SRSO_NOTICE); -+ } -+ break; -+ -+ case SRSO_CMD_SAFE_RET: -+ if (IS_ENABLED(CONFIG_CPU_SRSO)) { -+ /* -+ * Enable the return thunk for generated code -+ * like ftrace, static_call, etc. -+ */ -+ setup_force_cpu_cap(X86_FEATURE_RETHUNK); -+ setup_force_cpu_cap(X86_FEATURE_UNRET); -+ -+ if (boot_cpu_data.x86 == 0x19) { -+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); -+ x86_return_thunk = srso_alias_return_thunk; -+ } else { -+ setup_force_cpu_cap(X86_FEATURE_SRSO); -+ x86_return_thunk = srso_return_thunk; -+ } -+ srso_mitigation = SRSO_MITIGATION_SAFE_RET; -+ } else { -+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); -+ goto pred_cmd; -+ } -+ break; -+ -+ case SRSO_CMD_IBPB: -+ if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { -+ if (has_microcode) { -+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); -+ srso_mitigation = SRSO_MITIGATION_IBPB; -+ } -+ } else { -+ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); -+ goto pred_cmd; -+ } -+ break; -+ -+ case SRSO_CMD_IBPB_ON_VMEXIT: -+ if (IS_ENABLED(CONFIG_CPU_SRSO)) { -+ if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { -+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); -+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; -+ } -+ } else { -+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); -+ goto pred_cmd; -+ } -+ break; -+ -+ default: -+ break; -+ } -+ -+ pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); -+ -+pred_cmd: -+ if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && -+ boot_cpu_has(X86_FEATURE_SBPB)) -+ x86_pred_cmd = PRED_CMD_SBPB; -+} -+ - #undef pr_fmt - #define pr_fmt(fmt) fmt - -@@ -1689,9 +2572,26 @@ static ssize_t tsx_async_abort_show_state(char *buf) - sched_smt_active() ? "vulnerable" : "disabled"); - } - -+static ssize_t mmio_stale_data_show_state(char *buf) -+{ -+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) -+ return sysfs_emit(buf, "Unknown: No mitigations\n"); -+ -+ if (mmio_mitigation == MMIO_MITIGATION_OFF) -+ return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); -+ -+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { -+ return sysfs_emit(buf, "%s; SMT Host state unknown\n", -+ mmio_strings[mmio_mitigation]); -+ } -+ -+ return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], -+ sched_smt_active() ? 
"vulnerable" : "disabled"); -+} -+ - static char *stibp_state(void) - { -- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) -+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) - return ""; - - switch (spectre_v2_user_stibp) { -@@ -1721,11 +2621,80 @@ static char *ibpb_state(void) - return ""; - } - -+static char *pbrsb_eibrs_state(void) -+{ -+ if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { -+ if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || -+ boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) -+ return ", PBRSB-eIBRS: SW sequence"; -+ else -+ return ", PBRSB-eIBRS: Vulnerable"; -+ } else { -+ return ", PBRSB-eIBRS: Not affected"; -+ } -+} -+ -+static ssize_t spectre_v2_show_state(char *buf) -+{ -+ if (spectre_v2_enabled == SPECTRE_V2_LFENCE) -+ return sprintf(buf, "Vulnerable: LFENCE\n"); -+ -+ if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) -+ return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); -+ -+ if (sched_smt_active() && unprivileged_ebpf_enabled() && -+ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) -+ return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); -+ -+ return sprintf(buf, "%s%s%s%s%s%s%s\n", -+ spectre_v2_strings[spectre_v2_enabled], -+ ibpb_state(), -+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", -+ stibp_state(), -+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", -+ pbrsb_eibrs_state(), -+ spectre_v2_module_string()); -+} -+ - static ssize_t srbds_show_state(char *buf) - { - return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); - } - -+static ssize_t retbleed_show_state(char *buf) -+{ -+ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || -+ retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { -+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && -+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) -+ return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); -+ -+ return sprintf(buf, "%s; SMT %s\n", -+ retbleed_strings[retbleed_mitigation], -+ !sched_smt_active() ? "disabled" : -+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || -+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? -+ "enabled with STIBP protection" : "vulnerable"); -+ } -+ -+ return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); -+} -+ -+static ssize_t gds_show_state(char *buf) -+{ -+ return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); -+} -+ -+static ssize_t srso_show_state(char *buf) -+{ -+ if (boot_cpu_has(X86_FEATURE_SRSO_NO)) -+ return sysfs_emit(buf, "Mitigation: SMT disabled\n"); -+ -+ return sysfs_emit(buf, "%s%s\n", -+ srso_strings[srso_mitigation], -+ (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); -+} -+ - static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, - char *buf, unsigned int bug) - { -@@ -1746,12 +2715,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr - return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); - - case X86_BUG_SPECTRE_V2: -- return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], -- ibpb_state(), -- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", -- stibp_state(), -- boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", -- spectre_v2_module_string()); -+ return spectre_v2_show_state(buf); - - case X86_BUG_SPEC_STORE_BYPASS: - return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); -@@ -1773,6 +2737,19 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr - case X86_BUG_SRBDS: - return srbds_show_state(buf); - -+ case X86_BUG_MMIO_STALE_DATA: -+ case X86_BUG_MMIO_UNKNOWN: -+ return mmio_stale_data_show_state(buf); -+ -+ case X86_BUG_RETBLEED: -+ return retbleed_show_state(buf); -+ -+ case X86_BUG_GDS: -+ return gds_show_state(buf); -+ -+ case X86_BUG_SRSO: -+ return srso_show_state(buf); -+ - default: - break; - } -@@ -1824,4 +2801,27 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * - { - return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); - } -+ -+ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) -+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); -+ else -+ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); -+} -+ -+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); -+} -+ -+ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return cpu_show_common(dev, attr, buf, X86_BUG_GDS); -+} -+ -+ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) -+{ -+ return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); -+} - #endif -diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c -index b3410f1ac2175..3151c08bb54a5 100644 ---- a/arch/x86/kernel/cpu/common.c -+++ b/arch/x86/kernel/cpu/common.c -@@ -18,11 +18,15 @@ - #include - #include - #include -+#include - #include -+#include - #include - #include - #include -+#include - -+#include - #include - #include - #include -@@ -58,7 +62,7 @@ - #include - #include - #include --#include -+#include - - #include "cpu.h" - -@@ -964,6 +968,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c) - if (c->extended_cpuid_level >= 0x8000001f) - c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); - -+ if (c->extended_cpuid_level >= 0x80000021) -+ c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); -+ - init_scattered_cpuid_features(c); - init_speculation_control(c); - -@@ -1027,6 +1034,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) - #define NO_SWAPGS BIT(6) - #define NO_ITLB_MULTIHIT BIT(7) - #define NO_SPECTRE_V2 BIT(8) -+#define NO_MMIO BIT(9) -+#define NO_EIBRS_PBRSB BIT(10) - - #define VULNWL(vendor, family, model, whitelist) \ - X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) -@@ -1047,6 +1056,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { - VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), - - /* Intel Family 6 */ -+ VULNWL_INTEL(TIGERLAKE, NO_MMIO), -+ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), -+ VULNWL_INTEL(ALDERLAKE, NO_MMIO), -+ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), -+ - VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), -@@ -1065,9 +1079,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { - VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), - VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | 
NO_ITLB_MULTIHIT), - -- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), -+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), - - /* - * Technically, swapgs isn't serializing on AMD (despite it previously -@@ -1077,42 +1091,89 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { - * good enough for our purposes. - */ - -- VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT), -+ VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), -+ VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), -+ VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), - - /* AMD Family 0xf - 0x12 */ -- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - - /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ -- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), -+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), -+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - - /* Zhaoxin Family 7 */ -- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), -- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), -+ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), -+ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), - {} - }; - -+#define VULNBL(vendor, family, model, blacklist) \ -+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) -+ - #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ - X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ - INTEL_FAM6_##model, steppings, \ - X86_FEATURE_ANY, issues) - -+#define VULNBL_AMD(family, blacklist) \ -+ VULNBL(AMD, family, X86_MODEL_ANY, blacklist) -+ -+#define VULNBL_HYGON(family, blacklist) \ -+ VULNBL(HYGON, family, X86_MODEL_ANY, blacklist) -+ - #define SRBDS BIT(0) -+/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ -+#define MMIO BIT(1) -+/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ -+#define MMIO_SBDS BIT(2) -+/* CPU is affected by RETbleed, speculating where you would not expect it */ -+#define RETBLEED BIT(3) -+/* CPU is affected by SMT (cross-thread) return predictions */ -+#define SMT_RSB BIT(4) -+/* CPU is affected by SRSO */ 
-+#define SRSO BIT(5) -+/* CPU is affected by GDS */ -+#define GDS BIT(6) - - static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { - VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), -+ VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), -+ VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), -+ VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), -- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), -- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), -- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), -- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), -+ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), -+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), -+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), -+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), -+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), -+ VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), -+ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), -+ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), -+ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), -+ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), -+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), -+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), -+ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), -+ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), -+ VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), -+ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), -+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), -+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), -+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), -+ -+ VULNBL_AMD(0x15, RETBLEED), -+ VULNBL_AMD(0x16, RETBLEED), -+ VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), -+ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), -+ VULNBL_AMD(0x19, SRSO), - {} - }; - -@@ -1133,6 +1194,13 @@ u64 x86_read_arch_cap_msr(void) - return ia32_cap; - } - -+static bool arch_cap_mmio_immune(u64 ia32_cap) -+{ -+ return (ia32_cap & ARCH_CAP_FBSDP_NO && -+ ia32_cap & ARCH_CAP_PSDP_NO && -+ ia32_cap & ARCH_CAP_SBDR_SSDP_NO); -+} -+ - static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) - { - u64 ia32_cap = x86_read_arch_cap_msr(); -@@ -1186,12 +1254,61 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) - /* - * SRBDS affects CPUs which support RDRAND or RDSEED and are listed - * in the vulnerability blacklist. -+ * -+ * Some of the implications and mitigation of Shared Buffers Data -+ * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as -+ * SRBDS. 
- */ - if ((cpu_has(c, X86_FEATURE_RDRAND) || - cpu_has(c, X86_FEATURE_RDSEED)) && -- cpu_matches(cpu_vuln_blacklist, SRBDS)) -+ cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) - setup_force_cpu_bug(X86_BUG_SRBDS); - -+ /* -+ * Processor MMIO Stale Data bug enumeration -+ * -+ * Affected CPU list is generally enough to enumerate the vulnerability, -+ * but for virtualization case check for ARCH_CAP MSR bits also, VMM may -+ * not want the guest to enumerate the bug. -+ * -+ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, -+ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. -+ */ -+ if (!arch_cap_mmio_immune(ia32_cap)) { -+ if (cpu_matches(cpu_vuln_blacklist, MMIO)) -+ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); -+ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) -+ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); -+ } -+ -+ if (!cpu_has(c, X86_FEATURE_BTC_NO)) { -+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) -+ setup_force_cpu_bug(X86_BUG_RETBLEED); -+ } -+ -+ if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) && -+ !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && -+ !(ia32_cap & ARCH_CAP_PBRSB_NO)) -+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); -+ -+ if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) -+ setup_force_cpu_bug(X86_BUG_SMT_RSB); -+ -+ /* -+ * Check if CPU is vulnerable to GDS. If running in a virtual machine on -+ * an affected processor, the VMM may have disabled the use of GATHER by -+ * disabling AVX2. The only way to do this in HW is to clear XCR0[2], -+ * which means that AVX will be disabled. -+ */ -+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && -+ boot_cpu_has(X86_FEATURE_AVX)) -+ setup_force_cpu_bug(X86_BUG_GDS); -+ -+ if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { -+ if (cpu_matches(cpu_vuln_blacklist, SRSO)) -+ setup_force_cpu_bug(X86_BUG_SRSO); -+ } -+ - if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) - return; - -@@ -1333,10 +1450,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) - - sld_setup(c); - -- fpu__init_system(c); -- -- init_sigframe_size(); -- - #ifdef CONFIG_X86_32 - /* - * Regardless of whether PCID is enumerated, the SDM says -@@ -1396,9 +1509,8 @@ void __init early_cpu_init(void) - early_identify_cpu(&boot_cpu_data); - } - --static void detect_null_seg_behavior(struct cpuinfo_x86 *c) -+static bool detect_null_seg_behavior(void) - { --#ifdef CONFIG_X86_64 - /* - * Empirically, writing zero to a segment selector on AMD does - * not clear the base, whereas writing zero to a segment -@@ -1419,10 +1531,43 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c) - wrmsrl(MSR_FS_BASE, 1); - loadsegment(fs, 0); - rdmsrl(MSR_FS_BASE, tmp); -- if (tmp != 0) -- set_cpu_bug(c, X86_BUG_NULL_SEG); - wrmsrl(MSR_FS_BASE, old_base); --#endif -+ return tmp == 0; -+} -+ -+void check_null_seg_clears_base(struct cpuinfo_x86 *c) -+{ -+ /* BUG_NULL_SEG is only relevant with 64bit userspace */ -+ if (!IS_ENABLED(CONFIG_X86_64)) -+ return; -+ -+ /* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */ -+ if (c->extended_cpuid_level >= 0x80000021 && -+ cpuid_eax(0x80000021) & BIT(6)) -+ return; -+ -+ /* -+ * CPUID bit above wasn't set. If this kernel is still running -+ * as a HV guest, then the HV has decided not to advertize -+ * that CPUID bit for whatever reason. For example, one -+ * member of the migration pool might be vulnerable. Which -+ * means, the bug is present: set the BUG flag and return. 
-+ */ -+ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) { -+ set_cpu_bug(c, X86_BUG_NULL_SEG); -+ return; -+ } -+ -+ /* -+ * Zen2 CPUs also have this behaviour, but no CPUID bit. -+ * 0x18 is the respective family for Hygon. -+ */ -+ if ((c->x86 == 0x17 || c->x86 == 0x18) && -+ detect_null_seg_behavior()) -+ return; -+ -+ /* All the remaining ones are affected */ -+ set_cpu_bug(c, X86_BUG_NULL_SEG); - } - - static void generic_identify(struct cpuinfo_x86 *c) -@@ -1458,8 +1603,6 @@ static void generic_identify(struct cpuinfo_x86 *c) - - get_model_name(c); /* Default name */ - -- detect_null_seg_behavior(c); -- - /* - * ESPFIX is a strange bug. All real CPUs have it. Paravirt - * systems that run Linux at CPL > 0 may or may not have the -@@ -1684,6 +1827,10 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) - validate_apic_and_package_id(c); - x86_spec_ctrl_setup_ap(); - update_srbds_msr(); -+ if (boot_cpu_has_bug(X86_BUG_GDS)) -+ update_gds_msr(); -+ -+ tsx_ap_init(); - } - - static __init int setup_noclflush(char *arg) -@@ -2009,8 +2156,6 @@ void cpu_init(void) - - doublefault_init_cpu_tss(); - -- fpu__init_cpu(); -- - if (is_uv_system()) - uv_cpu_init(); - -@@ -2026,38 +2171,58 @@ void cpu_init_secondary(void) - */ - cpu_init_exception_handling(); - cpu_init(); -+ fpu__init_cpu(); - } - #endif - --/* -+#ifdef CONFIG_MICROCODE_LATE_LOADING -+/** -+ * store_cpu_caps() - Store a snapshot of CPU capabilities -+ * @curr_info: Pointer where to store it -+ * -+ * Returns: None -+ */ -+void store_cpu_caps(struct cpuinfo_x86 *curr_info) -+{ -+ /* Reload CPUID max function as it might've changed. */ -+ curr_info->cpuid_level = cpuid_eax(0); -+ -+ /* Copy all capability leafs and pick up the synthetic ones. */ -+ memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability, -+ sizeof(curr_info->x86_capability)); -+ -+ /* Get the hardware CPUID leafs */ -+ get_cpu_cap(curr_info); -+} -+ -+/** -+ * microcode_check() - Check if any CPU capabilities changed after an update. -+ * @prev_info: CPU capabilities stored before an update. -+ * - * The microcode loader calls this upon late microcode load to recheck features, - * only when microcode has been updated. Caller holds microcode_mutex and CPU - * hotplug lock. -+ * -+ * Return: None - */ --void microcode_check(void) -+void microcode_check(struct cpuinfo_x86 *prev_info) - { -- struct cpuinfo_x86 info; -+ struct cpuinfo_x86 curr_info; - - perf_check_microcode(); - -- /* Reload CPUID max function as it might've changed. */ -- info.cpuid_level = cpuid_eax(0); -- -- /* -- * Copy all capability leafs to pick up the synthetic ones so that -- * memcmp() below doesn't fail on that. The ones coming from CPUID will -- * get overwritten in get_cpu_cap(). 
-- */ -- memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); -+ amd_check_microcode(); - -- get_cpu_cap(&info); -+ store_cpu_caps(&curr_info); - -- if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) -+ if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, -+ sizeof(prev_info->x86_capability))) - return; - - pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); - pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); - } -+#endif - - /* - * Invoked from core CPU hotplug code after hotplug operations -@@ -2069,3 +2234,69 @@ void arch_smt_update(void) - /* Check whether IPI broadcasting can be enabled */ - apic_smt_update(); - } -+ -+void __init arch_cpu_finalize_init(void) -+{ -+ identify_boot_cpu(); -+ -+ /* -+ * identify_boot_cpu() initialized SMT support information, let the -+ * core code know. -+ */ -+ cpu_smt_check_topology(); -+ -+ if (!IS_ENABLED(CONFIG_SMP)) { -+ pr_info("CPU: "); -+ print_cpu_info(&boot_cpu_data); -+ } -+ -+ cpu_select_mitigations(); -+ -+ arch_smt_update(); -+ -+ if (IS_ENABLED(CONFIG_X86_32)) { -+ /* -+ * Check whether this is a real i386 which is not longer -+ * supported and fixup the utsname. -+ */ -+ if (boot_cpu_data.x86 < 4) -+ panic("Kernel requires i486+ for 'invlpg' and other features"); -+ -+ init_utsname()->machine[1] = -+ '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); -+ } -+ -+ /* -+ * Must be before alternatives because it might set or clear -+ * feature bits. -+ */ -+ fpu__init_system(); -+ fpu__init_cpu(); -+ -+ alternative_instructions(); -+ -+ if (IS_ENABLED(CONFIG_X86_64)) { -+ /* -+ * Make sure the first 2MB area is not mapped by huge pages -+ * There are typically fixed size MTRRs in there and overlapping -+ * MTRRs into large pages causes slow downs. -+ * -+ * Right now we don't do that with gbpages because there seems -+ * very little benefit for that case. -+ */ -+ if (!direct_gbpages) -+ set_memory_4k((unsigned long)__va(0), 1); -+ } else { -+ fpu__init_check_bugs(); -+ } -+ -+ /* -+ * This needs to be called before any devices perform DMA -+ * operations that might use the SWIOTLB bounce buffers. It will -+ * mark the bounce buffers as decrypted so that their usage will -+ * not cause "plain-text" data to be decrypted when accessed. It -+ * must be called after late_time_init() so that Hyper-V x86/x64 -+ * hypercalls work when the SWIOTLB bounce buffers are decrypted. 
-+ */ -+ mem_encrypt_init(); -+} -diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h -index 95521302630d4..d9aeb335002dd 100644 ---- a/arch/x86/kernel/cpu/cpu.h -+++ b/arch/x86/kernel/cpu/cpu.h -@@ -55,13 +55,14 @@ enum tsx_ctrl_states { - extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state; - - extern void __init tsx_init(void); --extern void tsx_enable(void); --extern void tsx_disable(void); --extern void tsx_clear_cpuid(void); -+void tsx_ap_init(void); - #else - static inline void tsx_init(void) { } -+static inline void tsx_ap_init(void) { } - #endif /* CONFIG_CPU_SUP_INTEL */ - -+extern void init_spectral_chicken(struct cpuinfo_x86 *c); -+ - extern void get_cpu_cap(struct cpuinfo_x86 *c); - extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); - extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); -@@ -75,11 +76,14 @@ extern int detect_extended_topology_early(struct cpuinfo_x86 *c); - extern int detect_extended_topology(struct cpuinfo_x86 *c); - extern int detect_ht_early(struct cpuinfo_x86 *c); - extern void detect_ht(struct cpuinfo_x86 *c); -+extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); - - unsigned int aperfmperf_get_khz(int cpu); -+void cpu_select_mitigations(void); - - extern void x86_spec_ctrl_setup_ap(void); - extern void update_srbds_msr(void); -+extern void update_gds_msr(void); - - extern u64 x86_read_arch_cap_msr(void); - -diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c -index da696eb4821a0..e77032c5f85cc 100644 ---- a/arch/x86/kernel/cpu/feat_ctl.c -+++ b/arch/x86/kernel/cpu/feat_ctl.c -@@ -1,11 +1,11 @@ - // SPDX-License-Identifier: GPL-2.0 - #include - -+#include - #include - #include - #include - #include --#include "cpu.h" - - #undef pr_fmt - #define pr_fmt(fmt) "x86/cpu: " fmt -diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c -index 6d50136f7ab98..c393b8773ace6 100644 ---- a/arch/x86/kernel/cpu/hygon.c -+++ b/arch/x86/kernel/cpu/hygon.c -@@ -302,6 +302,12 @@ static void init_hygon(struct cpuinfo_x86 *c) - /* get apicid instead of initial apic id from cpuid */ - c->apicid = hard_smp_processor_id(); - -+ /* -+ * XXX someone from Hygon needs to confirm this DTRT -+ * -+ init_spectral_chicken(c); -+ */ -+ - set_cpu_cap(c, X86_FEATURE_ZEN); - set_cpu_cap(c, X86_FEATURE_CPB); - -@@ -320,8 +326,8 @@ static void init_hygon(struct cpuinfo_x86 *c) - * msr_set_bit() uses the safe accessors, too, even if the MSR - * is not present. - */ -- msr_set_bit(MSR_F10H_DECFG, -- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); -+ msr_set_bit(MSR_AMD64_DE_CFG, -+ MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT); - - /* A serializing LFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); -@@ -335,6 +341,8 @@ static void init_hygon(struct cpuinfo_x86 *c) - /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. 
*/ - if (!cpu_has(c, X86_FEATURE_XENPV)) - set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); -+ -+ check_null_seg_clears_base(c); - } - - static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) -diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c -index 8321c43554a1d..ae7d4c85f4f43 100644 ---- a/arch/x86/kernel/cpu/intel.c -+++ b/arch/x86/kernel/cpu/intel.c -@@ -91,7 +91,7 @@ static bool ring3mwait_disabled __read_mostly; - static int __init ring3mwait_disable(char *__unused) - { - ring3mwait_disabled = true; -- return 0; -+ return 1; - } - __setup("ring3mwait=disable", ring3mwait_disable); - -@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c) - - init_intel_misc_features(c); - -- if (tsx_ctrl_state == TSX_CTRL_ENABLE) -- tsx_enable(); -- else if (tsx_ctrl_state == TSX_CTRL_DISABLE) -- tsx_disable(); -- else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT) -- tsx_clear_cpuid(); -- - split_lock_init(); - bus_lock_init(); - -@@ -1152,22 +1145,23 @@ static void bus_lock_init(void) - { - u64 val; - -- /* -- * Warn and fatal are handled by #AC for split lock if #AC for -- * split lock is supported. -- */ -- if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) || -- (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && -- (sld_state == sld_warn || sld_state == sld_fatal)) || -- sld_state == sld_off) -+ if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) - return; - -- /* -- * Enable #DB for bus lock. All bus locks are handled in #DB except -- * split locks are handled in #AC in the fatal case. -- */ - rdmsrl(MSR_IA32_DEBUGCTLMSR, val); -- val |= DEBUGCTLMSR_BUS_LOCK_DETECT; -+ -+ if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && -+ (sld_state == sld_warn || sld_state == sld_fatal)) || -+ sld_state == sld_off) { -+ /* -+ * Warn and fatal are handled by #AC for split lock if #AC for -+ * split lock is supported. -+ */ -+ val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; -+ } else { -+ val |= DEBUGCTLMSR_BUS_LOCK_DETECT; -+ } -+ - wrmsrl(MSR_IA32_DEBUGCTLMSR, val); - } - -diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c -index 08831acc1d036..d4e75be64a4c5 100644 ---- a/arch/x86/kernel/cpu/mce/amd.c -+++ b/arch/x86/kernel/cpu/mce/amd.c -@@ -210,10 +210,10 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); - * A list of the banks enabled on each logical CPU. Controls which respective - * descriptors to initialize later in mce_threshold_create_device(). - */ --static DEFINE_PER_CPU(unsigned int, bank_map); -+static DEFINE_PER_CPU(u64, bank_map); - - /* Map of banks that have more than MCA_MISC0 available. 
*/ --static DEFINE_PER_CPU(u32, smca_misc_banks_map); -+static DEFINE_PER_CPU(u64, smca_misc_banks_map); - - static void amd_threshold_interrupt(void); - static void amd_deferred_error_interrupt(void); -@@ -242,7 +242,7 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu) - return; - - if (low & MASK_BLKPTR_LO) -- per_cpu(smca_misc_banks_map, cpu) |= BIT(bank); -+ per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank); - - } - -@@ -400,7 +400,7 @@ static void threshold_restart_bank(void *_tr) - u32 hi, lo; - - /* sysfs write might race against an offline operation */ -- if (this_cpu_read(threshold_banks)) -+ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off) - return; - - rdmsr(tr->b->address, lo, hi); -@@ -505,7 +505,7 @@ static u32 smca_get_block_address(unsigned int bank, unsigned int block, - if (!block) - return MSR_AMD64_SMCA_MCx_MISC(bank); - -- if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank))) -+ if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank))) - return 0; - - return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); -@@ -526,7 +526,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high, - /* Fall back to method we used for older processors: */ - switch (block) { - case 0: -- addr = msr_ops.misc(bank); -+ addr = mca_msr_reg(bank, MCA_MISC); - break; - case 1: - offset = ((low & MASK_BLKPTR_LO) >> 21); -@@ -549,7 +549,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, - int new; - - if (!block) -- per_cpu(bank_map, cpu) |= (1 << bank); -+ per_cpu(bank_map, cpu) |= BIT_ULL(bank); - - memset(&b, 0, sizeof(b)); - b.cpu = cpu; -@@ -965,6 +965,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) - return status & MCI_STATUS_DEFERRED; - } - -+static bool _log_error_deferred(unsigned int bank, u32 misc) -+{ -+ if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), -+ mca_msr_reg(bank, MCA_ADDR), misc)) -+ return false; -+ -+ /* -+ * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. -+ * Return true here to avoid accessing these registers. -+ */ -+ if (!mce_flags.smca) -+ return true; -+ -+ /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ -+ wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); -+ return true; -+} -+ - /* - * We have three scenarios for checking for Deferred errors: - * -@@ -976,20 +994,9 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) - */ - static void log_error_deferred(unsigned int bank) - { -- bool defrd; -- -- defrd = _log_error_bank(bank, msr_ops.status(bank), -- msr_ops.addr(bank), 0); -- -- if (!mce_flags.smca) -+ if (_log_error_deferred(bank, 0)) - return; - -- /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */ -- if (defrd) { -- wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); -- return; -- } -- - /* - * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check - * for a valid error. 
-@@ -1009,7 +1016,7 @@ static void amd_deferred_error_interrupt(void) - - static void log_error_thresholding(unsigned int bank, u64 misc) - { -- _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc); -+ _log_error_deferred(bank, misc); - } - - static void log_and_reset_block(struct threshold_block *block) -@@ -1054,7 +1061,7 @@ static void amd_threshold_interrupt(void) - return; - - for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { -- if (!(per_cpu(bank_map, cpu) & (1 << bank))) -+ if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) - continue; - - first_block = bp[bank]->blocks; -@@ -1397,7 +1404,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu, - } - } - -- err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank)); -+ err = allocate_threshold_blocks(cpu, b, bank, 0, mca_msr_reg(bank, MCA_MISC)); - if (err) - goto out_kobj; - -@@ -1470,10 +1477,23 @@ out_free: - kfree(bank); - } - -+static void __threshold_remove_device(struct threshold_bank **bp) -+{ -+ unsigned int bank, numbanks = this_cpu_read(mce_num_banks); -+ -+ for (bank = 0; bank < numbanks; bank++) { -+ if (!bp[bank]) -+ continue; -+ -+ threshold_remove_bank(bp[bank]); -+ bp[bank] = NULL; -+ } -+ kfree(bp); -+} -+ - int mce_threshold_remove_device(unsigned int cpu) - { - struct threshold_bank **bp = this_cpu_read(threshold_banks); -- unsigned int bank, numbanks = this_cpu_read(mce_num_banks); - - if (!bp) - return 0; -@@ -1484,13 +1504,7 @@ int mce_threshold_remove_device(unsigned int cpu) - */ - this_cpu_write(threshold_banks, NULL); - -- for (bank = 0; bank < numbanks; bank++) { -- if (bp[bank]) { -- threshold_remove_bank(bp[bank]); -- bp[bank] = NULL; -- } -- } -- kfree(bp); -+ __threshold_remove_device(bp); - return 0; - } - -@@ -1524,18 +1538,17 @@ int mce_threshold_create_device(unsigned int cpu) - return -ENOMEM; - - for (bank = 0; bank < numbanks; ++bank) { -- if (!(this_cpu_read(bank_map) & (1 << bank))) -+ if (!(this_cpu_read(bank_map) & BIT_ULL(bank))) - continue; - err = threshold_create_bank(bp, cpu, bank); -- if (err) -- goto out_err; -+ if (err) { -+ __threshold_remove_device(bp); -+ return err; -+ } - } - this_cpu_write(threshold_banks, bp); - - if (thresholding_irq_en) - mce_threshold_vector = amd_threshold_interrupt; - return 0; --out_err: -- mce_threshold_remove_device(cpu); -- return err; - } -diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c -index 0e3ae64d3b76b..b08b90cdc2a3e 100644 ---- a/arch/x86/kernel/cpu/mce/apei.c -+++ b/arch/x86/kernel/cpu/mce/apei.c -@@ -29,15 +29,26 @@ - void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) - { - struct mce m; -+ int lsb; - - if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) - return; - -+ /* -+ * Even if the ->validation_bits are set for address mask, -+ * to be extra safe, check and reject an error radius '0', -+ * and fall back to the default page size. 
-+ */ -+ if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK) -+ lsb = find_first_bit((void *)&mem_err->physical_addr_mask, PAGE_SHIFT); -+ else -+ lsb = PAGE_SHIFT; -+ - mce_setup(&m); - m.bank = -1; - /* Fake a memory read error with unknown channel */ - m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f; -- m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT; -+ m.misc = (MCI_MISC_ADDR_PHYS << 6) | lsb; - - if (severity >= GHES_SEV_RECOVERABLE) - m.status |= MCI_STATUS_UC; -diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c -index 193204aee8801..a0727723676b4 100644 ---- a/arch/x86/kernel/cpu/mce/core.c -+++ b/arch/x86/kernel/cpu/mce/core.c -@@ -176,53 +176,27 @@ void mce_unregister_decode_chain(struct notifier_block *nb) - } - EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); - --static inline u32 ctl_reg(int bank) -+u32 mca_msr_reg(int bank, enum mca_msr reg) - { -- return MSR_IA32_MCx_CTL(bank); --} -- --static inline u32 status_reg(int bank) --{ -- return MSR_IA32_MCx_STATUS(bank); --} -- --static inline u32 addr_reg(int bank) --{ -- return MSR_IA32_MCx_ADDR(bank); --} -- --static inline u32 misc_reg(int bank) --{ -- return MSR_IA32_MCx_MISC(bank); --} -- --static inline u32 smca_ctl_reg(int bank) --{ -- return MSR_AMD64_SMCA_MCx_CTL(bank); --} -- --static inline u32 smca_status_reg(int bank) --{ -- return MSR_AMD64_SMCA_MCx_STATUS(bank); --} -+ if (mce_flags.smca) { -+ switch (reg) { -+ case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); -+ case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); -+ case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); -+ case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); -+ } -+ } - --static inline u32 smca_addr_reg(int bank) --{ -- return MSR_AMD64_SMCA_MCx_ADDR(bank); --} -+ switch (reg) { -+ case MCA_CTL: return MSR_IA32_MCx_CTL(bank); -+ case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); -+ case MCA_MISC: return MSR_IA32_MCx_MISC(bank); -+ case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); -+ } - --static inline u32 smca_misc_reg(int bank) --{ -- return MSR_AMD64_SMCA_MCx_MISC(bank); -+ return 0; - } - --struct mca_msr_regs msr_ops = { -- .ctl = ctl_reg, -- .status = status_reg, -- .addr = addr_reg, -- .misc = misc_reg --}; -- - static void __print_mce(struct mce *m) - { - pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", -@@ -295,11 +269,17 @@ static void wait_for_panic(void) - panic("Panicing machine check CPU died"); - } - --static void mce_panic(const char *msg, struct mce *final, char *exp) -+static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) - { -- int apei_err = 0; - struct llist_node *pending; - struct mce_evt_llist *l; -+ int apei_err = 0; -+ -+ /* -+ * Allow instrumentation around external facilities usage. Not that it -+ * matters a whole lot since the machine is going to panic anyway. 
-+ */ -+ instrumentation_begin(); - - if (!fake_panic) { - /* -@@ -314,7 +294,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) - } else { - /* Don't log too much for fake panic */ - if (atomic_inc_return(&mce_fake_panicked) > 1) -- return; -+ goto out; - } - pending = mce_gen_pool_prepare_records(); - /* First print corrected ones that are still unlogged */ -@@ -352,6 +332,9 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) - panic(msg); - } else - pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); -+ -+out: -+ instrumentation_end(); - } - - /* Support code for software error injection */ -@@ -362,24 +345,27 @@ static int msr_to_offset(u32 msr) - - if (msr == mca_cfg.rip_msr) - return offsetof(struct mce, ip); -- if (msr == msr_ops.status(bank)) -+ if (msr == mca_msr_reg(bank, MCA_STATUS)) - return offsetof(struct mce, status); -- if (msr == msr_ops.addr(bank)) -+ if (msr == mca_msr_reg(bank, MCA_ADDR)) - return offsetof(struct mce, addr); -- if (msr == msr_ops.misc(bank)) -+ if (msr == mca_msr_reg(bank, MCA_MISC)) - return offsetof(struct mce, misc); - if (msr == MSR_IA32_MCG_STATUS) - return offsetof(struct mce, mcgstatus); - return -1; - } - --__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) - { -- pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", -- (unsigned int)regs->cx, regs->ip, (void *)regs->ip); -+ if (wrmsr) { -+ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", -+ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, -+ regs->ip, (void *)regs->ip); -+ } else { -+ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", -+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip); -+ } - - show_stack_regs(regs); - -@@ -387,8 +373,6 @@ __visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, - - while (true) - cpu_relax(); -- -- return true; - } - - /* MSR access wrappers used for error injection */ -@@ -420,32 +404,13 @@ static noinstr u64 mce_rdmsrl(u32 msr) - */ - asm volatile("1: rdmsr\n" - "2:\n" -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault) -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE) - : EAX_EDX_RET(val, low, high) : "c" (msr)); - - - return EAX_EDX_VAL(val, low, high); - } - --__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) --{ -- pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", -- (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, -- regs->ip, (void *)regs->ip); -- -- show_stack_regs(regs); -- -- panic("MCA architectural violation!\n"); -- -- while (true) -- cpu_relax(); -- -- return true; --} -- - static noinstr void mce_wrmsrl(u32 msr, u64 v) - { - u32 low, high; -@@ -470,7 +435,7 @@ static noinstr void mce_wrmsrl(u32 msr, u64 v) - /* See comment in mce_rdmsrl() */ - asm volatile("1: wrmsr\n" - "2:\n" -- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault) -+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE) - : : "c" (msr), "a"(low), "d" (high) : "memory"); - } - -@@ -682,13 +647,13 @@ static struct notifier_block mce_default_nb = { - /* - * Read ADDR and MISC registers. 
- */ --static void mce_read_aux(struct mce *m, int i) -+static noinstr void mce_read_aux(struct mce *m, int i) - { - if (m->status & MCI_STATUS_MISCV) -- m->misc = mce_rdmsrl(msr_ops.misc(i)); -+ m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); - - if (m->status & MCI_STATUS_ADDRV) { -- m->addr = mce_rdmsrl(msr_ops.addr(i)); -+ m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); - - /* - * Mask the reported address by the reported granularity. -@@ -758,7 +723,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) - m.bank = i; - - barrier(); -- m.status = mce_rdmsrl(msr_ops.status(i)); -+ m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); - - /* If this entry is not valid, ignore it */ - if (!(m.status & MCI_STATUS_VAL)) -@@ -826,7 +791,7 @@ clear_it: - /* - * Clear state for this bank. - */ -- mce_wrmsrl(msr_ops.status(i), 0); -+ mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); - } - - /* -@@ -851,7 +816,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, - int i; - - for (i = 0; i < this_cpu_read(mce_num_banks); i++) { -- m->status = mce_rdmsrl(msr_ops.status(i)); -+ m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); - if (!(m->status & MCI_STATUS_VAL)) - continue; - -@@ -1072,10 +1037,13 @@ static int mce_start(int *no_way_out) - * Synchronize between CPUs after main scanning loop. - * This invokes the bulk of the Monarch processing. - */ --static int mce_end(int order) -+static noinstr int mce_end(int order) - { -- int ret = -1; - u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; -+ int ret = -1; -+ -+ /* Allow instrumentation around external facilities. */ -+ instrumentation_begin(); - - if (!timeout) - goto reset; -@@ -1119,7 +1087,8 @@ static int mce_end(int order) - /* - * Don't reset anything. That's done by the Monarch. - */ -- return 0; -+ ret = 0; -+ goto out; - } - - /* -@@ -1135,6 +1104,10 @@ reset: - * Let others run again. - */ - atomic_set(&mce_executing, 0); -+ -+out: -+ instrumentation_end(); -+ - return ret; - } - -@@ -1144,7 +1117,7 @@ static void mce_clear_state(unsigned long *toclear) - - for (i = 0; i < this_cpu_read(mce_num_banks); i++) { - if (test_bit(i, toclear)) -- mce_wrmsrl(msr_ops.status(i), 0); -+ mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); - } - } - -@@ -1203,7 +1176,7 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin - m->addr = 0; - m->bank = i; - -- m->status = mce_rdmsrl(msr_ops.status(i)); -+ m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); - if (!(m->status & MCI_STATUS_VAL)) - continue; - -@@ -1280,10 +1253,12 @@ static void kill_me_maybe(struct callback_head *cb) - - /* - * -EHWPOISON from memory_failure() means that it already sent SIGBUS -- * to the current process with the proper error info, so no need to -- * send SIGBUS here again. -+ * to the current process with the proper error info, -+ * -EOPNOTSUPP means hwpoison_filter() filtered the error event, -+ * -+ * In both cases, no further processing is required. - */ -- if (ret == -EHWPOISON) -+ if (ret == -EHWPOISON || ret == -EOPNOTSUPP) - return; - - if (p->mce_vaddr != (void __user *)-1l) { -@@ -1454,6 +1429,14 @@ noinstr void do_machine_check(struct pt_regs *regs) - if (worst != MCE_AR_SEVERITY && !kill_current_task) - goto out; - -+ /* -+ * Enable instrumentation around the external facilities like -+ * task_work_add() (via queue_task_work()), fixup_exception() etc. -+ * For now, that is. Fixing this properly would need a lot more involved -+ * reorganization. 
-+ */ -+ instrumentation_begin(); -+ - /* Fault was in user mode and we need to take some action */ - if ((m.cs & 3) == 3) { - /* If this triggers there is no way to recover. Die hard. */ -@@ -1479,6 +1462,9 @@ noinstr void do_machine_check(struct pt_regs *regs) - if (m.kflags & MCE_IN_KERNEL_COPYIN) - queue_task_work(&m, msg, kill_current_task); - } -+ -+ instrumentation_end(); -+ - out: - mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); - } -@@ -1687,8 +1673,8 @@ static void __mcheck_cpu_init_clear_banks(void) - - if (!b->init) - continue; -- wrmsrl(msr_ops.ctl(i), b->ctl); -- wrmsrl(msr_ops.status(i), 0); -+ wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); -+ wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); - } - } - -@@ -1714,7 +1700,7 @@ static void __mcheck_cpu_check_banks(void) - if (!b->init) - continue; - -- rdmsrl(msr_ops.ctl(i), msrval); -+ rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); - b->init = !!msrval; - } - } -@@ -1871,13 +1857,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) - mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); - mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); - mce_flags.amd_threshold = 1; -- -- if (mce_flags.smca) { -- msr_ops.ctl = smca_ctl_reg; -- msr_ops.status = smca_status_reg; -- msr_ops.addr = smca_addr_reg; -- msr_ops.misc = smca_misc_reg; -- } - } - } - -@@ -2253,7 +2232,7 @@ static void mce_disable_error_reporting(void) - struct mce_bank *b = &mce_banks[i]; - - if (b->init) -- wrmsrl(msr_ops.ctl(i), 0); -+ wrmsrl(mca_msr_reg(i, MCA_CTL), 0); - } - return; - } -@@ -2323,6 +2302,7 @@ static void mce_restart(void) - { - mce_timer_delete_all(); - on_each_cpu(mce_cpu_restart, NULL, 1); -+ mce_schedule_work(); - } - - /* Toggle features for corrected errors */ -@@ -2605,7 +2585,7 @@ static void mce_reenable_cpu(void) - struct mce_bank *b = &mce_banks[i]; - - if (b->init) -- wrmsrl(msr_ops.ctl(i), b->ctl); -+ wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); - } - } - -diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c -index 0bfc14041bbb4..b63b548497c14 100644 ---- a/arch/x86/kernel/cpu/mce/inject.c -+++ b/arch/x86/kernel/cpu/mce/inject.c -@@ -350,7 +350,7 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, - char buf[MAX_FLAG_OPT_SIZE], *__buf; - int err; - -- if (cnt > MAX_FLAG_OPT_SIZE) -+ if (!cnt || cnt > MAX_FLAG_OPT_SIZE) - return -EINVAL; - - if (copy_from_user(&buf, ubuf, cnt)) -diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c -index acfd5d9f93c68..baafbb37be678 100644 ---- a/arch/x86/kernel/cpu/mce/intel.c -+++ b/arch/x86/kernel/cpu/mce/intel.c -@@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) - case INTEL_FAM6_BROADWELL_X: - case INTEL_FAM6_SKYLAKE_X: - case INTEL_FAM6_ICELAKE_X: -+ case INTEL_FAM6_ICELAKE_D: - case INTEL_FAM6_SAPPHIRERAPIDS_X: - case INTEL_FAM6_XEON_PHI_KNL: - case INTEL_FAM6_XEON_PHI_KNM: -@@ -547,12 +548,13 @@ bool intel_filter_mce(struct mce *m) - { - struct cpuinfo_x86 *c = &boot_cpu_data; - -- /* MCE errata HSD131, HSM142, HSW131, BDM48, and HSM142 */ -+ /* MCE errata HSD131, HSM142, HSW131, BDM48, HSM142 and SKX37 */ - if ((c->x86 == 6) && - ((c->x86_model == INTEL_FAM6_HASWELL) || - (c->x86_model == INTEL_FAM6_HASWELL_L) || - (c->x86_model == INTEL_FAM6_BROADWELL) || -- (c->x86_model == INTEL_FAM6_HASWELL_G)) && -+ (c->x86_model == INTEL_FAM6_HASWELL_G) || -+ (c->x86_model == INTEL_FAM6_SKYLAKE_X)) && - (m->bank == 0) && - ((m->status & 0xa0000000ffffffff) == 0x80000000000f0005)) - return true; -diff --git 
a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h -index 88dcc79cfb07d..760b57814760a 100644 ---- a/arch/x86/kernel/cpu/mce/internal.h -+++ b/arch/x86/kernel/cpu/mce/internal.h -@@ -168,14 +168,14 @@ struct mce_vendor_flags { - - extern struct mce_vendor_flags mce_flags; - --struct mca_msr_regs { -- u32 (*ctl) (int bank); -- u32 (*status) (int bank); -- u32 (*addr) (int bank); -- u32 (*misc) (int bank); -+enum mca_msr { -+ MCA_CTL, -+ MCA_STATUS, -+ MCA_ADDR, -+ MCA_MISC, - }; - --extern struct mca_msr_regs msr_ops; -+u32 mca_msr_reg(int bank, enum mca_msr reg); - - /* Decide whether to add MCE record to MCE event pool or filter it out. */ - extern bool filter_mce(struct mce *m); -@@ -186,14 +186,4 @@ extern bool amd_filter_mce(struct mce *m); - static inline bool amd_filter_mce(struct mce *m) { return false; }; - #endif - --__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr); -- --__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr); -- - #endif /* __X86_MCE_INTERNAL_H__ */ -diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c -index 17e6314431169..d9b77a74f8d2e 100644 ---- a/arch/x86/kernel/cpu/mce/severity.c -+++ b/arch/x86/kernel/cpu/mce/severity.c -@@ -265,25 +265,26 @@ static bool is_copy_from_user(struct pt_regs *regs) - */ - static int error_context(struct mce *m, struct pt_regs *regs) - { -- enum handler_type t; -- - if ((m->cs & 3) == 3) - return IN_USER; - if (!mc_recoverable(m->mcgstatus)) - return IN_KERNEL; - -- t = ex_get_fault_handler_type(m->ip); -- if (t == EX_HANDLER_FAULT) { -- m->kflags |= MCE_IN_KERNEL_RECOV; -- return IN_KERNEL_RECOV; -- } -- if (t == EX_HANDLER_UACCESS && regs && is_copy_from_user(regs)) { -- m->kflags |= MCE_IN_KERNEL_RECOV; -+ switch (ex_get_fixup_type(m->ip)) { -+ case EX_TYPE_UACCESS: -+ case EX_TYPE_COPY: -+ if (!regs || !is_copy_from_user(regs)) -+ return IN_KERNEL; - m->kflags |= MCE_IN_KERNEL_COPYIN; -+ fallthrough; -+ case EX_TYPE_FAULT: -+ case EX_TYPE_FAULT_MCE_SAFE: -+ case EX_TYPE_DEFAULT_MCE_SAFE: -+ m->kflags |= MCE_IN_KERNEL_RECOV; - return IN_KERNEL_RECOV; -+ default: -+ return IN_KERNEL; - } -- -- return IN_KERNEL; - } - - static int mce_severity_amd_smca(struct mce *m, enum context err_ctx) -diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c -index 3d4a48336084f..6a95a52d08daa 100644 ---- a/arch/x86/kernel/cpu/microcode/amd.c -+++ b/arch/x86/kernel/cpu/microcode/amd.c -@@ -55,7 +55,9 @@ struct cont_desc { - }; - - static u32 ucode_new_rev; --static u8 amd_ucode_patch[PATCH_MAX_SIZE]; -+ -+/* One blob per node. 
*/ -+static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE]; - - /* - * Microcode patch container file is prepended to the initrd in cpio -@@ -428,7 +430,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p - patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); - #else - new_rev = &ucode_new_rev; -- patch = &amd_ucode_patch; -+ patch = &amd_ucode_patch[0]; - #endif - - desc.cpuid_1_eax = cpuid_1_eax; -@@ -440,7 +442,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p - return ret; - - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); -- if (rev >= mc->hdr.patch_id) -+ -+ /* -+ * Allow application of the same revision to pick up SMT-specific -+ * changes even if the revision of the other SMT thread is already -+ * up-to-date. -+ */ -+ if (rev > mc->hdr.patch_id) - return ret; - - if (!__apply_microcode_amd(mc)) { -@@ -522,8 +530,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) - - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - -- /* Check whether we have saved a new patch already: */ -- if (*new_rev && rev < mc->hdr.patch_id) { -+ /* -+ * Check whether a new patch has been saved already. Also, allow application of -+ * the same revision in order to pick up SMT-thread-specific configuration even -+ * if the sibling SMT thread already has an up-to-date revision. -+ */ -+ if (*new_rev && rev <= mc->hdr.patch_id) { - if (!__apply_microcode_amd(mc)) { - *new_rev = mc->hdr.patch_id; - return; -@@ -537,8 +549,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) - apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false); - } - --static enum ucode_state --load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); -+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); - - int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) - { -@@ -556,19 +567,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) - if (!desc.mc) - return -EINVAL; - -- ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); -+ ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret > UCODE_UPDATED) - return -EINVAL; - - return 0; - } - --void reload_ucode_amd(void) -+void reload_ucode_amd(unsigned int cpu) - { -- struct microcode_amd *mc; - u32 rev, dummy __always_unused; -+ struct microcode_amd *mc; - -- mc = (struct microcode_amd *)amd_ucode_patch; -+ mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)]; - - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - -@@ -688,7 +699,7 @@ static enum ucode_state apply_microcode_amd(int cpu) - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - - /* need to apply patch? 
*/ -- if (rev >= mc_amd->hdr.patch_id) { -+ if (rev > mc_amd->hdr.patch_id) { - ret = UCODE_OK; - goto out; - } -@@ -782,6 +793,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, - kfree(patch); - return -EINVAL; - } -+ patch->size = *patch_size; - - mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); - proc_id = mc_hdr->processor_rev_id; -@@ -833,9 +845,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, - return UCODE_OK; - } - --static enum ucode_state --load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) -+static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) - { -+ struct cpuinfo_x86 *c; -+ unsigned int nid, cpu; - struct ucode_patch *p; - enum ucode_state ret; - -@@ -848,22 +861,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) - return ret; - } - -- p = find_patch(0); -- if (!p) { -- return ret; -- } else { -- if (boot_cpu_data.microcode >= p->patch_id) -- return ret; -+ for_each_node(nid) { -+ cpu = cpumask_first(cpumask_of_node(nid)); -+ c = &cpu_data(cpu); - -- ret = UCODE_NEW; -- } -+ p = find_patch(cpu); -+ if (!p) -+ continue; - -- /* save BSP's matching patch for early load */ -- if (!save) -- return ret; -+ if (c->microcode >= p->patch_id) -+ continue; -+ -+ ret = UCODE_NEW; - -- memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); -- memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); -+ memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE); -+ memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); -+ } - - return ret; - } -@@ -889,12 +902,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, - { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; - struct cpuinfo_x86 *c = &cpu_data(cpu); -- bool bsp = c->cpu_index == boot_cpu_data.cpu_index; - enum ucode_state ret = UCODE_NFOUND; - const struct firmware *fw; - - /* reload ucode container only on the boot cpu */ -- if (!refresh_fw || !bsp) -+ if (!refresh_fw) - return UCODE_OK; - - if (c->x86 >= 0x15) -@@ -909,7 +921,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, - if (!verify_container(fw->data, fw->size, false)) - goto fw_release; - -- ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); -+ ret = load_microcode_amd(c->x86, fw->data, fw->size); - - fw_release: - release_firmware(fw); -diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c -index efb69be41ab18..d2f00d77e9adf 100644 ---- a/arch/x86/kernel/cpu/microcode/core.c -+++ b/arch/x86/kernel/cpu/microcode/core.c -@@ -315,7 +315,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) - #endif - } - --void reload_early_microcode(void) -+void reload_early_microcode(unsigned int cpu) - { - int vendor, family; - -@@ -329,7 +329,7 @@ void reload_early_microcode(void) - break; - case X86_VENDOR_AMD: - if (family >= 0x10) -- reload_ucode_amd(); -+ reload_ucode_amd(cpu); - break; - default: - break; -@@ -390,101 +390,10 @@ static int apply_microcode_on_target(int cpu) - return ret; - } - --#ifdef CONFIG_MICROCODE_OLD_INTERFACE --static int do_microcode_update(const void __user *buf, size_t size) --{ -- int error = 0; -- int cpu; -- -- for_each_online_cpu(cpu) { -- struct ucode_cpu_info *uci = ucode_cpu_info + cpu; -- enum ucode_state ustate; -- -- if (!uci->valid) -- continue; -- -- ustate = microcode_ops->request_microcode_user(cpu, buf, size); -- if (ustate == UCODE_ERROR) { -- 
error = -1; -- break; -- } else if (ustate == UCODE_NEW) { -- apply_microcode_on_target(cpu); -- } -- } -- -- return error; --} -- --static int microcode_open(struct inode *inode, struct file *file) --{ -- return capable(CAP_SYS_RAWIO) ? stream_open(inode, file) : -EPERM; --} -- --static ssize_t microcode_write(struct file *file, const char __user *buf, -- size_t len, loff_t *ppos) --{ -- ssize_t ret = -EINVAL; -- unsigned long nr_pages = totalram_pages(); -- -- if ((len >> PAGE_SHIFT) > nr_pages) { -- pr_err("too much data (max %ld pages)\n", nr_pages); -- return ret; -- } -- -- cpus_read_lock(); -- mutex_lock(µcode_mutex); -- -- if (do_microcode_update(buf, len) == 0) -- ret = (ssize_t)len; -- -- if (ret > 0) -- perf_check_microcode(); -- -- mutex_unlock(µcode_mutex); -- cpus_read_unlock(); -- -- return ret; --} -- --static const struct file_operations microcode_fops = { -- .owner = THIS_MODULE, -- .write = microcode_write, -- .open = microcode_open, -- .llseek = no_llseek, --}; -- --static struct miscdevice microcode_dev = { -- .minor = MICROCODE_MINOR, -- .name = "microcode", -- .nodename = "cpu/microcode", -- .fops = µcode_fops, --}; -- --static int __init microcode_dev_init(void) --{ -- int error; -- -- error = misc_register(µcode_dev); -- if (error) { -- pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR); -- return error; -- } -- -- return 0; --} -- --static void __exit microcode_dev_exit(void) --{ -- misc_deregister(µcode_dev); --} --#else --#define microcode_dev_init() 0 --#define microcode_dev_exit() do { } while (0) --#endif -- - /* fake device for request_firmware */ - static struct platform_device *microcode_pdev; - -+#ifdef CONFIG_MICROCODE_LATE_LOADING - /* - * Late loading dance. Why the heavy-handed stomp_machine effort? - * -@@ -599,16 +508,27 @@ wait_for_siblings: - */ - static int microcode_reload_late(void) - { -- int ret; -+ int old = boot_cpu_data.microcode, ret; -+ struct cpuinfo_x86 prev_info; - - atomic_set(&late_cpus_in, 0); - atomic_set(&late_cpus_out, 0); - -- ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); -- if (ret == 0) -- microcode_check(); -+ /* -+ * Take a snapshot before the microcode update in order to compare and -+ * check whether any bits changed after an update. -+ */ -+ store_cpu_caps(&prev_info); - -- pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode); -+ ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); -+ if (!ret) { -+ pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", -+ old, boot_cpu_data.microcode); -+ microcode_check(&prev_info); -+ } else { -+ pr_info("Reload failed, current microcode revision: 0x%x\n", -+ boot_cpu_data.microcode); -+ } - - return ret; - } -@@ -652,6 +572,9 @@ put: - return ret; - } - -+static DEVICE_ATTR_WO(reload); -+#endif -+ - static ssize_t version_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -668,7 +591,6 @@ static ssize_t pf_show(struct device *dev, - return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); - } - --static DEVICE_ATTR_WO(reload); - static DEVICE_ATTR(version, 0444, version_show, NULL); - static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); - -@@ -775,9 +697,9 @@ static struct subsys_interface mc_cpu_interface = { - }; - - /** -- * mc_bp_resume - Update boot CPU microcode during resume. -+ * microcode_bsp_resume - Update boot CPU microcode during resume. 
- */ --static void mc_bp_resume(void) -+void microcode_bsp_resume(void) - { - int cpu = smp_processor_id(); - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; -@@ -785,11 +707,11 @@ static void mc_bp_resume(void) - if (uci->valid && uci->mc) - microcode_ops->apply_microcode(cpu); - else if (!uci->mc) -- reload_early_microcode(); -+ reload_early_microcode(cpu); - } - - static struct syscore_ops mc_syscore_ops = { -- .resume = mc_bp_resume, -+ .resume = microcode_bsp_resume, - }; - - static int mc_cpu_starting(unsigned int cpu) -@@ -821,7 +743,9 @@ static int mc_cpu_down_prep(unsigned int cpu) - } - - static struct attribute *cpu_root_microcode_attrs[] = { -+#ifdef CONFIG_MICROCODE_LATE_LOADING - &dev_attr_reload.attr, -+#endif - NULL - }; - -@@ -873,10 +797,6 @@ static int __init microcode_init(void) - goto out_driver; - } - -- error = microcode_dev_init(); -- if (error) -- goto out_ucode_group; -- - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); -@@ -887,10 +807,6 @@ static int __init microcode_init(void) - - return 0; - -- out_ucode_group: -- sysfs_remove_group(&cpu_subsys.dev_root->kobj, -- &cpu_root_microcode_group); -- - out_driver: - cpus_read_lock(); - mutex_lock(µcode_mutex); -diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c -index 7e8e07bddd5fe..1ba590e6ef7bb 100644 ---- a/arch/x86/kernel/cpu/microcode/intel.c -+++ b/arch/x86/kernel/cpu/microcode/intel.c -@@ -659,7 +659,6 @@ void load_ucode_intel_ap(void) - else - iup = &intel_ucode_patch; - --reget: - if (!*iup) { - patch = __load_ucode_intel(&uci); - if (!patch) -@@ -670,12 +669,7 @@ reget: - - uci.mc = *iup; - -- if (apply_microcode_early(&uci, true)) { -- /* Mixed-silicon system? Try to refetch the proper patch: */ -- *iup = NULL; -- -- goto reget; -- } -+ apply_microcode_early(&uci, true); - } - - static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) -diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c -index e095c28d27ae8..8d3c649a1769b 100644 ---- a/arch/x86/kernel/cpu/mshyperv.c -+++ b/arch/x86/kernel/cpu/mshyperv.c -@@ -79,7 +79,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) - inc_irq_stat(hyperv_stimer0_count); - if (hv_stimer0_handler) - hv_stimer0_handler(); -- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0); -+ add_interrupt_randomness(HYPERV_STIMER0_VECTOR); - ack_APIC_irq(); - - set_irq_regs(old_regs); -@@ -163,12 +163,22 @@ static uint32_t __init ms_hyperv_platform(void) - cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, - &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); - -- if (eax >= HYPERV_CPUID_MIN && -- eax <= HYPERV_CPUID_MAX && -- !memcmp("Microsoft Hv", hyp_signature, 12)) -- return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; -+ if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX || -+ memcmp("Microsoft Hv", hyp_signature, 12)) -+ return 0; - -- return 0; -+ /* HYPERCALL and VP_INDEX MSRs are mandatory for all features. 
*/ -+ eax = cpuid_eax(HYPERV_CPUID_FEATURES); -+ if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) { -+ pr_warn("x86/hyperv: HYPERCALL MSR not available.\n"); -+ return 0; -+ } -+ if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) { -+ pr_warn("x86/hyperv: VP_INDEX MSR not available.\n"); -+ return 0; -+ } -+ -+ return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; - } - - static unsigned char hv_get_nmi_reason(void) -@@ -279,12 +289,16 @@ static void __init ms_hyperv_init_platform(void) - * To mirror what Windows does we should extract CPU management - * features and use the ReservedIdentityBit to detect if Linux is the - * root partition. But that requires negotiating CPU management -- * interface (a process to be finalized). -+ * interface (a process to be finalized). For now, use the privilege -+ * flag as the indicator for running as root. - * -- * For now, use the privilege flag as the indicator for running as -- * root. -+ * Hyper-V should never specify running as root and as a Confidential -+ * VM. But to protect against a compromised/malicious Hyper-V trying -+ * to exploit root behavior to expose Confidential VM memory, ignore -+ * the root partition setting if also a Confidential VM. - */ -- if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) { -+ if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) && -+ !(ms_hyperv.priv_high & HV_ISOLATION)) { - hv_root_partition = true; - pr_info("Hyper-V: running as root partition\n"); - } -diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c -index bb1c3f5f60c81..a5c51a14fbce8 100644 ---- a/arch/x86/kernel/cpu/resctrl/core.c -+++ b/arch/x86/kernel/cpu/resctrl/core.c -@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = { - .rid = RDT_RESOURCE_L3, - .name = "L3", - .cache_level = 3, -- .cache = { -- .min_cbm_bits = 1, -- }, - .domains = domain_init(RDT_RESOURCE_L3), - .parse_ctrlval = parse_cbm, - .format_str = "%d=%0*x", -@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = { - .rid = RDT_RESOURCE_L2, - .name = "L2", - .cache_level = 2, -- .cache = { -- .min_cbm_bits = 1, -- }, - .domains = domain_init(RDT_RESOURCE_L2), - .parse_ctrlval = parse_cbm, - .format_str = "%d=%0*x", -@@ -877,6 +871,7 @@ static __init void rdt_init_res_defs_intel(void) - r->cache.arch_has_sparse_bitmaps = false; - r->cache.arch_has_empty_bitmaps = false; - r->cache.arch_has_per_cpu_cfg = false; -+ r->cache.min_cbm_bits = 1; - } else if (r->rid == RDT_RESOURCE_MBA) { - hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE; - hw_res->msr_update = mba_wrmsr_intel; -@@ -897,6 +892,7 @@ static __init void rdt_init_res_defs_amd(void) - r->cache.arch_has_sparse_bitmaps = true; - r->cache.arch_has_empty_bitmaps = true; - r->cache.arch_has_per_cpu_cfg = true; -+ r->cache.min_cbm_bits = 0; - } else if (r->rid == RDT_RESOURCE_MBA) { - hw_res->msr_base = MSR_IA32_MBA_BW_BASE; - hw_res->msr_update = mba_wrmsr_amd; -diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c -index 87666275eed92..000e1467b4cde 100644 ---- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c -+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c -@@ -353,7 +353,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - { - struct resctrl_schema *s; - struct rdtgroup *rdtgrp; -- struct rdt_domain *dom; - struct rdt_resource *r; - char *tok, *resname; - int ret = 0; -@@ -382,10 +381,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - goto out; - } - -- list_for_each_entry(s, &resctrl_schema_all, list) { -- list_for_each_entry(dom, 
&s->res->domains, list) -- memset(dom->staged_config, 0, sizeof(dom->staged_config)); -- } -+ rdt_staged_configs_clear(); - - while ((tok = strsep(&buf, "\n")) != NULL) { - resname = strim(strsep(&tok, ":")); -@@ -422,6 +418,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - } - - out: -+ rdt_staged_configs_clear(); - rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); - return ret ?: nbytes; -diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h -index 1d647188a43bf..218d88800565a 100644 ---- a/arch/x86/kernel/cpu/resctrl/internal.h -+++ b/arch/x86/kernel/cpu/resctrl/internal.h -@@ -550,5 +550,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); - void __check_limbo(struct rdt_domain *d, bool force_free); - void rdt_domain_reconfigure_cdp(struct rdt_resource *r); - void __init thread_throttle_mode_init(void); -+void rdt_staged_configs_clear(void); - - #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ -diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c -index db813f819ad6c..4d8398986f784 100644 ---- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c -+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c -@@ -420,6 +420,7 @@ static int pseudo_lock_fn(void *_rdtgrp) - struct pseudo_lock_region *plr = rdtgrp->plr; - u32 rmid_p, closid_p; - unsigned long i; -+ u64 saved_msr; - #ifdef CONFIG_KASAN - /* - * The registers used for local register variables are also used -@@ -463,6 +464,7 @@ static int pseudo_lock_fn(void *_rdtgrp) - * the buffer and evict pseudo-locked memory read earlier from the - * cache. - */ -+ saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL); - __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); - closid_p = this_cpu_read(pqr_state.cur_closid); - rmid_p = this_cpu_read(pqr_state.cur_rmid); -@@ -514,7 +516,7 @@ static int pseudo_lock_fn(void *_rdtgrp) - __wrmsr(IA32_PQR_ASSOC, rmid_p, closid_p); - - /* Re-enable the hardware prefetcher(s) */ -- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); -+ wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr); - local_irq_enable(); - - plr->thread_done = 1; -@@ -871,6 +873,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) - static int measure_cycles_lat_fn(void *_plr) - { - struct pseudo_lock_region *plr = _plr; -+ u32 saved_low, saved_high; - unsigned long i; - u64 start, end; - void *mem_r; -@@ -879,6 +882,7 @@ static int measure_cycles_lat_fn(void *_plr) - /* - * Disable hardware prefetchers. - */ -+ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); - mem_r = READ_ONCE(plr->kmem); - /* -@@ -895,7 +899,7 @@ static int measure_cycles_lat_fn(void *_plr) - end = rdtsc_ordered(); - trace_pseudo_lock_mem_latency((u32)(end - start)); - } -- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); -+ wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - local_irq_enable(); - plr->thread_done = 1; - wake_up_interruptible(&plr->lock_thread_wq); -@@ -940,6 +944,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, - u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0; - struct perf_event *miss_event, *hit_event; - int hit_pmcnum, miss_pmcnum; -+ u32 saved_low, saved_high; - unsigned int line_size; - unsigned int size; - unsigned long i; -@@ -973,6 +978,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, - /* - * Disable hardware prefetchers. 
- */ -+ rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0); - - /* Initialize rest of local variables */ -@@ -1031,7 +1037,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, - */ - rmb(); - /* Re-enable hardware prefetchers */ -- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0); -+ wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high); - local_irq_enable(); - out_hit: - perf_event_release_kernel(hit_event); -diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c -index b57b3db9a6a78..2ec16477eb3e1 100644 ---- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c -+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c -@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...) - va_end(ap); - } - -+void rdt_staged_configs_clear(void) -+{ -+ struct rdt_resource *r; -+ struct rdt_domain *dom; -+ -+ lockdep_assert_held(&rdtgroup_mutex); -+ -+ for_each_alloc_capable_rdt_resource(r) { -+ list_for_each_entry(dom, &r->domains, list) -+ memset(dom->staged_config, 0, sizeof(dom->staged_config)); -+ } -+} -+ - /* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. -@@ -314,7 +327,7 @@ static void update_cpu_closid_rmid(void *info) - * executing task might have its own closid selected. Just reuse - * the context switch code. - */ -- resctrl_sched_in(); -+ resctrl_sched_in(current); - } - - /* -@@ -535,7 +548,7 @@ static void _update_task_closid_rmid(void *task) - * Otherwise, the MSR is updated when the task is scheduled in. - */ - if (task == current) -- resctrl_sched_in(); -+ resctrl_sched_in(task); - } - - static void update_task_closid_rmid(struct task_struct *t) -@@ -580,8 +593,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk, - /* - * Ensure the task's closid and rmid are written before determining if - * the task is current that will decide if it will be interrupted. -+ * This pairs with the full barrier between the rq->curr update and -+ * resctrl_sched_in() during context switch. - */ -- barrier(); -+ smp_mb(); - - /* - * By now, the task's closid and rmid are set. If the task is current -@@ -716,11 +731,15 @@ unlock: - static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) - { - struct task_struct *p, *t; -+ pid_t pid; - - rcu_read_lock(); - for_each_process_thread(p, t) { -- if (is_closid_match(t, r) || is_rmid_match(t, r)) -- seq_printf(s, "%d\n", t->pid); -+ if (is_closid_match(t, r) || is_rmid_match(t, r)) { -+ pid = task_pid_vnr(t); -+ if (pid) -+ seq_printf(s, "%d\n", pid); -+ } - } - rcu_read_unlock(); - } -@@ -2363,6 +2382,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, - WRITE_ONCE(t->closid, to->closid); - WRITE_ONCE(t->rmid, to->mon.rmid); - -+ /* -+ * Order the closid/rmid stores above before the loads -+ * in task_curr(). This pairs with the full barrier -+ * between the rq->curr update and resctrl_sched_in() -+ * during context switch. -+ */ -+ smp_mb(); -+ - /* - * If the task is on a CPU, set the CPU in the mask. 
- * The detection is inaccurate as tasks might move or -@@ -2803,7 +2830,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) - { - struct resctrl_schema *s; - struct rdt_resource *r; -- int ret; -+ int ret = 0; -+ -+ rdt_staged_configs_clear(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; -@@ -2812,20 +2841,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) - } else { - ret = rdtgroup_init_cat(s, rdtgrp->closid); - if (ret < 0) -- return ret; -+ goto out; - } - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Failed to initialize allocations\n"); -- return ret; -+ goto out; - } - - } - - rdtgrp->mode = RDT_MODE_SHAREABLE; - -- return 0; -+out: -+ rdt_staged_configs_clear(); -+ return ret; - } - - static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, -diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c -index 21d1f062895a8..06bfef1c4175e 100644 ---- a/arch/x86/kernel/cpu/scattered.c -+++ b/arch/x86/kernel/cpu/scattered.c -@@ -26,6 +26,7 @@ struct cpuid_bit { - static const struct cpuid_bit cpuid_bits[] = { - { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, - { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, -+ { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, - { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, - { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 }, - { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 }, -diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c -index 001808e3901cc..fa5777af8da1a 100644 ---- a/arch/x86/kernel/cpu/sgx/encl.c -+++ b/arch/x86/kernel/cpu/sgx/encl.c -@@ -12,6 +12,116 @@ - #include "encls.h" - #include "sgx.h" - -+#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd)) -+/* -+ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to -+ * determine the page index associated with the first PCMD entry -+ * within a PCMD page. -+ */ -+#define PCMD_FIRST_MASK GENMASK(4, 0) -+ -+/** -+ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with -+ * a PCMD page is in process of being reclaimed. -+ * @encl: Enclave to which PCMD page belongs -+ * @start_addr: Address of enclave page using first entry within the PCMD page -+ * -+ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is -+ * stored. The PCMD data of a reclaimed enclave page contains enough -+ * information for the processor to verify the page at the time -+ * it is loaded back into the Enclave Page Cache (EPC). -+ * -+ * The backing storage to which enclave pages are reclaimed is laid out as -+ * follows: -+ * Encrypted enclave pages:SECS page:PCMD pages -+ * -+ * Each PCMD page contains the PCMD metadata of -+ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages. -+ * -+ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the -+ * process of getting data (and thus soon being non-empty). (b) is tested with -+ * a check if an enclave page sharing the PCMD page is in the process of being -+ * reclaimed. -+ * -+ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it -+ * intends to reclaim that enclave page - it means that the PCMD page -+ * associated with that enclave page is about to get some data and thus -+ * even if the PCMD page is empty, it should not be truncated. -+ * -+ * Context: Enclave mutex (&sgx_encl->lock) must be held. 
-+ * Return: 1 if the reclaimer is about to write to the PCMD page -+ * 0 if the reclaimer has no intention to write to the PCMD page -+ */ -+static int reclaimer_writing_to_pcmd(struct sgx_encl *encl, -+ unsigned long start_addr) -+{ -+ int reclaimed = 0; -+ int i; -+ -+ /* -+ * PCMD_FIRST_MASK is based on number of PCMD entries within -+ * PCMD page being 32. -+ */ -+ BUILD_BUG_ON(PCMDS_PER_PAGE != 32); -+ -+ for (i = 0; i < PCMDS_PER_PAGE; i++) { -+ struct sgx_encl_page *entry; -+ unsigned long addr; -+ -+ addr = start_addr + i * PAGE_SIZE; -+ -+ /* -+ * Stop when reaching the SECS page - it does not -+ * have a page_array entry and its reclaim is -+ * started and completed with enclave mutex held so -+ * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED -+ * flag. -+ */ -+ if (addr == encl->base + encl->size) -+ break; -+ -+ entry = xa_load(&encl->page_array, PFN_DOWN(addr)); -+ if (!entry) -+ continue; -+ -+ /* -+ * VA page slot ID uses same bit as the flag so it is important -+ * to ensure that the page is not already in backing store. -+ */ -+ if (entry->epc_page && -+ (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) { -+ reclaimed = 1; -+ break; -+ } -+ } -+ -+ return reclaimed; -+} -+ -+/* -+ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's -+ * follow right after the EPC data in the backing storage. In addition to the -+ * visible enclave pages, there's one extra page slot for SECS, before PCMD -+ * structs. -+ */ -+static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl, -+ unsigned long page_index) -+{ -+ pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs); -+ -+ return epc_end_off + page_index * sizeof(struct sgx_pcmd); -+} -+ -+/* -+ * Free a page from the backing storage in the given page index. -+ */ -+static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index) -+{ -+ struct inode *inode = file_inode(encl->backing); -+ -+ shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1); -+} -+ - /* - * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC - * Pages" in the SDM. -@@ -22,9 +132,12 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, - { - unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; - struct sgx_encl *encl = encl_page->encl; -+ pgoff_t page_index, page_pcmd_off; -+ unsigned long pcmd_first_page; - struct sgx_pageinfo pginfo; - struct sgx_backing b; -- pgoff_t page_index; -+ bool pcmd_page_empty; -+ u8 *pcmd_page; - int ret; - - if (secs_page) -@@ -32,14 +145,21 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, - else - page_index = PFN_DOWN(encl->size); - -- ret = sgx_encl_get_backing(encl, page_index, &b); -+ /* -+ * Address of enclave page using the first entry within the PCMD page. 
-+ */ -+ pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base; -+ -+ page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); -+ -+ ret = sgx_encl_lookup_backing(encl, page_index, &b); - if (ret) - return ret; - - pginfo.addr = encl_page->desc & PAGE_MASK; - pginfo.contents = (unsigned long)kmap_atomic(b.contents); -- pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) + -- b.pcmd_offset; -+ pcmd_page = kmap_atomic(b.pcmd); -+ pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset; - - if (secs_page) - pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page); -@@ -55,10 +175,32 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, - ret = -EFAULT; - } - -- kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset)); -+ memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd)); -+ set_page_dirty(b.pcmd); -+ -+ /* -+ * The area for the PCMD in the page was zeroed above. Check if the -+ * whole page is now empty meaning that all PCMD's have been zeroed: -+ */ -+ pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE); -+ -+ kunmap_atomic(pcmd_page); - kunmap_atomic((void *)(unsigned long)pginfo.contents); - -- sgx_encl_put_backing(&b, false); -+ get_page(b.pcmd); -+ sgx_encl_put_backing(&b); -+ -+ sgx_encl_truncate_backing_page(encl, page_index); -+ -+ if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) { -+ sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off)); -+ pcmd_page = kmap_atomic(b.pcmd); -+ if (memchr_inv(pcmd_page, 0, PAGE_SIZE)) -+ pr_warn("PCMD page not empty after truncate.\n"); -+ kunmap_atomic(pcmd_page); -+ } -+ -+ put_page(b.pcmd); - - return ret; - } -@@ -391,11 +533,15 @@ const struct vm_operations_struct sgx_vm_ops = { - void sgx_encl_release(struct kref *ref) - { - struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); -+ unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); - struct sgx_va_page *va_page; - struct sgx_encl_page *entry; -- unsigned long index; -+ unsigned long count = 0; - -- xa_for_each(&encl->page_array, index, entry) { -+ XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); -+ -+ xas_lock(&xas); -+ xas_for_each(&xas, entry, max_page_index) { - if (entry->epc_page) { - /* - * The page and its radix tree entry cannot be freed -@@ -410,7 +556,20 @@ void sgx_encl_release(struct kref *ref) - } - - kfree(entry); -+ /* -+ * Invoke scheduler on every XA_CHECK_SCHED iteration -+ * to prevent soft lockups. -+ */ -+ if (!(++count % XA_CHECK_SCHED)) { -+ xas_pause(&xas); -+ xas_unlock(&xas); -+ -+ cond_resched(); -+ -+ xas_lock(&xas); -+ } - } -+ xas_unlock(&xas); - - xa_destroy(&encl->page_array); - -@@ -574,10 +733,10 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, - * 0 on success, - * -errno otherwise. 
- */ --int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, -+static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, - struct sgx_backing *backing) - { -- pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5); -+ pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); - struct page *contents; - struct page *pcmd; - -@@ -585,7 +744,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, - if (IS_ERR(contents)) - return PTR_ERR(contents); - -- pcmd = sgx_encl_get_backing_page(encl, pcmd_index); -+ pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off)); - if (IS_ERR(pcmd)) { - put_page(contents); - return PTR_ERR(pcmd); -@@ -594,25 +753,118 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, - backing->page_index = page_index; - backing->contents = contents; - backing->pcmd = pcmd; -- backing->pcmd_offset = -- (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) * -- sizeof(struct sgx_pcmd); -+ backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1); - - return 0; - } - -+/* -+ * When called from ksgxd, returns the mem_cgroup of a struct mm stored -+ * in the enclave's mm_list. When not called from ksgxd, just returns -+ * the mem_cgroup of the current task. -+ */ -+static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl) -+{ -+ struct mem_cgroup *memcg = NULL; -+ struct sgx_encl_mm *encl_mm; -+ int idx; -+ -+ /* -+ * If called from normal task context, return the mem_cgroup -+ * of the current task's mm. The remainder of the handling is for -+ * ksgxd. -+ */ -+ if (!current_is_ksgxd()) -+ return get_mem_cgroup_from_mm(current->mm); -+ -+ /* -+ * Search the enclave's mm_list to find an mm associated with -+ * this enclave to charge the allocation to. -+ */ -+ idx = srcu_read_lock(&encl->srcu); -+ -+ list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) { -+ if (!mmget_not_zero(encl_mm->mm)) -+ continue; -+ -+ memcg = get_mem_cgroup_from_mm(encl_mm->mm); -+ -+ mmput_async(encl_mm->mm); -+ -+ break; -+ } -+ -+ srcu_read_unlock(&encl->srcu, idx); -+ -+ /* -+ * In the rare case that there isn't an mm associated with -+ * the enclave, set memcg to the current active mem_cgroup. -+ * This will be the root mem_cgroup if there is no active -+ * mem_cgroup. -+ */ -+ if (!memcg) -+ return get_mem_cgroup_from_mm(NULL); -+ -+ return memcg; -+} -+ - /** -- * sgx_encl_put_backing() - Unpin the backing storage -+ * sgx_encl_alloc_backing() - allocate a new backing storage page -+ * @encl: an enclave pointer -+ * @page_index: enclave page index - * @backing: data for accessing backing storage for the page -- * @do_write: mark pages dirty -+ * -+ * When called from ksgxd, sets the active memcg from one of the -+ * mms in the enclave's mm_list prior to any backing page allocation, -+ * in order to ensure that shmem page allocations are charged to the -+ * enclave. -+ * -+ * Return: -+ * 0 on success, -+ * -errno otherwise. 
- */ --void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write) -+int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index, -+ struct sgx_backing *backing) - { -- if (do_write) { -- set_page_dirty(backing->pcmd); -- set_page_dirty(backing->contents); -- } -+ struct mem_cgroup *encl_memcg = sgx_encl_get_mem_cgroup(encl); -+ struct mem_cgroup *memcg = set_active_memcg(encl_memcg); -+ int ret; -+ -+ ret = sgx_encl_get_backing(encl, page_index, backing); -+ -+ set_active_memcg(memcg); -+ mem_cgroup_put(encl_memcg); - -+ return ret; -+} -+ -+/** -+ * sgx_encl_lookup_backing() - retrieve an existing backing storage page -+ * @encl: an enclave pointer -+ * @page_index: enclave page index -+ * @backing: data for accessing backing storage for the page -+ * -+ * Retrieve a backing page for loading data back into an EPC page with ELDU. -+ * It is the caller's responsibility to ensure that it is appropriate to use -+ * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is -+ * not used correctly, this will cause an allocation which is not accounted for. -+ * -+ * Return: -+ * 0 on success, -+ * -errno otherwise. -+ */ -+int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index, -+ struct sgx_backing *backing) -+{ -+ return sgx_encl_get_backing(encl, page_index, backing); -+} -+ -+/** -+ * sgx_encl_put_backing() - Unpin the backing storage -+ * @backing: data for accessing backing storage for the page -+ */ -+void sgx_encl_put_backing(struct sgx_backing *backing) -+{ - put_page(backing->pcmd); - put_page(backing->contents); - } -diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h -index fec43ca65065b..332ef3568267e 100644 ---- a/arch/x86/kernel/cpu/sgx/encl.h -+++ b/arch/x86/kernel/cpu/sgx/encl.h -@@ -103,11 +103,14 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr, - int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start, - unsigned long end, unsigned long vm_flags); - -+bool current_is_ksgxd(void); - void sgx_encl_release(struct kref *ref); - int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm); --int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, -- struct sgx_backing *backing); --void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write); -+int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index, -+ struct sgx_backing *backing); -+int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index, -+ struct sgx_backing *backing); -+void sgx_encl_put_backing(struct sgx_backing *backing); - int sgx_encl_test_and_clear_young(struct mm_struct *mm, - struct sgx_encl_page *page); - -diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c -index 83df20e3e6333..217777c029eea 100644 ---- a/arch/x86/kernel/cpu/sgx/ioctl.c -+++ b/arch/x86/kernel/cpu/sgx/ioctl.c -@@ -372,6 +372,29 @@ err_out_free: - return ret; - } - -+/* -+ * Ensure user provided offset and length values are valid for -+ * an enclave. 
-+ */ -+static int sgx_validate_offset_length(struct sgx_encl *encl, -+ unsigned long offset, -+ unsigned long length) -+{ -+ if (!IS_ALIGNED(offset, PAGE_SIZE)) -+ return -EINVAL; -+ -+ if (!length || !IS_ALIGNED(length, PAGE_SIZE)) -+ return -EINVAL; -+ -+ if (offset + length < offset) -+ return -EINVAL; -+ -+ if (offset + length - PAGE_SIZE >= encl->size) -+ return -EINVAL; -+ -+ return 0; -+} -+ - /** - * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES - * @encl: an enclave pointer -@@ -425,14 +448,10 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg) - if (copy_from_user(&add_arg, arg, sizeof(add_arg))) - return -EFAULT; - -- if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) || -- !IS_ALIGNED(add_arg.src, PAGE_SIZE)) -- return -EINVAL; -- -- if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1)) -+ if (!IS_ALIGNED(add_arg.src, PAGE_SIZE)) - return -EINVAL; - -- if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size) -+ if (sgx_validate_offset_length(encl, add_arg.offset, add_arg.length)) - return -EINVAL; - - if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo, -diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c -index 63d3de02bbccb..4ea48acf55faa 100644 ---- a/arch/x86/kernel/cpu/sgx/main.c -+++ b/arch/x86/kernel/cpu/sgx/main.c -@@ -28,8 +28,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); - static LIST_HEAD(sgx_active_page_list); - static DEFINE_SPINLOCK(sgx_reclaimer_lock); - --/* The free page list lock protected variables prepend the lock. */ --static unsigned long sgx_nr_free_pages; -+static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0); - - /* Nodes with one or more EPC sections. */ - static nodemask_t sgx_numa_mask; -@@ -47,9 +46,13 @@ static LIST_HEAD(sgx_dirty_page_list); - * Reset post-kexec EPC pages to the uninitialized state. The pages are removed - * from the input list, and made available for the page allocator. SECS pages - * prepending their children in the input list are left intact. -+ * -+ * Return 0 when sanitization was successful or kthread was stopped, and the -+ * number of unsanitized pages otherwise. - */ --static void __sgx_sanitize_pages(struct list_head *dirty_page_list) -+static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list) - { -+ unsigned long left_dirty = 0; - struct sgx_epc_page *page; - LIST_HEAD(dirty); - int ret; -@@ -57,7 +60,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list) - /* dirty_page_list is thread-local, no need for a lock: */ - while (!list_empty(dirty_page_list)) { - if (kthread_should_stop()) -- return; -+ return 0; - - page = list_first_entry(dirty_page_list, struct sgx_epc_page, list); - -@@ -72,12 +75,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list) - } else { - /* The page is not yet clean - move to the dirty list. 
*/ - list_move_tail(&page->list, &dirty); -+ left_dirty++; - } - - cond_resched(); - } - - list_splice(&dirty, dirty_page_list); -+ return left_dirty; - } - - static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page) -@@ -171,6 +176,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot, - backing->pcmd_offset; - - ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot); -+ set_page_dirty(backing->pcmd); -+ set_page_dirty(backing->contents); - - kunmap_atomic((void *)(unsigned long)(pginfo.metadata - - backing->pcmd_offset)); -@@ -288,9 +295,10 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page, - sgx_encl_ewb(epc_page, backing); - encl_page->epc_page = NULL; - encl->secs_child_cnt--; -+ sgx_encl_put_backing(backing); - - if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) { -- ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size), -+ ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size), - &secs_backing); - if (ret) - goto out; -@@ -300,7 +308,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page, - sgx_encl_free_epc_page(encl->secs.epc_page); - encl->secs.epc_page = NULL; - -- sgx_encl_put_backing(&secs_backing, true); -+ sgx_encl_put_backing(&secs_backing); - } - - out: -@@ -361,11 +369,14 @@ static void sgx_reclaim_pages(void) - goto skip; - - page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base); -- ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]); -- if (ret) -- goto skip; - - mutex_lock(&encl_page->encl->lock); -+ ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]); -+ if (ret) { -+ mutex_unlock(&encl_page->encl->lock); -+ goto skip; -+ } -+ - encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED; - mutex_unlock(&encl_page->encl->lock); - continue; -@@ -393,7 +404,6 @@ skip: - - encl_page = epc_page->owner; - sgx_reclaimer_write(epc_page, &backing[i]); -- sgx_encl_put_backing(&backing[i], true); - - kref_put(&encl_page->encl->refcount, sgx_encl_release); - epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED; -@@ -403,14 +413,15 @@ skip: - - spin_lock(&node->lock); - list_add_tail(&epc_page->list, &node->free_page_list); -- sgx_nr_free_pages++; - spin_unlock(&node->lock); -+ atomic_long_inc(&sgx_nr_free_pages); - } - } - - static bool sgx_should_reclaim(unsigned long watermark) - { -- return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list); -+ return atomic_long_read(&sgx_nr_free_pages) < watermark && -+ !list_empty(&sgx_active_page_list); - } - - static int ksgxd(void *p) -@@ -422,10 +433,7 @@ static int ksgxd(void *p) - * required for SECS pages, whose child pages blocked EREMOVE. 
- */ - __sgx_sanitize_pages(&sgx_dirty_page_list); -- __sgx_sanitize_pages(&sgx_dirty_page_list); -- -- /* sanity check: */ -- WARN_ON(!list_empty(&sgx_dirty_page_list)); -+ WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list)); - - while (!kthread_should_stop()) { - if (try_to_freeze()) -@@ -457,6 +465,11 @@ static bool __init sgx_page_reclaimer_init(void) - return true; - } - -+bool current_is_ksgxd(void) -+{ -+ return current == ksgxd_tsk; -+} -+ - static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) - { - struct sgx_numa_node *node = &sgx_numa_nodes[nid]; -@@ -471,9 +484,9 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) - - page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list); - list_del_init(&page->list); -- sgx_nr_free_pages--; - - spin_unlock(&node->lock); -+ atomic_long_dec(&sgx_nr_free_pages); - - return page; - } -@@ -625,9 +638,9 @@ void sgx_free_epc_page(struct sgx_epc_page *page) - spin_lock(&node->lock); - - list_add_tail(&page->list, &node->free_page_list); -- sgx_nr_free_pages++; - - spin_unlock(&node->lock); -+ atomic_long_inc(&sgx_nr_free_pages); - } - - static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, -diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c -index 64511c4a52001..1550910201238 100644 ---- a/arch/x86/kernel/cpu/sgx/virt.c -+++ b/arch/x86/kernel/cpu/sgx/virt.c -@@ -167,6 +167,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file) - continue; - - xa_erase(&vepc->page_array, index); -+ cond_resched(); - } - - /* -@@ -185,6 +186,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file) - list_add_tail(&epc_page->list, &secs_pages); - - xa_erase(&vepc->page_array, index); -+ cond_resched(); - } - - /* -@@ -206,6 +208,7 @@ static int sgx_vepc_release(struct inode *inode, struct file *file) - - if (sgx_vepc_free_page(epc_page)) - list_add_tail(&epc_page->list, &secs_pages); -+ cond_resched(); - } - - if (!list_empty(&secs_pages)) -diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c -index 132a2de44d2fe..0270925fe013b 100644 ---- a/arch/x86/kernel/cpu/topology.c -+++ b/arch/x86/kernel/cpu/topology.c -@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c) - * initial apic id, which also represents 32-bit extended x2apic id. 
- */ - c->initial_apicid = edx; -- smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); -+ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); - #endif - return 0; - } -@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) - unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; - unsigned int core_select_mask, core_level_siblings; - unsigned int die_select_mask, die_level_siblings; -+ unsigned int pkg_mask_width; - bool die_level_present = false; - int leaf; - -@@ -108,13 +109,14 @@ int detect_extended_topology(struct cpuinfo_x86 *c) - */ - cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx); - c->initial_apicid = edx; -- core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); -+ core_level_siblings = LEVEL_MAX_SIBLINGS(ebx); -+ smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx)); - core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); -- die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); -+ pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - - sub_index = 1; -- do { -+ while (true) { - cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); - - /* -@@ -132,10 +134,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c) - die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); - } - -+ if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE) -+ pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); -+ else -+ break; -+ - sub_index++; -- } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); -+ } - -- core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; -+ core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width; - die_select_mask = (~(-1 << die_plus_mask_width)) >> - core_plus_mask_width; - -@@ -148,7 +155,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) - } - - c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, -- die_plus_mask_width); -+ pkg_mask_width); - /* - * Reinit the apicid, now that we have extended initial_apicid. - */ -diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c -index 9c7a5f0492929..8009c8346d8f8 100644 ---- a/arch/x86/kernel/cpu/tsx.c -+++ b/arch/x86/kernel/cpu/tsx.c -@@ -19,7 +19,7 @@ - - enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED; - --void tsx_disable(void) -+static void tsx_disable(void) - { - u64 tsx; - -@@ -39,7 +39,7 @@ void tsx_disable(void) - wrmsrl(MSR_IA32_TSX_CTRL, tsx); - } - --void tsx_enable(void) -+static void tsx_enable(void) - { - u64 tsx; - -@@ -58,24 +58,6 @@ void tsx_enable(void) - wrmsrl(MSR_IA32_TSX_CTRL, tsx); - } - --static bool __init tsx_ctrl_is_supported(void) --{ -- u64 ia32_cap = x86_read_arch_cap_msr(); -- -- /* -- * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this -- * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. -- * -- * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a -- * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES -- * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get -- * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, -- * tsx= cmdline requests will do nothing on CPUs without -- * MSR_IA32_TSX_CTRL support. 
-- */ -- return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR); --} -- - static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) - { - if (boot_cpu_has_bug(X86_BUG_TAA)) -@@ -84,7 +66,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void) - return TSX_CTRL_ENABLE; - } - --void tsx_clear_cpuid(void) -+/* -+ * Disabling TSX is not a trivial business. -+ * -+ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT -+ * which says that TSX is practically disabled (all transactions are -+ * aborted by default). When that bit is set, the kernel unconditionally -+ * disables TSX. -+ * -+ * In order to do that, however, it needs to dance a bit: -+ * -+ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and -+ * the MSR is present only when *two* CPUID bits are set: -+ * -+ * - X86_FEATURE_RTM_ALWAYS_ABORT -+ * - X86_FEATURE_TSX_FORCE_ABORT -+ * -+ * 2. The second method is for CPUs which do not have the above-mentioned -+ * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX -+ * through that one. Those CPUs can also have the initially mentioned -+ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy -+ * applies: TSX gets disabled unconditionally. -+ * -+ * When either of the two methods are present, the kernel disables TSX and -+ * clears the respective RTM and HLE feature flags. -+ * -+ * An additional twist in the whole thing presents late microcode loading -+ * which, when done, may cause for the X86_FEATURE_RTM_ALWAYS_ABORT CPUID -+ * bit to be set after the update. -+ * -+ * A subsequent hotplug operation on any logical CPU except the BSP will -+ * cause for the supported CPUID feature bits to get re-detected and, if -+ * RTM and HLE get cleared all of a sudden, but, userspace did consult -+ * them before the update, then funny explosions will happen. Long story -+ * short: the kernel doesn't modify CPUID feature bits after booting. -+ * -+ * That's why, this function's call in init_intel() doesn't clear the -+ * feature flags. -+ */ -+static void tsx_clear_cpuid(void) - { - u64 msr; - -@@ -97,6 +117,40 @@ void tsx_clear_cpuid(void) - rdmsrl(MSR_TSX_FORCE_ABORT, msr); - msr |= MSR_TFA_TSX_CPUID_CLEAR; - wrmsrl(MSR_TSX_FORCE_ABORT, msr); -+ } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) { -+ rdmsrl(MSR_IA32_TSX_CTRL, msr); -+ msr |= TSX_CTRL_CPUID_CLEAR; -+ wrmsrl(MSR_IA32_TSX_CTRL, msr); -+ } -+} -+ -+/* -+ * Disable TSX development mode -+ * -+ * When the microcode released in Feb 2022 is applied, TSX will be disabled by -+ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123 -+ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is -+ * not recommended for production deployments. In particular, applying MD_CLEAR -+ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient -+ * execution attack may not be effective on these processors when Intel TSX is -+ * enabled with updated microcode. 
-+ */ -+static void tsx_dev_mode_disable(void) -+{ -+ u64 mcu_opt_ctrl; -+ -+ /* Check if RTM_ALLOW exists */ -+ if (!boot_cpu_has_bug(X86_BUG_TAA) || -+ !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) || -+ !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL)) -+ return; -+ -+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); -+ -+ if (mcu_opt_ctrl & RTM_ALLOW) { -+ mcu_opt_ctrl &= ~RTM_ALLOW; -+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl); -+ setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT); - } - } - -@@ -105,14 +159,14 @@ void __init tsx_init(void) - char arg[5] = {}; - int ret; - -+ tsx_dev_mode_disable(); -+ - /* -- * Hardware will always abort a TSX transaction if both CPUID bits -- * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is -- * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them -- * here. -+ * Hardware will always abort a TSX transaction when the CPUID bit -+ * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate -+ * CPUID.RTM and CPUID.HLE bits. Clear them here. - */ -- if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) && -- boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { -+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) { - tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT; - tsx_clear_cpuid(); - setup_clear_cpu_cap(X86_FEATURE_RTM); -@@ -120,7 +174,20 @@ void __init tsx_init(void) - return; - } - -- if (!tsx_ctrl_is_supported()) { -+ /* -+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this -+ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES. -+ * -+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a -+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES -+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get -+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus, -+ * tsx= cmdline requests will do nothing on CPUs without -+ * MSR_IA32_TSX_CTRL support. -+ */ -+ if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) { -+ setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL); -+ } else { - tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED; - return; - } -@@ -175,3 +242,16 @@ void __init tsx_init(void) - setup_force_cpu_cap(X86_FEATURE_HLE); - } - } -+ -+void tsx_ap_init(void) -+{ -+ tsx_dev_mode_disable(); -+ -+ if (tsx_ctrl_state == TSX_CTRL_ENABLE) -+ tsx_enable(); -+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE) -+ tsx_disable(); -+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT) -+ /* See comment over that function for more details. */ -+ tsx_clear_cpuid(); -+} -diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c -index e8326a8d1c5dc..97b9212a6aabf 100644 ---- a/arch/x86/kernel/crash.c -+++ b/arch/x86/kernel/crash.c -@@ -37,7 +37,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -81,15 +80,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) - */ - cpu_crash_vmclear_loaded_vmcss(); - -- /* Disable VMX or SVM if needed. -- * -- * We need to disable virtualization on all CPUs. -- * Having VMX or SVM enabled on any CPU may break rebooting -- * after the kdump kernel has finished its task. -- */ -- cpu_emergency_vmxoff(); -- cpu_emergency_svm_disable(); -- - /* - * Disable Intel PT to stop its logging - */ -@@ -148,12 +138,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) - */ - cpu_crash_vmclear_loaded_vmcss(); - -- /* Booting kdump kernel with VMX or SVM enabled won't work, -- * because (among other limitations) we can't disable paging -- * with the virt flags. 
-- */ -- cpu_emergency_vmxoff(); -- cpu_emergency_svm_disable(); -+ cpu_emergency_disable_virtualization(); - - /* - * Disable Intel PT to stop its logging -@@ -401,10 +386,8 @@ int crash_load_segments(struct kimage *image) - kbuf.buf_align = ELF_CORE_HEADER_ALIGN; - kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; - ret = kexec_add_buffer(&kbuf); -- if (ret) { -- vfree((void *)image->elf_headers); -+ if (ret) - return ret; -- } - image->elf_load_addr = kbuf.mem; - pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->elf_load_addr, kbuf.bufsz, kbuf.bufsz); -diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c -index ea4fe192189d5..92b33c7eaf3f9 100644 ---- a/arch/x86/kernel/dumpstack.c -+++ b/arch/x86/kernel/dumpstack.c -@@ -195,7 +195,6 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - printk("%sCall Trace:\n", log_lvl); - - unwind_start(&state, task, regs, stack); -- stack = stack ? : get_stack_pointer(task, regs); - regs = unwind_get_entry_regs(&state, &partial); - - /* -@@ -214,9 +213,13 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, - * - hardirq stack - * - entry stack - */ -- for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { -+ for (stack = stack ?: get_stack_pointer(task, regs); -+ stack; -+ stack = stack_info.next_sp) { - const char *stack_name; - -+ stack = PTR_ALIGN(stack, sizeof(long)); -+ - if (get_stack_info(stack, task, &stack_info, &visit_mask)) { - /* - * We weren't on a valid stack. It's possible that -@@ -351,7 +354,7 @@ unsigned long oops_begin(void) - } - NOKPROBE_SYMBOL(oops_begin); - --void __noreturn rewind_stack_do_exit(int signr); -+void __noreturn rewind_stack_and_make_dead(int signr); - - void oops_end(unsigned long flags, struct pt_regs *regs, int signr) - { -@@ -386,7 +389,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) - * reuse the task stack and that existing poisons are invalid. 
- */ - kasan_unpoison_task_stack(current); -- rewind_stack_do_exit(signr); -+ rewind_stack_and_make_dead(signr); - } - NOKPROBE_SYMBOL(oops_end); - -diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c -index 5601b95944fae..6c5defd6569a3 100644 ---- a/arch/x86/kernel/dumpstack_64.c -+++ b/arch/x86/kernel/dumpstack_64.c -@@ -32,9 +32,15 @@ const char *stack_type_name(enum stack_type type) - { - BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); - -+ if (type == STACK_TYPE_TASK) -+ return "TASK"; -+ - if (type == STACK_TYPE_IRQ) - return "IRQ"; - -+ if (type == STACK_TYPE_SOFTIRQ) -+ return "SOFTIRQ"; -+ - if (type == STACK_TYPE_ENTRY) { - /* - * On 64-bit, we have a generic entry stack that we -diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c -index bc0657f0deedf..f267205f2d5a4 100644 ---- a/arch/x86/kernel/e820.c -+++ b/arch/x86/kernel/e820.c -@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt); - */ - void __init e820__reserve_setup_data(void) - { -+ struct setup_indirect *indirect; - struct setup_data *data; -- u64 pa_data; -+ u64 pa_data, pa_next; -+ u32 len; - - pa_data = boot_params.hdr.setup_data; - if (!pa_data) -@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void) - - while (pa_data) { - data = early_memremap(pa_data, sizeof(*data)); -+ if (!data) { -+ pr_warn("e820: failed to memremap setup_data entry\n"); -+ return; -+ } -+ -+ len = sizeof(*data); -+ pa_next = data->next; -+ - e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - - /* -@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void) - sizeof(*data) + data->len, - E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { -- e820__range_update(((struct setup_indirect *)data->data)->addr, -- ((struct setup_indirect *)data->data)->len, -- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); -- e820__range_update_kexec(((struct setup_indirect *)data->data)->addr, -- ((struct setup_indirect *)data->data)->len, -- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); -+ if (data->type == SETUP_INDIRECT) { -+ len += data->len; -+ early_memunmap(data, sizeof(*data)); -+ data = early_memremap(pa_data, len); -+ if (!data) { -+ pr_warn("e820: failed to memremap indirect setup_data\n"); -+ return; -+ } -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) { -+ e820__range_update(indirect->addr, indirect->len, -+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); -+ e820__range_update_kexec(indirect->addr, indirect->len, -+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); -+ } - } - -- pa_data = data->next; -- early_memunmap(data, sizeof(*data)); -+ pa_data = pa_next; -+ early_memunmap(data, len); - } - - e820__update_table(e820_table); -diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c -index 391a4e2b86049..8690fab95ae4b 100644 ---- a/arch/x86/kernel/early-quirks.c -+++ b/arch/x86/kernel/early-quirks.c -@@ -515,6 +515,7 @@ static const struct intel_early_ops gen11_early_ops __initconst = { - .stolen_size = gen9_stolen_size, - }; - -+/* Intel integrated GPUs for which we need to reserve "stolen memory" */ - static const struct pci_device_id intel_early_ids[] __initconst = { - INTEL_I830_IDS(&i830_early_ops), - INTEL_I845G_IDS(&i845_early_ops), -@@ -591,6 +592,13 @@ static void __init intel_graphics_quirks(int num, int slot, int func) - u16 device; - int i; - -+ /* -+ * Reserve "stolen memory" for an integrated GPU. 
If we've already -+ * found one, there's nothing to do for other (discrete) GPUs. -+ */ -+ if (resource_size(&intel_graphics_stolen_res)) -+ return; -+ - device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); - - for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) { -@@ -703,7 +711,7 @@ static struct chipset early_qrk[] __initdata = { - { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, - PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, - { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, -- QFLAG_APPLY_ONCE, intel_graphics_quirks }, -+ 0, intel_graphics_quirks }, - /* - * HPET on the current version of the Baytrail platform has accuracy - * problems: it will halt in deep idle state - so we disable it. -diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c -index 7ada7bd03a327..3ad1bf5de7373 100644 ---- a/arch/x86/kernel/fpu/core.c -+++ b/arch/x86/kernel/fpu/core.c -@@ -25,17 +25,7 @@ - */ - union fpregs_state init_fpstate __ro_after_init; - --/* -- * Track whether the kernel is using the FPU state -- * currently. -- * -- * This flag is used: -- * -- * - by IRQ context code to potentially use the FPU -- * if it's unused. -- * -- * - to debug kernel_fpu_begin()/end() correctness -- */ -+/* Track in-kernel FPU usage */ - static DEFINE_PER_CPU(bool, in_kernel_fpu); - - /* -@@ -43,42 +33,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu); - */ - DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); - --static bool kernel_fpu_disabled(void) --{ -- return this_cpu_read(in_kernel_fpu); --} -- --static bool interrupted_kernel_fpu_idle(void) --{ -- return !kernel_fpu_disabled(); --} -- --/* -- * Were we in user mode (or vm86 mode) when we were -- * interrupted? -- * -- * Doing kernel_fpu_begin/end() is ok if we are running -- * in an interrupt context from user mode - we'll just -- * save the FPU state as required. -- */ --static bool interrupted_user_mode(void) --{ -- struct pt_regs *regs = get_irq_regs(); -- return regs && user_mode(regs); --} -- - /* - * Can we use the FPU in kernel mode with the - * whole "kernel_fpu_begin/end()" sequence? -- * -- * It's always ok in process context (ie "not interrupt") -- * but it is sometimes ok even from an irq. - */ - bool irq_fpu_usable(void) - { -- return !in_interrupt() || -- interrupted_user_mode() || -- interrupted_kernel_fpu_idle(); -+ if (WARN_ON_ONCE(in_nmi())) -+ return false; -+ -+ /* In kernel FPU usage already active? */ -+ if (this_cpu_read(in_kernel_fpu)) -+ return false; -+ -+ /* -+ * When not in NMI or hard interrupt context, FPU can be used in: -+ * -+ * - Task context except from within fpregs_lock()'ed critical -+ * regions. -+ * -+ * - Soft interrupt processing context which cannot happen -+ * while in a fpregs_lock()'ed critical region. -+ */ -+ if (!in_hardirq()) -+ return true; -+ -+ /* -+ * In hard interrupt context it's safe when soft interrupts -+ * are enabled, which means the interrupt did not hit in -+ * a fpregs_lock()'ed critical region. -+ */ -+ return !softirq_count(); - } - EXPORT_SYMBOL(irq_fpu_usable); - -@@ -345,7 +330,7 @@ static void fpu_reset_fpstate(void) - struct fpu *fpu = ¤t->thread.fpu; - - fpregs_lock(); -- fpu__drop(fpu); -+ __fpu_invalidate_fpregs_state(fpu); - /* - * This does not change the actual hardware registers. 
It just - * resets the memory image and sets TIF_NEED_FPU_LOAD so a -diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c -index 64e29927cc32f..ddf65f1927e12 100644 ---- a/arch/x86/kernel/fpu/init.c -+++ b/arch/x86/kernel/fpu/init.c -@@ -49,7 +49,7 @@ void fpu__init_cpu(void) - fpu__init_cpu_xstate(); - } - --static bool fpu__probe_without_cpuid(void) -+static bool __init fpu__probe_without_cpuid(void) - { - unsigned long cr0; - u16 fsw, fcw; -@@ -67,7 +67,7 @@ static bool fpu__probe_without_cpuid(void) - return fsw == 0 && (fcw & 0x103f) == 0x003f; - } - --static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) -+static void __init fpu__init_system_early_generic(void) - { - if (!boot_cpu_has(X86_FEATURE_CPUID) && - !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { -@@ -138,9 +138,6 @@ static void __init fpu__init_system_generic(void) - unsigned int fpu_kernel_xstate_size __ro_after_init; - EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size); - --/* Get alignment of the TYPE. */ --#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test) -- - /* - * Enforce that 'MEMBER' is the last field of 'TYPE'. - * -@@ -148,8 +145,8 @@ EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size); - * because that's how C aligns structs. - */ - #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \ -- BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \ -- TYPE_ALIGN(TYPE))) -+ BUILD_BUG_ON(sizeof(TYPE) != \ -+ ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE))) - - /* - * We append the 'struct fpu' to the task_struct: -@@ -229,9 +226,9 @@ static void __init fpu__init_system_ctx_switch(void) - * Called on the boot CPU once per system bootup, to set up the initial - * FPU state that is later cloned into all processes: - */ --void __init fpu__init_system(struct cpuinfo_x86 *c) -+void __init fpu__init_system(void) - { -- fpu__init_system_early_generic(c); -+ fpu__init_system_early_generic(); - - /* - * The FPU has to be operational for some of the -diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c -index 66ed317ebc0d3..bd243ae57680e 100644 ---- a/arch/x86/kernel/fpu/regset.c -+++ b/arch/x86/kernel/fpu/regset.c -@@ -87,11 +87,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, - const void *kbuf, const void __user *ubuf) - { - struct fpu *fpu = &target->thread.fpu; -- struct user32_fxsr_struct newstate; -+ struct fxregs_state newstate; - int ret; - -- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state)); -- - if (!cpu_feature_enabled(X86_FEATURE_FXSR)) - return -ENODEV; - -@@ -112,9 +110,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, - /* Copy the state */ - memcpy(&fpu->state.fxsave, &newstate, sizeof(newstate)); - -- /* Clear xmm8..15 */ -+ /* Clear xmm8..15 for 32-bit callers */ - BUILD_BUG_ON(sizeof(fpu->state.fxsave.xmm_space) != 16 * 16); -- memset(&fpu->state.fxsave.xmm_space[8], 0, 8 * 16); -+ if (in_ia32_syscall()) -+ memset(&fpu->state.fxsave.xmm_space[8*4], 0, 8 * 16); - - /* Mark FP and SSE as in use when XSAVE is enabled */ - if (use_xsave()) -@@ -164,7 +163,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, - } - - fpu_force_restore(fpu); -- ret = copy_uabi_from_kernel_to_xstate(&fpu->state.xsave, kbuf ?: tmpbuf); -+ ret = copy_uabi_from_kernel_to_xstate(&fpu->state.xsave, kbuf ?: tmpbuf, &target->thread.pkru); - - out: - vfree(tmpbuf); -diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c -index 
831b25c5e7058..7f76cb099e66a 100644 ---- a/arch/x86/kernel/fpu/signal.c -+++ b/arch/x86/kernel/fpu/signal.c -@@ -205,7 +205,7 @@ retry: - fpregs_unlock(); - - if (ret) { -- if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size)) -+ if (!fault_in_writeable(buf_fx, fpu_user_xstate_size)) - goto retry; - return -EFAULT; - } -@@ -278,10 +278,9 @@ retry: - if (ret != -EFAULT) - return -EINVAL; - -- ret = fault_in_pages_readable(buf, size); -- if (!ret) -+ if (!fault_in_readable(buf, size)) - goto retry; -- return ret; -+ return -EFAULT; - } - - /* -@@ -371,7 +370,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx, - fpregs_unlock(); - - if (use_xsave() && !fx_only) { -- ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx); -+ ret = copy_sigframe_from_user_to_xstate(tsk, buf_fx); - if (ret) - return ret; - } else { -diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c -index c8def1b7f8fba..81891f0fff6f6 100644 ---- a/arch/x86/kernel/fpu/xstate.c -+++ b/arch/x86/kernel/fpu/xstate.c -@@ -809,6 +809,13 @@ void __init fpu__init_system_xstate(void) - goto out_disable; - } - -+ /* -+ * CPU capabilities initialization runs before FPU init. So -+ * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely -+ * functional, set the feature bit so depending code works. -+ */ -+ setup_force_cpu_cap(X86_FEATURE_OSXSAVE); -+ - print_xstate_offset_size(); - pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", - xfeatures_mask_all, -@@ -1091,8 +1098,31 @@ static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size, - } - - -+/** -+ * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate -+ * @fpstate: The fpstate buffer to copy to -+ * @kbuf: The UABI format buffer, if it comes from the kernel -+ * @ubuf: The UABI format buffer, if it comes from userspace -+ * @pkru: The location to write the PKRU value to -+ * -+ * Converts from the UABI format into the kernel internal hardware -+ * dependent format. -+ * -+ * This function ultimately has two different callers with distinct PKRU -+ * behavior. -+ * 1. When called from sigreturn the PKRU register will be restored from -+ * @fpstate via an XRSTOR. Correctly copying the UABI format buffer to -+ * @fpstate is sufficient to cover this case, but the caller will also -+ * pass a pointer to the thread_struct's pkru field in @pkru and updating -+ * it is harmless. -+ * 2. When called from ptrace the PKRU register will be restored from the -+ * thread_struct's pkru field. A pointer to that is passed in @pkru. -+ * The kernel will restore it manually, so the XRSTOR behavior that resets -+ * the PKRU register to the hardware init value (0) if the corresponding -+ * xfeatures bit is not set is emulated here. -+ */ - static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf, -- const void __user *ubuf) -+ const void __user *ubuf, u32 *pkru) - { - unsigned int offset, size; - struct xstate_header hdr; -@@ -1140,6 +1170,14 @@ static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf, - } - } - -+ if (hdr.xfeatures & XFEATURE_MASK_PKRU) { -+ struct pkru_state *xpkru; -+ -+ xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU); -+ *pkru = xpkru->pkru; -+ } else -+ *pkru = 0; -+ - /* - * The state that came in from userspace was user-state only. 
- * Mask all the user states out of 'xfeatures': -@@ -1159,9 +1197,9 @@ static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf, - * format and copy to the target thread. This is called from - * xstateregs_set(). - */ --int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf) -+int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf, u32 *pkru) - { -- return copy_uabi_to_xstate(xsave, kbuf, NULL); -+ return copy_uabi_to_xstate(xsave, kbuf, NULL, pkru); - } - - /* -@@ -1169,10 +1207,10 @@ int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf) - * XSAVE[S] format and copy to the target thread. This is called from the - * sigreturn() and rt_sigreturn() system calls. - */ --int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, -+int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, - const void __user *ubuf) - { -- return copy_uabi_to_xstate(xsave, NULL, ubuf); -+ return copy_uabi_to_xstate(&tsk->thread.fpu.state.xsave, NULL, ubuf, &tsk->thread.pkru); - } - - static bool validate_xsaves_xrstors(u64 mask) -diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c -index 1b3ce3b4a2a2f..4017da3a4c701 100644 ---- a/arch/x86/kernel/ftrace.c -+++ b/arch/x86/kernel/ftrace.c -@@ -93,6 +93,7 @@ static int ftrace_verify_code(unsigned long ip, const char *old_code) - - /* Make sure it is what we expect it to be */ - if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) { -+ ftrace_expected = old_code; - WARN_ON(1); - return -EINVAL; - } -@@ -218,7 +219,9 @@ void ftrace_replace_code(int enable) - - ret = ftrace_verify_code(rec->ip, old); - if (ret) { -+ ftrace_expected = old; - ftrace_bug(ret, rec); -+ ftrace_expected = NULL; - return; - } - } -@@ -308,7 +311,7 @@ union ftrace_op_code_union { - } __attribute__((packed)); - }; - --#define RET_SIZE 1 -+#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 
5 : 1 + IS_ENABLED(CONFIG_SLS)) - - static unsigned long - create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) -@@ -321,12 +324,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) - unsigned long offset; - unsigned long npages; - unsigned long size; -- unsigned long retq; - unsigned long *ptr; - void *trampoline; - void *ip; - /* 48 8b 15 is movq (%rip), %rdx */ - unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; -+ unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE }; - union ftrace_op_code_union op_ptr; - int ret; - -@@ -366,10 +369,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) - ip = trampoline + size; - - /* The trampoline ends with ret(q) */ -- retq = (unsigned long)ftrace_stub; -- ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE); -- if (WARN_ON(ret < 0)) -- goto fail; -+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) -+ memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE); -+ else -+ memcpy(ip, retq, sizeof(retq)); - - /* No need to test direct calls on created trampolines */ - if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { -diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S -index e405fe1a8bf41..a0ed0e4a2c0cd 100644 ---- a/arch/x86/kernel/ftrace_32.S -+++ b/arch/x86/kernel/ftrace_32.S -@@ -19,7 +19,7 @@ - #endif - - SYM_FUNC_START(__fentry__) -- ret -+ RET - SYM_FUNC_END(__fentry__) - EXPORT_SYMBOL(__fentry__) - -@@ -84,7 +84,7 @@ ftrace_graph_call: - - /* This is weak to keep gas from relaxing the jumps */ - SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) -- ret -+ RET - SYM_CODE_END(ftrace_caller) - - SYM_CODE_START(ftrace_regs_caller) -@@ -177,7 +177,7 @@ SYM_CODE_START(ftrace_graph_caller) - popl %edx - popl %ecx - popl %eax -- ret -+ RET - SYM_CODE_END(ftrace_graph_caller) - - .globl return_to_handler -diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S -index 7c273846c6877..6cc14a835991d 100644 ---- a/arch/x86/kernel/ftrace_64.S -+++ b/arch/x86/kernel/ftrace_64.S -@@ -132,7 +132,7 @@ - #ifdef CONFIG_DYNAMIC_FTRACE - - SYM_FUNC_START(__fentry__) -- retq -+ RET - SYM_FUNC_END(__fentry__) - EXPORT_SYMBOL(__fentry__) - -@@ -181,11 +181,10 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) - - /* - * This is weak to keep gas from relaxing the jumps. -- * It is also used to copy the retq for trampolines. - */ - SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) - UNWIND_HINT_FUNC -- retq -+ RET - SYM_FUNC_END(ftrace_epilogue) - - SYM_FUNC_START(ftrace_regs_caller) -@@ -299,7 +298,7 @@ fgraph_trace: - #endif - - SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL) -- retq -+ RET - - trace: - /* save_mcount_regs fills in first two parameters */ -@@ -331,11 +330,11 @@ SYM_FUNC_START(ftrace_graph_caller) - - restore_mcount_regs - -- retq -+ RET - SYM_FUNC_END(ftrace_graph_caller) - - SYM_FUNC_START(return_to_handler) -- subq $24, %rsp -+ subq $16, %rsp - - /* Save the return values */ - movq %rax, (%rsp) -@@ -347,7 +346,19 @@ SYM_FUNC_START(return_to_handler) - movq %rax, %rdi - movq 8(%rsp), %rdx - movq (%rsp), %rax -- addq $24, %rsp -- JMP_NOSPEC rdi -+ -+ addq $16, %rsp -+ /* -+ * Jump back to the old return address. This cannot be JMP_NOSPEC rdi -+ * since IBT would demand that contain ENDBR, which simply isn't so for -+ * return addresses. Use a retpoline here to keep the RSB balanced. 
-+ */ -+ ANNOTATE_INTRA_FUNCTION_CALL -+ call .Ldo_rop -+ int3 -+.Ldo_rop: -+ mov %rdi, (%rsp) -+ UNWIND_HINT_FUNC -+ RET - SYM_FUNC_END(return_to_handler) - #endif -diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c -index de01903c37355..5036104d54707 100644 ---- a/arch/x86/kernel/head64.c -+++ b/arch/x86/kernel/head64.c -@@ -418,6 +418,8 @@ static void __init clear_bss(void) - { - memset(__bss_start, 0, - (unsigned long) __bss_stop - (unsigned long) __bss_start); -+ memset(__brk_base, 0, -+ (unsigned long) __brk_limit - (unsigned long) __brk_base); - } - - static unsigned long get_cmd_line_ptr(void) -diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S -index d8c64dab0efe0..9b7acc9c7874c 100644 ---- a/arch/x86/kernel/head_32.S -+++ b/arch/x86/kernel/head_32.S -@@ -23,6 +23,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -340,7 +341,7 @@ SYM_FUNC_END(startup_32_smp) - __INIT - setup_once: - andl $0,setup_once_ref /* Once is enough, thanks */ -- ret -+ RET - - SYM_FUNC_START(early_idt_handler_array) - # 36(%esp) %eflags -diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S -index d8b3ebd2bb85f..81f1ae278718e 100644 ---- a/arch/x86/kernel/head_64.S -+++ b/arch/x86/kernel/head_64.S -@@ -312,6 +312,8 @@ SYM_CODE_END(start_cpu0) - SYM_CODE_START_NOALIGN(vc_boot_ghcb) - UNWIND_HINT_IRET_REGS offset=8 - -+ ANNOTATE_UNRET_END -+ - /* Build pt_regs */ - PUSH_AND_CLEAR_REGS - -@@ -369,6 +371,7 @@ SYM_CODE_START(early_idt_handler_array) - SYM_CODE_END(early_idt_handler_array) - - SYM_CODE_START_LOCAL(early_idt_handler_common) -+ ANNOTATE_UNRET_END - /* - * The stack is the hardware frame, an error code or zero, and the - * vector number. -@@ -415,6 +418,8 @@ SYM_CODE_END(early_idt_handler_common) - SYM_CODE_START_NOALIGN(vc_no_ghcb) - UNWIND_HINT_IRET_REGS offset=8 - -+ ANNOTATE_UNRET_END -+ - /* Build pt_regs */ - PUSH_AND_CLEAR_REGS - -diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c -index 882213df37130..71f336425e58a 100644 ---- a/arch/x86/kernel/hpet.c -+++ b/arch/x86/kernel/hpet.c -@@ -1435,8 +1435,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) - hpet_rtc_timer_reinit(); - memset(&curr_time, 0, sizeof(struct rtc_time)); - -- if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) -- mc146818_get_time(&curr_time); -+ if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { -+ if (unlikely(mc146818_get_time(&curr_time) < 0)) { -+ pr_err_ratelimited("unable to read current time from RTC\n"); -+ return IRQ_HANDLED; -+ } -+ } - - if (hpet_rtc_flags & RTC_UIE && - curr_time.tm_sec != hpet_prev_update_sec) { -diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c -index 15aefa3f3e18e..f91e5e31aa4f0 100644 ---- a/arch/x86/kernel/i8259.c -+++ b/arch/x86/kernel/i8259.c -@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq) - disable_irq_nosync(irq); - io_apic_irqs &= ~(1<init(0); - -- for (i = 0; i < nr_legacy_irqs(); i++) -+ for (i = 0; i < nr_legacy_irqs(); i++) { - irq_set_chip_and_handler(i, chip, handle_level_irq); -+ irq_set_status_flags(i, IRQ_LEVEL); -+ } - } - - void __init init_IRQ(void) -diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c -index 64b6da95af984..e2e89bebcbc32 100644 ---- a/arch/x86/kernel/kdebugfs.c -+++ b/arch/x86/kernel/kdebugfs.c -@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no, - - static int __init create_setup_data_nodes(struct dentry *parent) - { -+ struct setup_indirect *indirect; - struct setup_data_node *node; - struct setup_data 
*data; -- int error; -+ u64 pa_data, pa_next; - struct dentry *d; -- u64 pa_data; -+ int error; -+ u32 len; - int no = 0; - - d = debugfs_create_dir("setup_data", parent); -@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent) - error = -ENOMEM; - goto err_dir; - } -- -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { -- node->paddr = ((struct setup_indirect *)data->data)->addr; -- node->type = ((struct setup_indirect *)data->data)->type; -- node->len = ((struct setup_indirect *)data->data)->len; -+ pa_next = data->next; -+ -+ if (data->type == SETUP_INDIRECT) { -+ len = sizeof(*data) + data->len; -+ memunmap(data); -+ data = memremap(pa_data, len, MEMREMAP_WB); -+ if (!data) { -+ kfree(node); -+ error = -ENOMEM; -+ goto err_dir; -+ } -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) { -+ node->paddr = indirect->addr; -+ node->type = indirect->type; -+ node->len = indirect->len; -+ } else { -+ node->paddr = pa_data; -+ node->type = data->type; -+ node->len = data->len; -+ } - } else { - node->paddr = pa_data; - node->type = data->type; -@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) - } - - create_setup_data_node(d, no, node); -- pa_data = data->next; -+ pa_data = pa_next; - - memunmap(data); - no++; -diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c -index b6e046e4b2895..c4b618d0b16a0 100644 ---- a/arch/x86/kernel/kprobes/core.c -+++ b/arch/x86/kernel/kprobes/core.c -@@ -37,6 +37,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -289,12 +290,15 @@ static int can_probe(unsigned long paddr) - if (ret < 0) - return 0; - -+#ifdef CONFIG_KGDB - /* -- * Another debugging subsystem might insert this breakpoint. -- * In that case, we can't recover it. -+ * If there is a dynamically installed kgdb sw breakpoint, -+ * this function should not be probed. - */ -- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) -+ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && -+ kgdb_has_hit_break(addr)) - return 0; -+#endif - addr += insn.length; - } - -@@ -495,7 +499,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs) - match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^ - ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT); - if (p->ainsn.jcc.type >= 0xe) -- match = match && (regs->flags & X86_EFLAGS_ZF); -+ match = match || (regs->flags & X86_EFLAGS_ZF); - } - __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert)); - } -@@ -816,16 +820,20 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe); - static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) - { -- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { -- kcb->kprobe_status = KPROBE_HIT_SSDONE; -- cur->post_handler(cur, regs, 0); -- } -- - /* Restore back the original saved kprobes variables and continue. */ -- if (kcb->kprobe_status == KPROBE_REENTER) -+ if (kcb->kprobe_status == KPROBE_REENTER) { -+ /* This will restore both kcb and current_kprobe */ - restore_previous_kprobe(kcb); -- else -+ } else { -+ /* -+ * Always update the kcb status because -+ * reset_curent_kprobe() doesn't update kcb. 
-+ */ -+ kcb->kprobe_status = KPROBE_HIT_SSDONE; -+ if (cur->post_handler) -+ cur->post_handler(cur, regs, 0); - reset_current_kprobe(); -+ } - } - NOKPROBE_SYMBOL(kprobe_post_process); - -@@ -1044,7 +1052,7 @@ asm( - RESTORE_REGS_STRING - " popfl\n" - #endif -- " ret\n" -+ ASM_RET - ".size kretprobe_trampoline, .-kretprobe_trampoline\n" - ); - NOKPROBE_SYMBOL(kretprobe_trampoline); -diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c -index 71425ebba98a1..98d0e2012e1f3 100644 ---- a/arch/x86/kernel/kprobes/opt.c -+++ b/arch/x86/kernel/kprobes/opt.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -45,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) - /* This function only handles jump-optimized kprobe */ - if (kp && kprobe_optimized(kp)) { - op = container_of(kp, struct optimized_kprobe, kp); -- /* If op->list is not empty, op is under optimizing */ -- if (list_empty(&op->list)) -+ /* If op is optimized or under unoptimizing */ -+ if (list_empty(&op->list) || optprobe_queued_unopt(op)) - goto found; - } - } -@@ -272,19 +273,6 @@ static int insn_is_indirect_jump(struct insn *insn) - return ret; - } - --static bool is_padding_int3(unsigned long addr, unsigned long eaddr) --{ -- unsigned char ops; -- -- for (; addr < eaddr; addr++) { -- if (get_kernel_nofault(ops, (void *)addr) < 0 || -- ops != INT3_INSN_OPCODE) -- return false; -- } -- -- return true; --} -- - /* Decode whole function to ensure any instructions don't jump into target */ - static int can_optimize(unsigned long paddr) - { -@@ -327,15 +315,15 @@ static int can_optimize(unsigned long paddr) - ret = insn_decode_kernel(&insn, (void *)recovered_insn); - if (ret < 0) - return 0; -- -+#ifdef CONFIG_KGDB - /* -- * In the case of detecting unknown breakpoint, this could be -- * a padding INT3 between functions. Let's check that all the -- * rest of the bytes are also INT3. -+ * If there is a dynamically installed kgdb sw breakpoint, -+ * this function should not be probed. - */ -- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) -- return is_padding_int3(addr, paddr - offset + size) ? 
1 : 0; -- -+ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && -+ kgdb_has_hit_break(addr)) -+ return 0; -+#endif - /* Recover address */ - insn.kaddr = (void *)addr; - insn.next_byte = (void *)(addr + insn.length); -@@ -358,7 +346,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) - - for (i = 1; i < op->optinsn.size; i++) { - p = get_kprobe(op->kp.addr + i); -- if (p && !kprobe_disabled(p)) -+ if (p && !kprobe_disarmed(p)) - return -EEXIST; - } - -diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c -index d0a19121c6a4f..257892fcefa79 100644 ---- a/arch/x86/kernel/ksysfs.c -+++ b/arch/x86/kernel/ksysfs.c -@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr) - - static int __init get_setup_data_size(int nr, size_t *size) - { -- int i = 0; -+ u64 pa_data = boot_params.hdr.setup_data, pa_next; -+ struct setup_indirect *indirect; - struct setup_data *data; -- u64 pa_data = boot_params.hdr.setup_data; -+ int i = 0; -+ u32 len; - - while (pa_data) { - data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); - if (!data) - return -ENOMEM; -+ pa_next = data->next; -+ - if (nr == i) { -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) -- *size = ((struct setup_indirect *)data->data)->len; -- else -+ if (data->type == SETUP_INDIRECT) { -+ len = sizeof(*data) + data->len; -+ memunmap(data); -+ data = memremap(pa_data, len, MEMREMAP_WB); -+ if (!data) -+ return -ENOMEM; -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) -+ *size = indirect->len; -+ else -+ *size = data->len; -+ } else { - *size = data->len; -+ } - - memunmap(data); - return 0; - } - -- pa_data = data->next; -+ pa_data = pa_next; - memunmap(data); - i++; - } -@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size) - static ssize_t type_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) - { -+ struct setup_indirect *indirect; -+ struct setup_data *data; - int nr, ret; - u64 paddr; -- struct setup_data *data; -+ u32 len; - - ret = kobj_to_setup_data_nr(kobj, &nr); - if (ret) -@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj, - if (!data) - return -ENOMEM; - -- if (data->type == SETUP_INDIRECT) -- ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type); -- else -+ if (data->type == SETUP_INDIRECT) { -+ len = sizeof(*data) + data->len; -+ memunmap(data); -+ data = memremap(paddr, len, MEMREMAP_WB); -+ if (!data) -+ return -ENOMEM; -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ ret = sprintf(buf, "0x%x\n", indirect->type); -+ } else { - ret = sprintf(buf, "0x%x\n", data->type); -+ } -+ - memunmap(data); - return ret; - } -@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp, - char *buf, - loff_t off, size_t count) - { -+ struct setup_indirect *indirect; -+ struct setup_data *data; - int nr, ret = 0; - u64 paddr, len; -- struct setup_data *data; - void *p; - - ret = kobj_to_setup_data_nr(kobj, &nr); -@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp, - if (!data) - return -ENOMEM; - -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { -- paddr = ((struct setup_indirect *)data->data)->addr; -- len = ((struct setup_indirect *)data->data)->len; -+ if (data->type == SETUP_INDIRECT) { -+ len = sizeof(*data) + data->len; -+ memunmap(data); -+ data = memremap(paddr, len, MEMREMAP_WB); -+ if (!data) -+ return -ENOMEM; 
-+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) { -+ paddr = indirect->addr; -+ len = indirect->len; -+ } else { -+ /* -+ * Even though this is technically undefined, return -+ * the data as though it is a normal setup_data struct. -+ * This will at least allow it to be inspected. -+ */ -+ paddr += sizeof(*data); -+ len = data->len; -+ } - } else { - paddr += sizeof(*data); - len = data->len; -diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c -index b656456c3a944..eba6485a59a39 100644 ---- a/arch/x86/kernel/kvm.c -+++ b/arch/x86/kernel/kvm.c -@@ -66,6 +66,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align - DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible; - static int has_steal_clock = 0; - -+static int has_guest_poll = 0; - /* - * No need for any "IO delay" on KVM - */ -@@ -187,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token) - { - u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); - struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; -- struct kvm_task_sleep_node *n; -+ struct kvm_task_sleep_node *n, *dummy = NULL; - - if (token == ~0) { - apf_task_wake_all(); -@@ -199,28 +200,41 @@ again: - n = _find_apf_task(b, token); - if (!n) { - /* -- * async PF was not yet handled. -- * Add dummy entry for the token. -+ * Async #PF not yet handled, add a dummy entry for the token. -+ * Allocating the token must be down outside of the raw lock -+ * as the allocator is preemptible on PREEMPT_RT kernels. - */ -- n = kzalloc(sizeof(*n), GFP_ATOMIC); -- if (!n) { -+ if (!dummy) { -+ raw_spin_unlock(&b->lock); -+ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC); -+ - /* -- * Allocation failed! Busy wait while other cpu -- * handles async PF. -+ * Continue looping on allocation failure, eventually -+ * the async #PF will be handled and allocating a new -+ * node will be unnecessary. -+ */ -+ if (!dummy) -+ cpu_relax(); -+ -+ /* -+ * Recheck for async #PF completion before enqueueing -+ * the dummy token to avoid duplicate list entries. - */ -- raw_spin_unlock(&b->lock); -- cpu_relax(); - goto again; - } -- n->token = token; -- n->cpu = smp_processor_id(); -- init_swait_queue_head(&n->wq); -- hlist_add_head(&n->link, &b->list); -+ dummy->token = token; -+ dummy->cpu = smp_processor_id(); -+ init_swait_queue_head(&dummy->wq); -+ hlist_add_head(&dummy->link, &b->list); -+ dummy = NULL; - } else { - apf_task_wake_one(n); - } - raw_spin_unlock(&b->lock); -- return; -+ -+ /* A dummy token might be allocated and ultimately not used. 
*/ -+ if (dummy) -+ kfree(dummy); - } - EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); - -@@ -457,19 +471,22 @@ static bool pv_tlb_flush_supported(void) - { - return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && - !kvm_para_has_hint(KVM_HINTS_REALTIME) && -- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); -+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && -+ (num_possible_cpus() != 1)); - } - - static bool pv_ipi_supported(void) - { -- return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI); -+ return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) && -+ (num_possible_cpus() != 1)); - } - - static bool pv_sched_yield_supported(void) - { - return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) && - !kvm_para_has_hint(KVM_HINTS_REALTIME) && -- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)); -+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) && -+ (num_possible_cpus() != 1)); - } - - #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) -@@ -507,7 +524,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) - } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { - ipi_bitmap <<= min - apic_id; - min = apic_id; -- } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { -+ } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) { - max = apic_id < max ? max : apic_id; - } else { - ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, -@@ -647,14 +664,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu) - - static int kvm_suspend(void) - { -+ u64 val = 0; -+ - kvm_guest_cpu_offline(false); - -+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL -+ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) -+ rdmsrl(MSR_KVM_POLL_CONTROL, val); -+ has_guest_poll = !(val & 1); -+#endif - return 0; - } - - static void kvm_resume(void) - { - kvm_cpu_online(raw_smp_processor_id()); -+ -+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL -+ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) -+ wrmsrl(MSR_KVM_POLL_CONTROL, 0); -+#endif - } - - static struct syscore_ops kvm_syscore_ops = { -@@ -919,7 +948,7 @@ asm( - "movq __per_cpu_offset(,%rdi,8), %rax;" - "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);" - "setne %al;" --"ret;" -+ASM_RET - ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;" - ".popsection"); - -diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c -index 131f30fdcfbdc..dc8b17568784f 100644 ---- a/arch/x86/kernel/machine_kexec_64.c -+++ b/arch/x86/kernel/machine_kexec_64.c -@@ -373,9 +373,6 @@ void machine_kexec(struct kimage *image) - #ifdef CONFIG_KEXEC_FILE - void *arch_kexec_kernel_image_load(struct kimage *image) - { -- vfree(image->elf_headers); -- image->elf_headers = NULL; -- - if (!image->fops || !image->fops->load) - return ERR_PTR(-ENOEXEC); - -@@ -511,6 +508,15 @@ overflow: - (int)ELF64_R_TYPE(rel[i].r_info), value); - return -ENOEXEC; - } -+ -+int arch_kimage_file_post_load_cleanup(struct kimage *image) -+{ -+ vfree(image->elf_headers); -+ image->elf_headers = NULL; -+ image->elf_headers_sz = 0; -+ -+ return kexec_image_post_load_cleanup_default(image); -+} - #endif /* CONFIG_KEXEC_FILE */ - - static int -diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c -index 5e9a34b5bd741..06b53ea940bf6 100644 ---- a/arch/x86/kernel/module.c -+++ b/arch/x86/kernel/module.c -@@ -67,6 +67,7 @@ static unsigned long int get_module_load_offset(void) - - void *module_alloc(unsigned long size) - { -+ gfp_t gfp_mask = GFP_KERNEL; - void *p; - - if (PAGE_ALIGN(size) 
> MODULES_LEN) -@@ -74,10 +75,10 @@ void *module_alloc(unsigned long size) - - p = __vmalloc_node_range(size, MODULE_ALIGN, - MODULES_VADDR + get_module_load_offset(), -- MODULES_END, GFP_KERNEL, -- PAGE_KERNEL, 0, NUMA_NO_NODE, -+ MODULES_END, gfp_mask, -+ PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, - __builtin_return_address(0)); -- if (p && (kasan_module_alloc(p, size) < 0)) { -+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) { - vfree(p); - return NULL; - } -@@ -251,7 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr, - struct module *me) - { - const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, -- *para = NULL, *orc = NULL, *orc_ip = NULL; -+ *para = NULL, *orc = NULL, *orc_ip = NULL, -+ *retpolines = NULL, *returns = NULL; - char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - - for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { -@@ -267,8 +269,28 @@ int module_finalize(const Elf_Ehdr *hdr, - orc = s; - if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name)) - orc_ip = s; -+ if (!strcmp(".retpoline_sites", secstrings + s->sh_name)) -+ retpolines = s; -+ if (!strcmp(".return_sites", secstrings + s->sh_name)) -+ returns = s; - } - -+ /* -+ * See alternative_instructions() for the ordering rules between the -+ * various patching types. -+ */ -+ if (para) { -+ void *pseg = (void *)para->sh_addr; -+ apply_paravirt(pseg, pseg + para->sh_size); -+ } -+ if (retpolines) { -+ void *rseg = (void *)retpolines->sh_addr; -+ apply_retpolines(rseg, rseg + retpolines->sh_size); -+ } -+ if (returns) { -+ void *rseg = (void *)returns->sh_addr; -+ apply_returns(rseg, rseg + returns->sh_size); -+ } - if (alt) { - /* patch .altinstructions */ - void *aseg = (void *)alt->sh_addr; -@@ -282,11 +304,6 @@ int module_finalize(const Elf_Ehdr *hdr, - tseg, tseg + text->sh_size); - } - -- if (para) { -- void *pseg = (void *)para->sh_addr; -- apply_paravirt(pseg, pseg + para->sh_size); -- } -- - /* make jump label nops */ - jump_label_apply_nops(me); - -diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c -index 04cafc057bed4..f1cdb8891ad41 100644 ---- a/arch/x86/kernel/paravirt.c -+++ b/arch/x86/kernel/paravirt.c -@@ -41,7 +41,7 @@ extern void _paravirt_nop(void); - asm (".pushsection .entry.text, \"ax\"\n" - ".global _paravirt_nop\n" - "_paravirt_nop:\n\t" -- "ret\n\t" -+ ASM_RET - ".size _paravirt_nop, . - _paravirt_nop\n\t" - ".type _paravirt_nop, @function\n\t" - ".popsection"); -diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c -index 6b07faaa15798..23154d24b1173 100644 ---- a/arch/x86/kernel/pmem.c -+++ b/arch/x86/kernel/pmem.c -@@ -27,6 +27,11 @@ static __init int register_e820_pmem(void) - * simply here to trigger the module to load on demand. 
- */ - pdev = platform_device_alloc("e820_pmem", -1); -- return platform_device_add(pdev); -+ -+ rc = platform_device_add(pdev); -+ if (rc) -+ platform_device_put(pdev); -+ -+ return rc; - } - device_initcall(register_e820_pmem); -diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c -index 1d9463e3096b6..e6b28c689e9a9 100644 ---- a/arch/x86/kernel/process.c -+++ b/arch/x86/kernel/process.c -@@ -132,6 +132,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg, - frame->ret_addr = (unsigned long) ret_from_fork; - p->thread.sp = (unsigned long) fork_frame; - p->thread.io_bitmap = NULL; -+ p->thread.iopl_warn = 0; - memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); - - #ifdef CONFIG_X86_64 -@@ -583,7 +584,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, - } - - if (updmsr) -- wrmsrl(MSR_IA32_SPEC_CTRL, msr); -+ update_spec_ctrl_cond(msr); - } - - static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) -@@ -730,7 +731,7 @@ bool xen_set_default_idle(void) - } - #endif - --void stop_this_cpu(void *dummy) -+void __noreturn stop_this_cpu(void *dummy) - { - local_irq_disable(); - /* -@@ -804,6 +805,10 @@ static void amd_e400_idle(void) - */ - static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) - { -+ /* User has disallowed the use of MWAIT. Fallback to HALT */ -+ if (boot_option_idle_override == IDLE_NOMWAIT) -+ return 0; -+ - if (c->x86_vendor != X86_VENDOR_INTEL) - return 0; - -@@ -912,9 +917,8 @@ static int __init idle_setup(char *str) - } else if (!strcmp(str, "nomwait")) { - /* - * If the boot option of "idle=nomwait" is added, -- * it means that mwait will be disabled for CPU C2/C3 -- * states. In such case it won't touch the variable -- * of boot_option_idle_override. -+ * it means that mwait will be disabled for CPU C1/C2/C3 -+ * states. - */ - boot_option_idle_override = IDLE_NOMWAIT; - } else -diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c -index 4f2f54e1281c3..d4a130337e931 100644 ---- a/arch/x86/kernel/process_32.c -+++ b/arch/x86/kernel/process_32.c -@@ -216,7 +216,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - switch_fpu_finish(next_fpu); - - /* Load the Intel cache allocation PQR MSR. */ -- resctrl_sched_in(); -+ resctrl_sched_in(next_p); - - return prev_p; - } -diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c -index ec0d836a13b12..b8fe38cd121df 100644 ---- a/arch/x86/kernel/process_64.c -+++ b/arch/x86/kernel/process_64.c -@@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - } - - /* Load the Intel cache allocation PQR MSR. 
*/ -- resctrl_sched_in(); -+ resctrl_sched_in(next_p); - - return prev_p; - } -diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c -index 4c208ea3bd9f3..033d9c6a94689 100644 ---- a/arch/x86/kernel/ptrace.c -+++ b/arch/x86/kernel/ptrace.c -@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = { - }, - [REGSET_FP] = { - .core_note_type = NT_PRFPREG, -- .n = sizeof(struct user_i387_struct) / sizeof(long), -+ .n = sizeof(struct fxregs_state) / sizeof(long), - .size = sizeof(long), .align = sizeof(long), - .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set - }, -@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = { - }, - [REGSET_XFP] = { - .core_note_type = NT_PRXFPREG, -- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), -+ .n = sizeof(struct fxregs_state) / sizeof(u32), - .size = sizeof(u32), .align = sizeof(u32), - .active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set - }, -diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c -index 0a40df66a40de..deedd77c7593f 100644 ---- a/arch/x86/kernel/reboot.c -+++ b/arch/x86/kernel/reboot.c -@@ -113,17 +113,9 @@ void __noreturn machine_real_restart(unsigned int type) - spin_unlock(&rtc_lock); - - /* -- * Switch back to the initial page table. -+ * Switch to the trampoline page table. - */ --#ifdef CONFIG_X86_32 -- load_cr3(initial_page_table); --#else -- write_cr3(real_mode_header->trampoline_pgd); -- -- /* Exiting long mode will fail if CR4.PCIDE is set. */ -- if (boot_cpu_has(X86_FEATURE_PCID)) -- cr4_clear_bits(X86_CR4_PCIDE); --#endif -+ load_trampoline_pgtable(); - - /* Jump to the identity-mapped low memory code */ - #ifdef CONFIG_X86_32 -@@ -536,33 +528,29 @@ static inline void kb_wait(void) - } - } - --static void vmxoff_nmi(int cpu, struct pt_regs *regs) --{ -- cpu_emergency_vmxoff(); --} -+static inline void nmi_shootdown_cpus_on_restart(void); - --/* Use NMIs as IPIs to tell all CPUs to disable virtualization */ --static void emergency_vmx_disable_all(void) -+static void emergency_reboot_disable_virtualization(void) - { - /* Just make sure we won't change CPUs while doing this */ - local_irq_disable(); - - /* -- * Disable VMX on all CPUs before rebooting, otherwise we risk hanging -- * the machine, because the CPU blocks INIT when it's in VMX root. -+ * Disable virtualization on all CPUs before rebooting to avoid hanging -+ * the system, as VMX and SVM block INIT when running in the host. - * - * We can't take any locks and we may be on an inconsistent state, so -- * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. -+ * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt. - * -- * Do the NMI shootdown even if VMX if off on _this_ CPU, as that -- * doesn't prevent a different CPU from being in VMX root operation. -+ * Do the NMI shootdown even if virtualization is off on _this_ CPU, as -+ * other CPUs may have virtualization enabled. - */ -- if (cpu_has_vmx()) { -- /* Safely force _this_ CPU out of VMX root operation. */ -- __cpu_emergency_vmxoff(); -+ if (cpu_has_vmx() || cpu_has_svm(NULL)) { -+ /* Safely force _this_ CPU out of VMX/SVM operation. */ -+ cpu_emergency_disable_virtualization(); - -- /* Halt and exit VMX root operation on the other CPUs. */ -- nmi_shootdown_cpus(vmxoff_nmi); -+ /* Disable VMX/SVM and halt on other CPUs. 
*/ -+ nmi_shootdown_cpus_on_restart(); - } - } - -@@ -598,7 +586,7 @@ static void native_machine_emergency_restart(void) - unsigned short mode; - - if (reboot_emergency) -- emergency_vmx_disable_all(); -+ emergency_reboot_disable_virtualization(); - - tboot_shutdown(TB_SHUTDOWN_REBOOT); - -@@ -803,6 +791,17 @@ void machine_crash_shutdown(struct pt_regs *regs) - /* This is the CPU performing the emergency shutdown work. */ - int crashing_cpu = -1; - -+/* -+ * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during -+ * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if -+ * GIF=0, i.e. if the crash occurred between CLGI and STGI. -+ */ -+void cpu_emergency_disable_virtualization(void) -+{ -+ cpu_emergency_vmxoff(); -+ cpu_emergency_svm_disable(); -+} -+ - #if defined(CONFIG_SMP) - - static nmi_shootdown_cb shootdown_callback; -@@ -825,7 +824,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) - return NMI_HANDLED; - local_irq_disable(); - -- shootdown_callback(cpu, regs); -+ if (shootdown_callback) -+ shootdown_callback(cpu, regs); -+ -+ /* -+ * Prepare the CPU for reboot _after_ invoking the callback so that the -+ * callback can safely use virtualization instructions, e.g. VMCLEAR. -+ */ -+ cpu_emergency_disable_virtualization(); - - atomic_dec(&waiting_for_crash_ipi); - /* Assume hlt works */ -@@ -836,18 +842,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) - return NMI_HANDLED; - } - --/* -- * Halt all other CPUs, calling the specified function on each of them -+/** -+ * nmi_shootdown_cpus - Stop other CPUs via NMI -+ * @callback: Optional callback to be invoked from the NMI handler -+ * -+ * The NMI handler on the remote CPUs invokes @callback, if not -+ * NULL, first and then disables virtualization to ensure that -+ * INIT is recognized during reboot. - * -- * This function can be used to halt all other CPUs on crash -- * or emergency reboot time. The function passed as parameter -- * will be called inside a NMI handler on all CPUs. -+ * nmi_shootdown_cpus() can only be invoked once. After the first -+ * invocation all other CPUs are stuck in crash_nmi_callback() and -+ * cannot respond to a second NMI. - */ - void nmi_shootdown_cpus(nmi_shootdown_cb callback) - { - unsigned long msecs; -+ - local_irq_disable(); - -+ /* -+ * Avoid certain doom if a shootdown already occurred; re-registering -+ * the NMI handler will cause list corruption, modifying the callback -+ * will do who knows what, etc... -+ */ -+ if (WARN_ON_ONCE(crash_ipi_issued)) -+ return; -+ - /* Make a note of crashing cpu. Will be used in NMI callback. */ - crashing_cpu = safe_smp_processor_id(); - -@@ -875,7 +895,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) - msecs--; - } - -- /* Leave the nmi callback set */ -+ /* -+ * Leave the nmi callback set, shootdown is a one-time thing. Clearing -+ * the callback could result in a NULL pointer dereference if a CPU -+ * (finally) responds after the timeout expires. 
-+ */ -+} -+ -+static inline void nmi_shootdown_cpus_on_restart(void) -+{ -+ if (!crash_ipi_issued) -+ nmi_shootdown_cpus(NULL); - } - - /* -@@ -905,6 +935,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) - /* No other CPUs to shoot down */ - } - -+static inline void nmi_shootdown_cpus_on_restart(void) { } -+ - void run_crash_ipi_callback(struct pt_regs *regs) - { - } -diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S -index f469153eca8ab..c7c4b1917336d 100644 ---- a/arch/x86/kernel/relocate_kernel_32.S -+++ b/arch/x86/kernel/relocate_kernel_32.S -@@ -7,10 +7,12 @@ - #include - #include - #include -+#include - #include - - /* -- * Must be relocatable PIC code callable as a C function -+ * Must be relocatable PIC code callable as a C function, in particular -+ * there must be a plain RET and not jump to return thunk. - */ - - #define PTR(x) (x << 2) -@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel) - movl %edi, %eax - addl $(identity_mapped - relocate_kernel), %eax - pushl %eax -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(relocate_kernel) - - SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) -@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) - xorl %edx, %edx - xorl %esi, %esi - xorl %ebp, %ebp -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - 1: - popl %edx - movl CP_PA_SWAP_PAGE(%edi), %esp - addl $PAGE_SIZE, %esp - 2: -+ ANNOTATE_RETPOLINE_SAFE - call *%edx - - /* get the re-entry point of the peer system */ -@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) - movl %edi, %eax - addl $(virtual_mapped - relocate_kernel), %eax - pushl %eax -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(identity_mapped) - - SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) -@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) - popl %edi - popl %esi - popl %ebx -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(virtual_mapped) - - /* Do the copies */ -@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) - popl %edi - popl %ebx - popl %ebp -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(swap_pages) - - .globl kexec_control_code_size -diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S -index c53271aebb64d..8a9cea950e398 100644 ---- a/arch/x86/kernel/relocate_kernel_64.S -+++ b/arch/x86/kernel/relocate_kernel_64.S -@@ -13,7 +13,8 @@ - #include - - /* -- * Must be relocatable PIC code callable as a C function -+ * Must be relocatable PIC code callable as a C function, in particular -+ * there must be a plain RET and not jump to return thunk. 
- */ - - #define PTR(x) (x << 3) -@@ -104,7 +105,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel) - /* jump to identity mapped page */ - addq $(identity_mapped - relocate_kernel), %r8 - pushq %r8 -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(relocate_kernel) - - SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) -@@ -191,7 +194,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) - xorl %r14d, %r14d - xorl %r15d, %r15d - -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - - 1: - popq %rdx -@@ -210,7 +215,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) - call swap_pages - movq $virtual_mapped, %rax - pushq %rax -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(identity_mapped) - - SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) -@@ -231,7 +238,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) - popq %r12 - popq %rbp - popq %rbx -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(virtual_mapped) - - /* Do the copies */ -@@ -288,7 +297,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages) - lea PAGE_SIZE(%rax), %rsi - jmp 0b - 3: -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_CODE_END(swap_pages) - - .globl kexec_control_code_size -diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index 40ed44ead0631..8e56c4de00b9c 100644 ---- a/arch/x86/kernel/setup.c -+++ b/arch/x86/kernel/setup.c -@@ -368,21 +368,41 @@ static void __init parse_setup_data(void) - - static void __init memblock_x86_reserve_range_setup_data(void) - { -+ struct setup_indirect *indirect; - struct setup_data *data; -- u64 pa_data; -+ u64 pa_data, pa_next; -+ u32 len; - - pa_data = boot_params.hdr.setup_data; - while (pa_data) { - data = early_memremap(pa_data, sizeof(*data)); -+ if (!data) { -+ pr_warn("setup: failed to memremap setup_data entry\n"); -+ return; -+ } -+ -+ len = sizeof(*data); -+ pa_next = data->next; -+ - memblock_reserve(pa_data, sizeof(*data) + data->len); - -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) -- memblock_reserve(((struct setup_indirect *)data->data)->addr, -- ((struct setup_indirect *)data->data)->len); -+ if (data->type == SETUP_INDIRECT) { -+ len += data->len; -+ early_memunmap(data, sizeof(*data)); -+ data = early_memremap(pa_data, len); -+ if (!data) { -+ pr_warn("setup: failed to memremap indirect setup_data\n"); -+ return; -+ } - -- pa_data = data->next; -- early_memunmap(data, sizeof(*data)); -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) -+ memblock_reserve(indirect->addr, indirect->len); -+ } -+ -+ pa_data = pa_next; -+ early_memunmap(data, len); - } - } - -@@ -713,9 +733,6 @@ static void __init early_reserve_memory(void) - - early_reserve_initrd(); - -- if (efi_enabled(EFI_BOOT)) -- efi_memblock_x86_reserve_range(); -- - memblock_x86_reserve_range_setup_data(); - - reserve_ibft_region(); -@@ -890,6 +907,9 @@ void __init setup_arch(char **cmdline_p) - - parse_early_param(); - -+ if (efi_enabled(EFI_BOOT)) -+ efi_memblock_x86_reserve_range(); -+ - #ifdef CONFIG_MEMORY_HOTPLUG - /* - * Memory used by the kernel cannot be hot-removed because Linux -diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c -index a6895e440bc35..a0064cf77e562 100644 ---- a/arch/x86/kernel/sev.c -+++ b/arch/x86/kernel/sev.c -@@ -46,16 +46,6 @@ static struct ghcb __initdata *boot_ghcb; - struct sev_es_runtime_data { - struct ghcb ghcb_page; - -- /* Physical storage for the per-CPU IST stack of the #VC handler */ -- char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE); -- -- /* -- * Physical storage for the per-CPU 
fall-back stack of the #VC handler. -- * The fall-back stack is used when it is not safe to switch back to the -- * interrupted stack in the #VC entry code. -- */ -- char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE); -- - /* - * Reserve one page per CPU as backup storage for the unencrypted GHCB. - * It is needed when an NMI happens while the #VC handler uses the real -@@ -99,27 +89,6 @@ DEFINE_STATIC_KEY_FALSE(sev_es_enable_key); - /* Needed in vc_early_forward_exception */ - void do_early_exception(struct pt_regs *regs, int trapnr); - --static void __init setup_vc_stacks(int cpu) --{ -- struct sev_es_runtime_data *data; -- struct cpu_entry_area *cea; -- unsigned long vaddr; -- phys_addr_t pa; -- -- data = per_cpu(runtime_data, cpu); -- cea = get_cpu_entry_area(cpu); -- -- /* Map #VC IST stack */ -- vaddr = CEA_ESTACK_BOT(&cea->estacks, VC); -- pa = __pa(data->ist_stack); -- cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); -- -- /* Map VC fall-back stack */ -- vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2); -- pa = __pa(data->fallback_stack); -- cea_set_pte((void *)vaddr, pa, PAGE_KERNEL); --} -- - static __always_inline bool on_vc_stack(struct pt_regs *regs) - { - unsigned long sp = regs->sp; -@@ -325,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, - char *dst, char *buf, size_t size) - { - unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; -- char __user *target = (char __user *)dst; -- u64 d8; -- u32 d4; -- u16 d2; -- u8 d1; - - /* - * This function uses __put_user() independent of whether kernel or user -@@ -351,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, - * instructions here would cause infinite nesting. - */ - switch (size) { -- case 1: -+ case 1: { -+ u8 d1; -+ u8 __user *target = (u8 __user *)dst; -+ - memcpy(&d1, buf, 1); - if (__put_user(d1, target)) - goto fault; - break; -- case 2: -+ } -+ case 2: { -+ u16 d2; -+ u16 __user *target = (u16 __user *)dst; -+ - memcpy(&d2, buf, 2); - if (__put_user(d2, target)) - goto fault; - break; -- case 4: -+ } -+ case 4: { -+ u32 d4; -+ u32 __user *target = (u32 __user *)dst; -+ - memcpy(&d4, buf, 4); - if (__put_user(d4, target)) - goto fault; - break; -- case 8: -+ } -+ case 8: { -+ u64 d8; -+ u64 __user *target = (u64 __user *)dst; -+ - memcpy(&d8, buf, 8); - if (__put_user(d8, target)) - goto fault; - break; -+ } - default: - WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); - return ES_UNSUPPORTED; -@@ -393,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, - char *src, char *buf, size_t size) - { - unsigned long error_code = X86_PF_PROT; -- char __user *s = (char __user *)src; -- u64 d8; -- u32 d4; -- u16 d2; -- u8 d1; - - /* - * This function uses __get_user() independent of whether kernel or user -@@ -419,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, - * instructions here would cause infinite nesting. 
- */ - switch (size) { -- case 1: -+ case 1: { -+ u8 d1; -+ u8 __user *s = (u8 __user *)src; -+ - if (__get_user(d1, s)) - goto fault; - memcpy(buf, &d1, 1); - break; -- case 2: -+ } -+ case 2: { -+ u16 d2; -+ u16 __user *s = (u16 __user *)src; -+ - if (__get_user(d2, s)) - goto fault; - memcpy(buf, &d2, 2); - break; -- case 4: -+ } -+ case 4: { -+ u32 d4; -+ u32 __user *s = (u32 __user *)src; -+ - if (__get_user(d4, s)) - goto fault; - memcpy(buf, &d4, 4); - break; -- case 8: -+ } -+ case 8: { -+ u64 d8; -+ u64 __user *s = (u64 __user *)src; - if (__get_user(d8, s)) - goto fault; - memcpy(buf, &d8, 8); - break; -+ } - default: - WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); - return ES_UNSUPPORTED; -@@ -787,7 +777,6 @@ void __init sev_es_init_vc_handling(void) - for_each_possible_cpu(cpu) { - alloc_runtime_data(cpu); - init_ghcb(cpu); -- setup_vc_stacks(cpu); - } - - sev_es_setup_play_dead(); -diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S -index ee04941a6546a..3355e27c69ebf 100644 ---- a/arch/x86/kernel/sev_verify_cbit.S -+++ b/arch/x86/kernel/sev_verify_cbit.S -@@ -85,5 +85,5 @@ SYM_FUNC_START(sev_verify_cbit) - #endif - /* Return page-table pointer */ - movq %rdi, %rax -- ret -+ RET - SYM_FUNC_END(sev_verify_cbit) -diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c -index f4d21e4700835..bf10340a9b71d 100644 ---- a/arch/x86/kernel/signal.c -+++ b/arch/x86/kernel/signal.c -@@ -722,7 +722,7 @@ badframe: - /* max_frame_size tells userspace the worst case signal stack size. */ - static unsigned long __ro_after_init max_frame_size; - --void __init init_sigframe_size(void) -+static int __init init_sigframe_size(void) - { - max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING; - -@@ -732,7 +732,9 @@ void __init init_sigframe_size(void) - max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT); - - pr_info("max sigframe size: %lu\n", max_frame_size); -+ return 0; - } -+early_initcall(init_sigframe_size); - - unsigned long get_sigframe_size(void) - { -diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c -index b52407c56000e..879ef8c72f5c0 100644 ---- a/arch/x86/kernel/signal_compat.c -+++ b/arch/x86/kernel/signal_compat.c -@@ -149,8 +149,10 @@ static inline void signal_compat_build_tests(void) - - BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18); - BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20); -+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_flags) != 0x24); - BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10); - BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14); -+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_flags) != 0x18); - - CHECK_CSI_OFFSET(_sigpoll); - CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); -diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c -index 06db901fabe8e..375b33ecafa27 100644 ---- a/arch/x86/kernel/smp.c -+++ b/arch/x86/kernel/smp.c -@@ -32,7 +32,7 @@ - #include - #include - #include --#include -+#include - - /* - * Some notes on x86 processor bugs affecting SMP operation: -@@ -122,7 +122,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) - if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) - return NMI_HANDLED; - -- cpu_emergency_vmxoff(); -+ cpu_emergency_disable_virtualization(); - stop_this_cpu(NULL); - - return NMI_HANDLED; -@@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) - DEFINE_IDTENTRY_SYSVEC(sysvec_reboot) - { - ack_APIC_irq(); -- 
cpu_emergency_vmxoff(); -+ cpu_emergency_disable_virtualization(); - stop_this_cpu(NULL); - } - -diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c -index 85f6e242b6b45..714f66aa03388 100644 ---- a/arch/x86/kernel/smpboot.c -+++ b/arch/x86/kernel/smpboot.c -@@ -105,6 +105,17 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map); - DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); - EXPORT_PER_CPU_SYMBOL(cpu_info); - -+struct mwait_cpu_dead { -+ unsigned int control; -+ unsigned int status; -+}; -+ -+/* -+ * Cache line aligned data for mwait_play_dead(). Separate on purpose so -+ * that it's unlikely to be touched by other CPUs. -+ */ -+static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead); -+ - /* Logical package management. We might want to allocate that dynamically */ - unsigned int __max_logical_packages __read_mostly; - EXPORT_SYMBOL(__max_logical_packages); -@@ -1685,10 +1696,10 @@ EXPORT_SYMBOL_GPL(cond_wakeup_cpu0); - */ - static inline void mwait_play_dead(void) - { -+ struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); - unsigned int eax, ebx, ecx, edx; - unsigned int highest_cstate = 0; - unsigned int highest_subcstate = 0; -- void *mwait_ptr; - int i; - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || -@@ -1723,13 +1734,6 @@ static inline void mwait_play_dead(void) - (highest_subcstate - 1); - } - -- /* -- * This should be a memory location in a cache line which is -- * unlikely to be touched by other processors. The actual -- * content is immaterial as it is not actually modified in any way. -- */ -- mwait_ptr = ¤t_thread_info()->flags; -- - wbinvd(); - - while (1) { -@@ -1741,9 +1745,9 @@ static inline void mwait_play_dead(void) - * case where we return around the loop. - */ - mb(); -- clflush(mwait_ptr); -+ clflush(md); - mb(); -- __monitor(mwait_ptr, 0, 0); -+ __monitor(md, 0, 0); - mb(); - __mwait(eax, 0); - -diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c -index ea028e736831a..b48b659ccf6fb 100644 ---- a/arch/x86/kernel/static_call.c -+++ b/arch/x86/kernel/static_call.c -@@ -12,12 +12,21 @@ enum insn_type { - }; - - /* -- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax -- * The REX.W cancels the effect of any data16. -+ * ud1 %esp, %ecx - a 3 byte #UD that is unique to trampolines, chosen such -+ * that there is no false-positive trampoline identification while also being a -+ * speculation stop. 
- */ --static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 }; -+static const u8 tramp_ud[] = { 0x0f, 0xb9, 0xcc }; - --static void __ref __static_call_transform(void *insn, enum insn_type type, void *func) -+/* -+ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax -+ */ -+static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 }; -+ -+static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc }; -+ -+static void __ref __static_call_transform(void *insn, enum insn_type type, -+ void *func, bool modinit) - { - const void *emulate = NULL; - int size = CALL_INSN_SIZE; -@@ -42,15 +51,17 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void - break; - - case RET: -- code = text_gen_insn(RET_INSN_OPCODE, insn, func); -- size = RET_INSN_SIZE; -+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) -+ code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk); -+ else -+ code = &retinsn; - break; - } - - if (memcmp(insn, code, size) == 0) - return; - -- if (unlikely(system_state == SYSTEM_BOOTING)) -+ if (system_state == SYSTEM_BOOTING || modinit) - return text_poke_early(insn, code, size); - - text_poke_bp(insn, code, size, emulate); -@@ -98,14 +109,55 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) - - if (tramp) { - __static_call_validate(tramp, true); -- __static_call_transform(tramp, __sc_insn(!func, true), func); -+ __static_call_transform(tramp, __sc_insn(!func, true), func, false); - } - - if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) { - __static_call_validate(site, tail); -- __static_call_transform(site, __sc_insn(!func, tail), func); -+ __static_call_transform(site, __sc_insn(!func, tail), func, false); - } - - mutex_unlock(&text_mutex); - } - EXPORT_SYMBOL_GPL(arch_static_call_transform); -+ -+#ifdef CONFIG_RETHUNK -+/* -+ * This is called by apply_returns() to fix up static call trampolines, -+ * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as -+ * having a return trampoline. -+ * -+ * The problem is that static_call() is available before determining -+ * X86_FEATURE_RETHUNK and, by implication, running alternatives. -+ * -+ * This means that __static_call_transform() above can have overwritten the -+ * return trampoline and we now need to fix things up to be consistent. -+ */ -+bool __static_call_fixup(void *tramp, u8 op, void *dest) -+{ -+ unsigned long addr = (unsigned long)tramp; -+ /* -+ * Not all .return_sites are a static_call trampoline (most are not). -+ * Check if the 3 bytes after the return are still kernel text, if not, -+ * then this definitely is not a trampoline and we need not worry -+ * further. -+ * -+ * This avoids the memcmp() below tripping over pagefaults etc.. -+ */ -+ if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && -+ !kernel_text_address(addr + 7)) -+ return false; -+ -+ if (memcmp(tramp+5, tramp_ud, 3)) { -+ /* Not a trampoline site, not our problem. 
*/ -+ return false; -+ } -+ -+ mutex_lock(&text_mutex); -+ if (op == RET_INSN_OPCODE || dest == &__x86_return_thunk) -+ __static_call_transform(tramp, RET, NULL, true); -+ mutex_unlock(&text_mutex); -+ -+ return true; -+} -+#endif -diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c -index 0f3c307b37b3a..8e2b2552b5eea 100644 ---- a/arch/x86/kernel/step.c -+++ b/arch/x86/kernel/step.c -@@ -180,8 +180,7 @@ void set_task_blockstep(struct task_struct *task, bool on) - * - * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if - * task is current or it can't be running, otherwise we can race -- * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but -- * PTRACE_KILL is not safe. -+ * with __switch_to_xtra(). We rely on ptrace_freeze_traced(). - */ - local_irq_disable(); - debugctl = get_debugctlmsr(); -diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c -index 660b78827638f..8cc653ffdccd7 100644 ---- a/arch/x86/kernel/sys_x86_64.c -+++ b/arch/x86/kernel/sys_x86_64.c -@@ -68,9 +68,6 @@ static int __init control_va_addr_alignment(char *str) - if (*str == 0) - return 1; - -- if (*str == '=') -- str++; -- - if (!strcmp(str, "32")) - va_align.flags = ALIGN_VA_32; - else if (!strcmp(str, "64")) -@@ -80,11 +77,11 @@ static int __init control_va_addr_alignment(char *str) - else if (!strcmp(str, "on")) - va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; - else -- return 0; -+ pr_warn("invalid option value: 'align_va_addr=%s'\n", str); - - return 1; - } --__setup("align_va_addr", control_va_addr_alignment); -+__setup("align_va_addr=", control_va_addr_alignment); - - SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, -diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c -index a58800973aed3..ca47080e37741 100644 ---- a/arch/x86/kernel/traps.c -+++ b/arch/x86/kernel/traps.c -@@ -313,17 +313,19 @@ out: - } - - #ifdef CONFIG_VMAP_STACK --__visible void __noreturn handle_stack_overflow(const char *message, -- struct pt_regs *regs, -- unsigned long fault_address) -+__visible void __noreturn handle_stack_overflow(struct pt_regs *regs, -+ unsigned long fault_address, -+ struct stack_info *info) - { -- printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n", -- (void *)fault_address, current->stack, -- (char *)current->stack + THREAD_SIZE - 1); -- die(message, regs, 0); -+ const char *name = stack_type_name(info->type); -+ -+ printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n", -+ name, (void *)fault_address, info->begin, info->end); -+ -+ die("stack guard page", regs, 0); - - /* Be absolutely certain we don't return. */ -- panic("%s", message); -+ panic("%s stack guard hit", name); - } - #endif - -@@ -353,6 +355,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault) - - #ifdef CONFIG_VMAP_STACK - unsigned long address = read_cr2(); -+ struct stack_info info; - #endif - - #ifdef CONFIG_X86_ESPFIX64 -@@ -455,10 +458,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault) - * stack even if the actual trigger for the double fault was - * something else. 
- */ -- if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) { -- handle_stack_overflow("kernel stack overflow (double-fault)", -- regs, address); -- } -+ if (get_stack_guard_info((void *)address, &info)) -+ handle_stack_overflow(regs, address, &info); - #endif - - pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code); -@@ -528,6 +529,36 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs, - - #define GPFSTR "general protection fault" - -+static bool fixup_iopl_exception(struct pt_regs *regs) -+{ -+ struct thread_struct *t = ¤t->thread; -+ unsigned char byte; -+ unsigned long ip; -+ -+ if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3) -+ return false; -+ -+ if (insn_get_effective_ip(regs, &ip)) -+ return false; -+ -+ if (get_user(byte, (const char __user *)ip)) -+ return false; -+ -+ if (byte != 0xfa && byte != 0xfb) -+ return false; -+ -+ if (!t->iopl_warn && printk_ratelimit()) { -+ pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx", -+ current->comm, task_pid_nr(current), ip); -+ print_vma_addr(KERN_CONT " in ", ip); -+ pr_cont("\n"); -+ t->iopl_warn = 1; -+ } -+ -+ regs->ip += 1; -+ return true; -+} -+ - DEFINE_IDTENTRY_ERRORCODE(exc_general_protection) - { - char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR; -@@ -553,6 +584,9 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection) - tsk = current; - - if (user_mode(regs)) { -+ if (fixup_iopl_exception(regs)) -+ goto exit; -+ - tsk->thread.error_code = error_code; - tsk->thread.trap_nr = X86_TRAP_GP; - -@@ -625,6 +659,7 @@ static bool do_int3(struct pt_regs *regs) - - return res == NOTIFY_STOP; - } -+NOKPROBE_SYMBOL(do_int3); - - static void do_int3_user(struct pt_regs *regs) - { -@@ -709,7 +744,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r - stack = (unsigned long *)sp; - - if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY || -- info.type >= STACK_TYPE_EXCEPTION_LAST) -+ info.type > STACK_TYPE_EXCEPTION_LAST) - sp = __this_cpu_ist_top_va(VC2); - - sync: -@@ -727,14 +762,10 @@ sync: - } - #endif - --struct bad_iret_stack { -- void *error_entry_ret; -- struct pt_regs regs; --}; -- --asmlinkage __visible noinstr --struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) -+asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs) - { -+ struct pt_regs tmp, *new_stack; -+ - /* - * This is called from entry_64.S early in handling a fault - * caused by a bad iret to user mode. To handle the fault -@@ -743,19 +774,18 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) - * just below the IRET frame) and we want to pretend that the - * exception came from the IRET target. - */ -- struct bad_iret_stack tmp, *new_stack = -- (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; -+ new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; - - /* Copy the IRET target to the temporary storage. */ -- __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8); -+ __memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8); - - /* Copy the remainder of the stack from the current stack. 
*/ -- __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip)); -+ __memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip)); - - /* Update the entry stack */ - __memcpy(new_stack, &tmp, sizeof(tmp)); - -- BUG_ON(!user_mode(&new_stack->regs)); -+ BUG_ON(!user_mode(new_stack)); - return new_stack; - } - #endif -diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c -index 2e076a459a0c0..a698196377be9 100644 ---- a/arch/x86/kernel/tsc.c -+++ b/arch/x86/kernel/tsc.c -@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason) - - EXPORT_SYMBOL_GPL(mark_tsc_unstable); - -+static void __init tsc_disable_clocksource_watchdog(void) -+{ -+ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY; -+ clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; -+} -+ - static void __init check_system_tsc_reliable(void) - { - #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) -@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void) - #endif - if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) - tsc_clocksource_reliable = 1; -+ -+ /* -+ * Disable the clocksource watchdog when the system has: -+ * - TSC running at constant frequency -+ * - TSC which does not stop in C-States -+ * - the TSC_ADJUST register which allows to detect even minimal -+ * modifications -+ * - not more than two sockets. As the number of sockets cannot be -+ * evaluated at the early boot stage where this has to be -+ * invoked, check the number of online memory nodes as a -+ * fallback solution which is an reasonable estimate. -+ */ -+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && -+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && -+ boot_cpu_has(X86_FEATURE_TSC_ADJUST) && -+ nr_online_nodes <= 2) -+ tsc_disable_clocksource_watchdog(); - } - - /* -@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void) - if (tsc_unstable) - goto unreg; - -- if (tsc_clocksource_reliable || no_tsc_watchdog) -- clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; -- - if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) - clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; - -@@ -1527,7 +1547,7 @@ void __init tsc_init(void) - } - - if (tsc_clocksource_reliable || no_tsc_watchdog) -- clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY; -+ tsc_disable_clocksource_watchdog(); - - clocksource_register_khz(&clocksource_tsc_early, tsc_khz); - detect_art(); -diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c -index 50a4515fe0ad1..9452dc9664b51 100644 ---- a/arch/x86/kernel/tsc_sync.c -+++ b/arch/x86/kernel/tsc_sync.c -@@ -30,6 +30,7 @@ struct tsc_adjust { - }; - - static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust); -+static struct timer_list tsc_sync_check_timer; - - /* - * TSC's on different sockets may be reset asynchronously. -@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume) - } - } - -+/* -+ * Normally the tsc_sync will be checked every time system enters idle -+ * state, but there is still caveat that a system won't enter idle, -+ * either because it's too busy or configured purposely to not enter -+ * idle. -+ * -+ * So setup a periodic timer (every 10 minutes) to make sure the check -+ * is always on. 
-+ */ -+ -+#define SYNC_CHECK_INTERVAL (HZ * 600) -+ -+static void tsc_sync_check_timer_fn(struct timer_list *unused) -+{ -+ int next_cpu; -+ -+ tsc_verify_tsc_adjust(false); -+ -+ /* Run the check for all onlined CPUs in turn */ -+ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); -+ if (next_cpu >= nr_cpu_ids) -+ next_cpu = cpumask_first(cpu_online_mask); -+ -+ tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL; -+ add_timer_on(&tsc_sync_check_timer, next_cpu); -+} -+ -+static int __init start_sync_check_timer(void) -+{ -+ if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable) -+ return 0; -+ -+ timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0); -+ tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL; -+ add_timer(&tsc_sync_check_timer); -+ -+ return 0; -+} -+late_initcall(start_sync_check_timer); -+ - static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval, - unsigned int cpu, bool bootcpu) - { -diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c -index a1202536fc57c..8488966da5f19 100644 ---- a/arch/x86/kernel/unwind_orc.c -+++ b/arch/x86/kernel/unwind_orc.c -@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip); - static struct orc_entry *orc_ftrace_find(unsigned long ip) - { - struct ftrace_ops *ops; -- unsigned long caller; -+ unsigned long tramp_addr, offset; - - ops = ftrace_ops_trampoline(ip); - if (!ops) - return NULL; - -+ /* Set tramp_addr to the start of the code copied by the trampoline */ - if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) -- caller = (unsigned long)ftrace_regs_call; -+ tramp_addr = (unsigned long)ftrace_regs_caller; - else -- caller = (unsigned long)ftrace_call; -+ tramp_addr = (unsigned long)ftrace_caller; -+ -+ /* Now place tramp_addr to the location within the trampoline ip is at */ -+ offset = ip - ops->trampoline; -+ tramp_addr += offset; - - /* Prevent unlikely recursion */ -- if (ip == caller) -+ if (ip == tramp_addr) - return NULL; - -- return orc_find(caller); -+ return orc_find(tramp_addr); - } - #else - static struct orc_entry *orc_ftrace_find(unsigned long ip) -@@ -695,7 +700,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, - /* Otherwise, skip ahead to the user-specified starting frame: */ - while (!unwind_done(state) && - (!on_stack(&state->stack_info, first_frame, sizeof(long)) || -- state->sp < (unsigned long)first_frame)) -+ state->sp <= (unsigned long)first_frame)) - unwind_next_frame(state); - - return; -diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c -index b63cf8f7745ee..6c07f6daaa227 100644 ---- a/arch/x86/kernel/uprobes.c -+++ b/arch/x86/kernel/uprobes.c -@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) - switch (opc1) { - case 0xeb: /* jmp 8 */ - case 0xe9: /* jmp 32 */ -- case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ - break; -+ case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ -+ goto setup; - - case 0xe8: /* call relative */ - branch_clear_offset(auprobe, insn); -@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) - return -ENOTSUPP; - } - -+setup: - auprobe->branch.opc1 = opc1; - auprobe->branch.ilen = insn->length; - auprobe->branch.offs = insn->immediate.value; -diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S -index 641f0fe1e5b4a..1258a5872d128 100644 ---- a/arch/x86/kernel/verify_cpu.S -+++ b/arch/x86/kernel/verify_cpu.S -@@ 
-132,9 +132,9 @@ SYM_FUNC_START_LOCAL(verify_cpu) - .Lverify_cpu_no_longmode: - popf # Restore caller passed flags - movl $1,%eax -- ret -+ RET - .Lverify_cpu_sse_ok: - popf # Restore caller passed flags - xorl %eax, %eax -- ret -+ RET - SYM_FUNC_END(verify_cpu) -diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c -index e5a7a10a0164d..17d58740891e2 100644 ---- a/arch/x86/kernel/vm86_32.c -+++ b/arch/x86/kernel/vm86_32.c -@@ -142,6 +142,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) - - user_access_end(); - -+exit_vm86: - preempt_disable(); - tsk->thread.sp0 = vm86->saved_sp0; - tsk->thread.sysenter_cs = __KERNEL_CS; -@@ -161,7 +162,8 @@ Efault_end: - user_access_end(); - Efault: - pr_alert("could not access userspace vm86 info\n"); -- do_exit(SIGSEGV); -+ force_exit_sig(SIGSEGV); -+ goto exit_vm86; - } - - static int do_vm86_irq_handling(int subfunction, int irqnumber); -diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S -index efd9e9ea17f25..ca1a7595edac8 100644 ---- a/arch/x86/kernel/vmlinux.lds.S -+++ b/arch/x86/kernel/vmlinux.lds.S -@@ -133,7 +133,20 @@ SECTIONS - LOCK_TEXT - KPROBES_TEXT - ALIGN_ENTRY_TEXT_BEGIN -+#ifdef CONFIG_CPU_SRSO -+ *(.text..__x86.rethunk_untrain) -+#endif -+ - ENTRY_TEXT -+ -+#ifdef CONFIG_CPU_SRSO -+ /* -+ * See the comment above srso_alias_untrain_ret()'s -+ * definition. -+ */ -+ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); -+ *(.text..__x86.rethunk_safe) -+#endif - ALIGN_ENTRY_TEXT_END - SOFTIRQENTRY_TEXT - STATIC_CALL_TEXT -@@ -142,13 +155,15 @@ SECTIONS - - #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; -- *(.text.__x86.indirect_thunk) -+ *(.text..__x86.indirect_thunk) -+ *(.text..__x86.return_thunk) - __indirect_thunk_end = .; - #endif - } :text =0xcccc - - /* End of text section, which should occupy whole number of pages */ - _etext = .; -+ - . = ALIGN(PAGE_SIZE); - - X86_ALIGN_RODATA_BEGIN -@@ -272,6 +287,27 @@ SECTIONS - __parainstructions_end = .; - } - -+#ifdef CONFIG_RETPOLINE -+ /* -+ * List of instructions that call/jmp/jcc to retpoline thunks -+ * __x86_indirect_thunk_*(). These instructions can be patched along -+ * with alternatives, after which the section can be freed. -+ */ -+ . = ALIGN(8); -+ .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) { -+ __retpoline_sites = .; -+ *(.retpoline_sites) -+ __retpoline_sites_end = .; -+ } -+ -+ . = ALIGN(8); -+ .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) { -+ __return_sites = .; -+ *(.return_sites) -+ __return_sites_end = .; -+ } -+#endif -+ - /* - * struct alt_inst entries. From the header (alternative.h): - * "Alternative instructions for different CPU types or capabilities" -@@ -475,6 +511,27 @@ INIT_PER_CPU(irq_stack_backing_store); - "fixed_percpu_data is not at start of per-cpu area"); - #endif - -+#ifdef CONFIG_RETHUNK -+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); -+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); -+#endif -+ -+#ifdef CONFIG_CPU_SRSO -+/* -+ * GNU ld cannot do XOR until 2.41. -+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 -+ * -+ * LLVM lld cannot do XOR until lld-17. -+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb -+ * -+ * Instead do: (A | B) - (A & B) in order to compute the XOR -+ * of the two function addresses: -+ */ -+. 
= ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - -+ (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), -+ "SRSO function pair won't alias"); -+#endif -+ - #endif /* CONFIG_X86_64 */ - - #ifdef CONFIG_KEXEC_CORE -diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c -index 8b395821cb8d0..d3e3b16ea9cf3 100644 ---- a/arch/x86/kernel/x86_init.c -+++ b/arch/x86/kernel/x86_init.c -@@ -32,8 +32,8 @@ static int __init iommu_init_noop(void) { return 0; } - static void iommu_shutdown_noop(void) { } - bool __init bool_x86_init_noop(void) { return false; } - void x86_op_int_noop(int cpu) { } --static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; } --static __init void get_rtc_noop(struct timespec64 *now) { } -+static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; } -+static void get_rtc_noop(struct timespec64 *now) { } - - static __initconst const struct of_device_id of_cmos_match[] = { - { .compatible = "motorola,mc146818" }, -diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c -index 751aa85a30012..b939b94d931f7 100644 ---- a/arch/x86/kvm/cpuid.c -+++ b/arch/x86/kvm/cpuid.c -@@ -232,6 +232,25 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) - return rsvd_bits(cpuid_maxphyaddr(vcpu), 63); - } - -+static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, -+ int nent) -+{ -+ int r; -+ -+ r = kvm_check_cpuid(e2, nent); -+ if (r) -+ return r; -+ -+ kvfree(vcpu->arch.cpuid_entries); -+ vcpu->arch.cpuid_entries = e2; -+ vcpu->arch.cpuid_nent = nent; -+ -+ kvm_update_cpuid_runtime(vcpu); -+ kvm_vcpu_after_set_cpuid(vcpu); -+ -+ return 0; -+} -+ - /* when an old userspace process fills a new kernel module */ - int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, - struct kvm_cpuid *cpuid, -@@ -268,18 +287,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, - e2[i].padding[2] = 0; - } - -- r = kvm_check_cpuid(e2, cpuid->nent); -- if (r) { -+ r = kvm_set_cpuid(vcpu, e2, cpuid->nent); -+ if (r) - kvfree(e2); -- goto out_free_cpuid; -- } -- -- kvfree(vcpu->arch.cpuid_entries); -- vcpu->arch.cpuid_entries = e2; -- vcpu->arch.cpuid_nent = cpuid->nent; -- -- kvm_update_cpuid_runtime(vcpu); -- kvm_vcpu_after_set_cpuid(vcpu); - - out_free_cpuid: - kvfree(e); -@@ -303,20 +313,11 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, - return PTR_ERR(e2); - } - -- r = kvm_check_cpuid(e2, cpuid->nent); -- if (r) { -+ r = kvm_set_cpuid(vcpu, e2, cpuid->nent); -+ if (r) - kvfree(e2); -- return r; -- } -- -- kvfree(vcpu->arch.cpuid_entries); -- vcpu->arch.cpuid_entries = e2; -- vcpu->arch.cpuid_nent = cpuid->nent; - -- kvm_update_cpuid_runtime(vcpu); -- kvm_vcpu_after_set_cpuid(vcpu); -- -- return 0; -+ return r; - } - - int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, -@@ -420,12 +421,13 @@ void kvm_set_cpu_caps(void) - ); - - kvm_cpu_cap_mask(CPUID_7_0_EBX, -- F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | -- F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) | -- F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | -- F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | -- F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/ -- ); -+ F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | -+ F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) | -+ F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) | -+ F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | -+ F(CLFLUSHOPT) | 
F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) | -+ F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) | -+ F(AVX512VL)); - - kvm_cpu_cap_mask(CPUID_7_ECX, - F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | -@@ -542,6 +544,9 @@ void kvm_set_cpu_caps(void) - F(PMM) | F(PMM_EN) - ); - -+ if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) -+ kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); -+ - /* - * Hide RDTSCP and RDPID if either feature is reported as supported but - * probing MSR_TSC_AUX failed. This is purely a sanity check and -@@ -565,16 +570,22 @@ struct kvm_cpuid_array { - int nent; - }; - -+static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) -+{ -+ if (array->nent >= array->maxnent) -+ return NULL; -+ -+ return &array->entries[array->nent++]; -+} -+ - static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, - u32 function, u32 index) - { -- struct kvm_cpuid_entry2 *entry; -+ struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); - -- if (array->nent >= array->maxnent) -+ if (!entry) - return NULL; - -- entry = &array->entries[array->nent++]; -- - entry->function = function; - entry->index = index; - entry->flags = 0; -@@ -716,13 +727,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) - entry->edx = 0; - } - break; -- case 9: -- break; - case 0xa: { /* Architectural Performance Monitoring */ - struct x86_pmu_capability cap; - union cpuid10_eax eax; - union cpuid10_edx edx; - -+ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { -+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0; -+ break; -+ } -+ - perf_get_x86_pmu_capability(&cap); - - /* -@@ -750,22 +764,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) - entry->edx = edx.full; - break; - } -- /* -- * Per Intel's SDM, the 0x1f is a superset of 0xb, -- * thus they can be handled by common code. -- */ - case 0x1f: - case 0xb: - /* -- * Populate entries until the level type (ECX[15:8]) of the -- * previous entry is zero. Note, CPUID EAX.{0x1f,0xb}.0 is -- * the starting entry, filled by the primary do_host_cpuid(). -+ * No topology; a valid topology is indicated by the presence -+ * of subleaf 1. - */ -- for (i = 1; entry->ecx & 0xff00; ++i) { -- entry = do_host_cpuid(array, function, i); -- if (!entry) -- goto out; -- } -+ entry->eax = entry->ebx = entry->ecx = 0; - break; - case 0xd: - entry->eax &= supported_xcr0; -@@ -897,11 +902,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) - entry->eax = min(entry->eax, 0x8000001f); - break; - case 0x80000001: -+ entry->ebx &= ~GENMASK(27, 16); - cpuid_entry_override(entry, CPUID_8000_0001_EDX); - cpuid_entry_override(entry, CPUID_8000_0001_ECX); - break; - case 0x80000006: -- /* L2 cache and TLB: pass through host info. */ -+ /* Drop reserved bits, pass host L2 cache and TLB info. 
*/ -+ entry->edx &= ~GENMASK(17, 16); - break; - case 0x80000007: /* Advanced power management */ - /* invariant TSC is CPUID.80000007H:EDX[8] */ -@@ -931,6 +938,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) - g_phys_as = phys_as; - - entry->eax = g_phys_as | (virt_as << 8); -+ entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); - entry->edx = 0; - cpuid_entry_override(entry, CPUID_8000_0008_EBX); - break; -@@ -950,14 +958,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) - entry->ecx = entry->edx = 0; - break; - case 0x8000001a: -+ entry->eax &= GENMASK(2, 0); -+ entry->ebx = entry->ecx = entry->edx = 0; -+ break; - case 0x8000001e: -+ /* Do not return host topology information. */ -+ entry->eax = entry->ebx = entry->ecx = 0; -+ entry->edx = 0; /* reserved */ - break; - case 0x8000001F: - if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { - entry->eax = entry->ebx = entry->ecx = entry->edx = 0; - } else { - cpuid_entry_override(entry, CPUID_8000_001F_EAX); -- -+ /* Clear NumVMPL since KVM does not support VMPL. */ -+ entry->ebx &= ~GENMASK(31, 12); - /* - * Enumerate '0' for "PA bits reduction", the adjusted - * MAXPHYADDR is enumerated directly (see 0x80000008). -diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c -index 54a83a7445384..f33c804a922ac 100644 ---- a/arch/x86/kvm/debugfs.c -+++ b/arch/x86/kvm/debugfs.c -@@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v) - unsigned int *log[KVM_NR_PAGE_SIZES], *cur; - int i, j, k, l, ret; - -+ if (!kvm_memslots_have_rmaps(kvm)) -+ return 0; -+ - ret = -ENOMEM; - memset(log, 0, sizeof(log)); - for (i = 0; i < KVM_NR_PAGE_SIZES; i++) { -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 9a144ca8e1460..cb96e4354f317 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -187,9 +187,6 @@ - #define X8(x...) X4(x), X4(x) - #define X16(x...) X8(x), X8(x) - --#define NR_FASTOP (ilog2(sizeof(ulong)) + 1) --#define FASTOP_SIZE 8 -- - struct opcode { - u64 flags : 56; - u64 intercept : 8; -@@ -303,9 +300,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) - * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for - * different operand sizes can be reached by calculation, rather than a jump - * table (which would be bigger than the code). -+ * -+ * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR -+ * and 1 for the straight line speculation INT3, leaves 7 bytes for the -+ * body of the function. Currently none is larger than 4. 
- */ - static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); - -+#define FASTOP_SIZE 16 -+ - #define __FOP_FUNC(name) \ - ".align " __stringify(FASTOP_SIZE) " \n\t" \ - ".type " name ", @function \n\t" \ -@@ -315,19 +318,21 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); - __FOP_FUNC(#name) - - #define __FOP_RET(name) \ -- "ret \n\t" \ -+ ASM_RET \ - ".size " name ", .-" name "\n\t" - - #define FOP_RET(name) \ - __FOP_RET(#name) - --#define FOP_START(op) \ -+#define __FOP_START(op, align) \ - extern void em_##op(struct fastop *fake); \ - asm(".pushsection .text, \"ax\" \n\t" \ - ".global em_" #op " \n\t" \ -- ".align " __stringify(FASTOP_SIZE) " \n\t" \ -+ ".align " __stringify(align) " \n\t" \ - "em_" #op ":\n\t" - -+#define FOP_START(op) __FOP_START(op, FASTOP_SIZE) -+ - #define FOP_END \ - ".popsection") - -@@ -427,18 +432,29 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); - FOP_END - - /* Special case for SETcc - 1 instruction per cc */ -+ -+/* -+ * Depending on .config the SETcc functions look like: -+ * -+ * SETcc %al [3 bytes] -+ * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK] -+ * INT3 [1 byte; CONFIG_SLS] -+ */ -+#define SETCC_ALIGN 16 -+ - #define FOP_SETCC(op) \ -- ".align 4 \n\t" \ -+ ".align " __stringify(SETCC_ALIGN) " \n\t" \ - ".type " #op ", @function \n\t" \ - #op ": \n\t" \ - #op " %al \n\t" \ -- __FOP_RET(#op) -+ __FOP_RET(#op) \ -+ ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t" - - asm(".pushsection .fixup, \"ax\"\n" -- "kvm_fastop_exception: xor %esi, %esi; ret\n" -+ "kvm_fastop_exception: xor %esi, %esi; " ASM_RET - ".popsection"); - --FOP_START(setcc) -+__FOP_START(setcc, SETCC_ALIGN) - FOP_SETCC(seto) - FOP_SETCC(setno) - FOP_SETCC(setc) -@@ -779,8 +795,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt, - ctxt->mode, linear); - } - --static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, -- enum x86emul_mode mode) -+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) - { - ulong linear; - int rc; -@@ -790,41 +805,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, - - if (ctxt->op_bytes != sizeof(unsigned long)) - addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); -- rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); -+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); - if (rc == X86EMUL_CONTINUE) - ctxt->_eip = addr.ea; - return rc; - } - -+static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt) -+{ -+ u64 efer; -+ struct desc_struct cs; -+ u16 selector; -+ u32 base3; -+ -+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); -+ -+ if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) { -+ /* Real mode. cpu must not have long mode active */ -+ if (efer & EFER_LMA) -+ return X86EMUL_UNHANDLEABLE; -+ ctxt->mode = X86EMUL_MODE_REAL; -+ return X86EMUL_CONTINUE; -+ } -+ -+ if (ctxt->eflags & X86_EFLAGS_VM) { -+ /* Protected/VM86 mode. 
cpu must not have long mode active */ -+ if (efer & EFER_LMA) -+ return X86EMUL_UNHANDLEABLE; -+ ctxt->mode = X86EMUL_MODE_VM86; -+ return X86EMUL_CONTINUE; -+ } -+ -+ if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS)) -+ return X86EMUL_UNHANDLEABLE; -+ -+ if (efer & EFER_LMA) { -+ if (cs.l) { -+ /* Proper long mode */ -+ ctxt->mode = X86EMUL_MODE_PROT64; -+ } else if (cs.d) { -+ /* 32 bit compatibility mode*/ -+ ctxt->mode = X86EMUL_MODE_PROT32; -+ } else { -+ ctxt->mode = X86EMUL_MODE_PROT16; -+ } -+ } else { -+ /* Legacy 32 bit / 16 bit mode */ -+ ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; -+ } -+ -+ return X86EMUL_CONTINUE; -+} -+ - static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) - { -- return assign_eip(ctxt, dst, ctxt->mode); -+ return assign_eip(ctxt, dst); - } - --static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, -- const struct desc_struct *cs_desc) -+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst) - { -- enum x86emul_mode mode = ctxt->mode; -- int rc; -+ int rc = emulator_recalc_and_set_mode(ctxt); - --#ifdef CONFIG_X86_64 -- if (ctxt->mode >= X86EMUL_MODE_PROT16) { -- if (cs_desc->l) { -- u64 efer = 0; -+ if (rc != X86EMUL_CONTINUE) -+ return rc; - -- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); -- if (efer & EFER_LMA) -- mode = X86EMUL_MODE_PROT64; -- } else -- mode = X86EMUL_MODE_PROT32; /* temporary value */ -- } --#endif -- if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) -- mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; -- rc = assign_eip(ctxt, dst, mode); -- if (rc == X86EMUL_CONTINUE) -- ctxt->mode = mode; -- return rc; -+ return assign_eip(ctxt, dst); - } - - static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) -@@ -1053,7 +1098,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt) - static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) - { - u8 rc; -- void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); -+ void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf); - - flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("push %[flags]; popf; " CALL_NOSPEC -@@ -1614,11 +1659,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - goto exception; - } - -- if (!seg_desc.p) { -- err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; -- goto exception; -- } -- - dpl = seg_desc.dpl; - - switch (seg) { -@@ -1658,12 +1698,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - case VCPU_SREG_TR: - if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) - goto exception; -- old_desc = seg_desc; -- seg_desc.type |= 2; /* busy */ -- ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, -- sizeof(seg_desc), &ctxt->exception); -- if (ret != X86EMUL_CONTINUE) -- return ret; - break; - case VCPU_SREG_LDTR: - if (seg_desc.s || seg_desc.type != 2) -@@ -1682,6 +1716,11 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - break; - } - -+ if (!seg_desc.p) { -+ err_vec = (seg == VCPU_SREG_SS) ? 
SS_VECTOR : NP_VECTOR; -+ goto exception; -+ } -+ - if (seg_desc.s) { - /* mark segment as accessed */ - if (!(seg_desc.type & 1)) { -@@ -1696,8 +1735,17 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - if (ret != X86EMUL_CONTINUE) - return ret; - if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | -- ((u64)base3 << 32), ctxt)) -- return emulate_gp(ctxt, 0); -+ ((u64)base3 << 32), ctxt)) -+ return emulate_gp(ctxt, err_code); -+ } -+ -+ if (seg == VCPU_SREG_TR) { -+ old_desc = seg_desc; -+ seg_desc.type |= 2; /* busy */ -+ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, -+ sizeof(seg_desc), &ctxt->exception); -+ if (ret != X86EMUL_CONTINUE) -+ return ret; - } - load: - ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); -@@ -1917,7 +1965,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- if (ctxt->modrm_reg == VCPU_SREG_SS) -+ if (seg == VCPU_SREG_SS) - ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; - if (ctxt->op_bytes > 2) - rsp_increment(ctxt, ctxt->op_bytes - 2); -@@ -2134,7 +2182,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); -+ rc = assign_eip_far(ctxt, ctxt->src.val); - /* Error handling is not implemented. */ - if (rc != X86EMUL_CONTINUE) - return X86EMUL_UNHANDLEABLE; -@@ -2215,7 +2263,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) - &new_desc); - if (rc != X86EMUL_CONTINUE) - return rc; -- rc = assign_eip_far(ctxt, eip, &new_desc); -+ rc = assign_eip_far(ctxt, eip); - /* Error handling is not implemented. */ - if (rc != X86EMUL_CONTINUE) - return X86EMUL_UNHANDLEABLE; -@@ -2598,7 +2646,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) - * those side effects need to be explicitly handled for both success - * and shutdown. - */ -- return X86EMUL_CONTINUE; -+ return emulator_recalc_and_set_mode(ctxt); - - emulate_shutdown: - ctxt->ops->triple_fault(ctxt); -@@ -2842,6 +2890,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) - ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); - - ctxt->_eip = rdx; -+ ctxt->mode = usermode; - *reg_write(ctxt, VCPU_REGS_RSP) = rcx; - - return X86EMUL_CONTINUE; -@@ -3438,7 +3487,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); -+ rc = assign_eip_far(ctxt, ctxt->src.val); - if (rc != X86EMUL_CONTINUE) - goto fail; - -@@ -3510,8 +3559,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) - { - u64 tsc_aux = 0; - -- if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) -+ if (!ctxt->ops->guest_has_rdpid(ctxt)) - return emulate_ud(ctxt); -+ -+ ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); - ctxt->dst.val = tsc_aux; - return X86EMUL_CONTINUE; - } -@@ -3578,11 +3629,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) - - static int em_cr_write(struct x86_emulate_ctxt *ctxt) - { -- if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) -+ int cr_num = ctxt->modrm_reg; -+ int r; -+ -+ if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val)) - return emulate_gp(ctxt, 0); - - /* Disable writeback. */ - ctxt->dst.type = OP_NONE; -+ -+ if (cr_num == 0) { -+ /* -+ * CR0 write might have updated CR0.PE and/or CR0.PG -+ * which can affect the cpu's execution mode. 
-+ */ -+ r = emulator_recalc_and_set_mode(ctxt); -+ if (r != X86EMUL_CONTINUE) -+ return r; -+ } -+ - return X86EMUL_CONTINUE; - } - -@@ -4101,6 +4166,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt) - { - u32 eax, ecx, edx; - -+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE)) -+ return emulate_ud(ctxt); -+ - eax = reg_read(ctxt, VCPU_REGS_RAX); - edx = reg_read(ctxt, VCPU_REGS_RDX); - ecx = reg_read(ctxt, VCPU_REGS_RCX); -diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c -index d5124b520f761..a067c7ce8e19c 100644 ---- a/arch/x86/kvm/hyperv.c -+++ b/arch/x86/kvm/hyperv.c -@@ -236,7 +236,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, - struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); - int ret; - -- if (!synic->active && !host) -+ if (!synic->active && (!host || data)) - return 1; - - trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); -@@ -282,6 +282,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, - case HV_X64_MSR_EOM: { - int i; - -+ if (!synic->active) -+ break; -+ - for (i = 0; i < ARRAY_SIZE(synic->sint); i++) - kvm_hv_notify_acked_sint(vcpu, i); - break; -@@ -446,6 +449,9 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) - struct kvm_lapic_irq irq; - int ret, vector; - -+ if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm)) -+ return -EINVAL; -+ - if (sint >= ARRAY_SIZE(synic->sint)) - return -EINVAL; - -@@ -658,7 +664,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); - struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); - -- if (!synic->active && !host) -+ if (!synic->active && (!host || config)) - return 1; - - if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && -@@ -687,7 +693,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, - struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); - struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); - -- if (!synic->active && !host) -+ if (!synic->active && (!host || count)) - return 1; - - trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, -@@ -1749,7 +1755,7 @@ struct kvm_hv_hcall { - sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS]; - }; - --static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex) -+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) - { - int i; - gpa_t gpa; -@@ -1765,7 +1771,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - int sparse_banks_len; - bool all_cpus; - -- if (!ex) { -+ if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || -+ hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) { - if (hc->fast) { - flush.address_space = hc->ingpa; - flush.flags = hc->outgpa; -@@ -1819,7 +1826,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - - if (!all_cpus) { - if (hc->fast) { -- if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1) -+ /* XMM0 is already consumed, each XMM holds two sparse banks. */ -+ if (sparse_banks_len > 2 * (HV_HYPERCALL_MAX_XMM_REGISTERS - 1)) - return HV_STATUS_INVALID_HYPERCALL_INPUT; - for (i = 0; i < sparse_banks_len; i += 2) { - sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]); -@@ -1838,16 +1846,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - - cpumask_clear(&hv_vcpu->tlb_flush); - -- vcpu_mask = all_cpus ? 
NULL : -- sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, -- vp_bitmap, vcpu_bitmap); -- - /* - * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't - * analyze it here, flush TLB regardless of the specified address space. - */ -- kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, -- NULL, vcpu_mask, &hv_vcpu->tlb_flush); -+ if (all_cpus) { -+ kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST); -+ } else { -+ vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, -+ vp_bitmap, vcpu_bitmap); -+ -+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, -+ NULL, vcpu_mask, &hv_vcpu->tlb_flush); -+ } - - ret_success: - /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */ -@@ -1874,7 +1885,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, - } - } - --static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex) -+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) - { - struct kvm *kvm = vcpu->kvm; - struct hv_send_ipi_ex send_ipi_ex; -@@ -1887,8 +1898,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - int sparse_banks_len; - u32 vector; - bool all_cpus; -+ int i; - -- if (!ex) { -+ if (hc->code == HVCALL_SEND_IPI) { - if (!hc->fast) { - if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, - sizeof(send_ipi)))) -@@ -1907,9 +1919,15 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - - trace_kvm_hv_send_ipi(vector, sparse_banks[0]); - } else { -- if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, -- sizeof(send_ipi_ex)))) -- return HV_STATUS_INVALID_HYPERCALL_INPUT; -+ if (!hc->fast) { -+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, -+ sizeof(send_ipi_ex)))) -+ return HV_STATUS_INVALID_HYPERCALL_INPUT; -+ } else { -+ send_ipi_ex.vector = (u32)hc->ingpa; -+ send_ipi_ex.vp_set.format = hc->outgpa; -+ send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]); -+ } - - trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector, - send_ipi_ex.vp_set.format, -@@ -1917,23 +1935,40 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool - - vector = send_ipi_ex.vector; - valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; -- sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * -- sizeof(sparse_banks[0]); -+ sparse_banks_len = bitmap_weight(&valid_bank_mask, 64); - - all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; - -+ if (all_cpus) -+ goto check_and_send_ipi; -+ - if (!sparse_banks_len) - goto ret_success; - -- if (!all_cpus && -- kvm_read_guest(kvm, -- hc->ingpa + offsetof(struct hv_send_ipi_ex, -- vp_set.bank_contents), -- sparse_banks, -- sparse_banks_len)) -- return HV_STATUS_INVALID_HYPERCALL_INPUT; -+ if (!hc->fast) { -+ if (kvm_read_guest(kvm, -+ hc->ingpa + offsetof(struct hv_send_ipi_ex, -+ vp_set.bank_contents), -+ sparse_banks, -+ sparse_banks_len * sizeof(sparse_banks[0]))) -+ return HV_STATUS_INVALID_HYPERCALL_INPUT; -+ } else { -+ /* -+ * The lower half of XMM0 is already consumed, each XMM holds -+ * two sparse banks. 
-+ */ -+ if (sparse_banks_len > (2 * HV_HYPERCALL_MAX_XMM_REGISTERS - 1)) -+ return HV_STATUS_INVALID_HYPERCALL_INPUT; -+ for (i = 0; i < sparse_banks_len; i++) { -+ if (i % 2) -+ sparse_banks[i] = sse128_lo(hc->xmm[(i + 1) / 2]); -+ else -+ sparse_banks[i] = sse128_hi(hc->xmm[i / 2]); -+ } -+ } - } - -+check_and_send_ipi: - if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) - return HV_STATUS_INVALID_HYPERCALL_INPUT; - -@@ -2022,7 +2057,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) - { - bool longmode; - -- longmode = is_64_bit_mode(vcpu); -+ longmode = is_64_bit_hypercall(vcpu); - if (longmode) - kvm_rax_write(vcpu, result); - else { -@@ -2092,6 +2127,7 @@ static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc) - case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: - case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: - case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: -+ case HVCALL_SEND_IPI_EX: - return true; - } - -@@ -2171,7 +2207,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) - } - - #ifdef CONFIG_X86_64 -- if (is_64_bit_mode(vcpu)) { -+ if (is_64_bit_hypercall(vcpu)) { - hc.param = kvm_rcx_read(vcpu); - hc.ingpa = kvm_rdx_read(vcpu); - hc.outgpa = kvm_r8_read(vcpu); -@@ -2243,46 +2279,28 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) - kvm_hv_hypercall_complete_userspace; - return 0; - case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: -- if (unlikely(!hc.rep_cnt || hc.rep_idx)) { -- ret = HV_STATUS_INVALID_HYPERCALL_INPUT; -- break; -- } -- ret = kvm_hv_flush_tlb(vcpu, &hc, false); -- break; -- case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: -- if (unlikely(hc.rep)) { -- ret = HV_STATUS_INVALID_HYPERCALL_INPUT; -- break; -- } -- ret = kvm_hv_flush_tlb(vcpu, &hc, false); -- break; - case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: - if (unlikely(!hc.rep_cnt || hc.rep_idx)) { - ret = HV_STATUS_INVALID_HYPERCALL_INPUT; - break; - } -- ret = kvm_hv_flush_tlb(vcpu, &hc, true); -+ ret = kvm_hv_flush_tlb(vcpu, &hc); - break; -+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: - case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: - if (unlikely(hc.rep)) { - ret = HV_STATUS_INVALID_HYPERCALL_INPUT; - break; - } -- ret = kvm_hv_flush_tlb(vcpu, &hc, true); -+ ret = kvm_hv_flush_tlb(vcpu, &hc); - break; - case HVCALL_SEND_IPI: -- if (unlikely(hc.rep)) { -- ret = HV_STATUS_INVALID_HYPERCALL_INPUT; -- break; -- } -- ret = kvm_hv_send_ipi(vcpu, &hc, false); -- break; - case HVCALL_SEND_IPI_EX: -- if (unlikely(hc.fast || hc.rep)) { -+ if (unlikely(hc.rep)) { - ret = HV_STATUS_INVALID_HYPERCALL_INPUT; - break; - } -- ret = kvm_hv_send_ipi(vcpu, &hc, true); -+ ret = kvm_hv_send_ipi(vcpu, &hc); - break; - case HVCALL_POST_DEBUG_DATA: - case HVCALL_RETRIEVE_DEBUG_DATA: -diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c -index 8c065da73f8e5..4e0f52660842b 100644 ---- a/arch/x86/kvm/ioapic.c -+++ b/arch/x86/kvm/ioapic.c -@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, - static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) - { - ioapic->rtc_status.pending_eoi = 0; -- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1); -+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); - } - - static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); -diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h -index bbd4a5d18b5dc..f1b2b2a6ff4db 100644 ---- a/arch/x86/kvm/ioapic.h -+++ b/arch/x86/kvm/ioapic.h -@@ -39,13 +39,13 @@ struct kvm_vcpu; - - struct dest_map { - /* vcpu bitmap where IRQ has been sent */ -- 
DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1); -+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); - - /* - * Vector sent to a given vcpu, only valid when - * the vcpu's bit in map is set - */ -- u8 vectors[KVM_MAX_VCPU_ID + 1]; -+ u8 vectors[KVM_MAX_VCPU_ID]; - }; - - -@@ -81,7 +81,6 @@ struct kvm_ioapic { - unsigned long irq_states[IOAPIC_NUM_PINS]; - struct kvm_io_device dev; - struct kvm *kvm; -- void (*ack_notifier)(void *opaque, int irq); - spinlock_t lock; - struct rtc_status rtc_status; - struct delayed_work eoi_inject; -diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h -index 650642b18d151..c2d7cfe82d004 100644 ---- a/arch/x86/kvm/irq.h -+++ b/arch/x86/kvm/irq.h -@@ -56,7 +56,6 @@ struct kvm_pic { - struct kvm_io_device dev_master; - struct kvm_io_device dev_slave; - struct kvm_io_device dev_elcr; -- void (*ack_notifier)(void *opaque, int irq); - unsigned long irq_states[PIC_NUM_PINS]; - }; - -diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h -index 68b420289d7ed..fb09cd22cb7f5 100644 ---- a/arch/x86/kvm/kvm_emulate.h -+++ b/arch/x86/kvm/kvm_emulate.h -@@ -226,6 +226,7 @@ struct x86_emulate_ops { - bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt); - bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt); - bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt); -+ bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); - - void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); - -diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index d6ac32f3f650c..40fc1879a6970 100644 ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic) - - static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu) - { -- return pi_inject_timer && kvm_vcpu_apicv_active(vcpu); -+ return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) && -+ (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm)); - } - - bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu) -@@ -676,38 +677,32 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) - static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) - { - u8 val; -- if (pv_eoi_get_user(vcpu, &val) < 0) { -- printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n", -- (unsigned long long)vcpu->arch.pv_eoi.msr_val); -+ if (pv_eoi_get_user(vcpu, &val) < 0) - return false; -- } -+ - return val & KVM_PV_EOI_ENABLED; - } - - static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) - { -- if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { -- printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n", -- (unsigned long long)vcpu->arch.pv_eoi.msr_val); -+ if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) - return; -- } -+ - __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); - } - - static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) - { -- if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { -- printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n", -- (unsigned long long)vcpu->arch.pv_eoi.msr_val); -+ if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) - return; -- } -+ - __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); - } - - static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) - { - int highest_irr; -- if (apic->vcpu->arch.apicv_active) -+ if (kvm_x86_ops.sync_pir_to_irr) - highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu); - else - highest_irr = apic_find_highest_irr(apic); -@@ -993,6 +988,10 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, - *r = -1; - - if 
(irq->shorthand == APIC_DEST_SELF) { -+ if (KVM_BUG_ON(!src, kvm)) { -+ *r = 0; -+ return true; -+ } - *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); - return true; - } -@@ -1296,6 +1295,7 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high) - - kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); - } -+EXPORT_SYMBOL_GPL(kvm_apic_send_ipi); - - static u32 apic_get_tmcct(struct kvm_lapic *apic) - { -@@ -1507,6 +1507,7 @@ static void cancel_apic_timer(struct kvm_lapic *apic) - if (apic->lapic_timer.hv_timer_in_use) - cancel_hv_timer(apic); - preempt_enable(); -+ atomic_set(&apic->lapic_timer.pending, 0); - } - - static void apic_update_lvtt(struct kvm_lapic *apic) -@@ -2127,11 +2128,14 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) - break; - - case APIC_SELF_IPI: -- if (apic_x2apic_mode(apic)) { -- kvm_lapic_reg_write(apic, APIC_ICR, -- APIC_DEST_SELF | (val & APIC_VECTOR_MASK)); -- } else -+ /* -+ * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold -+ * the vector, everything else is reserved. -+ */ -+ if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK)) - ret = 1; -+ else -+ kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0); - break; - default: - ret = 1; -@@ -2248,10 +2252,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) - - void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) - { -- struct kvm_lapic *apic = vcpu->arch.apic; -- -- apic_set_tpr(apic, ((cr8 & 0x0f) << 4) -- | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4)); -+ apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4); - } - - u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) -@@ -2315,6 +2316,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) - apic->irr_pending = (apic_search_irr(apic) != -1); - apic->isr_count = count_vectors(apic->regs + APIC_ISR); - } -+ apic->highest_isr_cache = -1; - } - EXPORT_SYMBOL_GPL(kvm_apic_update_apicv); - -@@ -2367,7 +2369,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) - kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0); - } - kvm_apic_update_apicv(vcpu); -- apic->highest_isr_cache = -1; - update_divide_count(apic); - atomic_set(&apic->lapic_timer.pending, 0); - -@@ -2629,7 +2630,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) - kvm_apic_set_version(vcpu); - - apic_update_ppr(apic); -- hrtimer_cancel(&apic->lapic_timer.timer); -+ cancel_apic_timer(apic); - apic->lapic_timer.expired_tscdeadline = 0; - apic_update_lvtt(apic); - apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); -@@ -2637,7 +2638,6 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) - __start_apic_timer(apic, APIC_TMCCT); - kvm_lapic_set_reg(apic, APIC_TMCCT, 0); - kvm_apic_update_apicv(vcpu); -- apic->highest_isr_cache = -1; - if (vcpu->arch.apicv_active) { - static_call(kvm_x86_apicv_post_state_restore)(vcpu); - static_call(kvm_x86_hwapic_irr_update)(vcpu, -@@ -2801,6 +2801,10 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) - /* if this is ICR write vector before command */ - if (reg == APIC_ICR) - kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); -+ else if (data >> 32) -+ /* Bits 63:32 are reserved in all other registers. 
*/ -+ return 1; -+ - return kvm_lapic_reg_write(apic, reg, (u32)data); - } - -@@ -2835,6 +2839,10 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) - /* if this is ICR write vector before command */ - if (reg == APIC_ICR) - kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); -+ else if (data >> 32) -+ /* Bits 63:32 are reserved in all other registers. */ -+ return 1; -+ - return kvm_lapic_reg_write(apic, reg, (u32)data); - } - -diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h -index e9688a9f7b579..7bb165c232334 100644 ---- a/arch/x86/kvm/mmu.h -+++ b/arch/x86/kvm/mmu.h -@@ -49,6 +49,7 @@ - X86_CR4_LA57) - - #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP) -+#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX) - - static __always_inline u64 rsvd_bits(int s, int e) - { -diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 0cc58901bf7a7..4724289c8a7f8 100644 ---- a/arch/x86/kvm/mmu/mmu.c -+++ b/arch/x86/kvm/mmu/mmu.c -@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu) - return kvm_mmu_memory_cache_nr_free_objects(mc); - } - --static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) --{ -- struct kvm_memory_slot *slot; -- struct kvm_mmu_page *sp; -- struct kvm_rmap_head *rmap_head; -- -- sp = sptep_to_sp(spte); -- kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); -- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); -- rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); -- return pte_list_add(vcpu, spte, rmap_head); --} -- -- - static void rmap_remove(struct kvm *kvm, u64 *spte) - { - struct kvm_memslots *slots; -@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) - gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); - - /* -- * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the -- * context of a vCPU so have to determine which memslots to use based -- * on context information in sp->role. -+ * Unlike rmap_add, rmap_remove does not run in the context of a vCPU -+ * so we have to determine which memslots to use based on context -+ * information in sp->role. 
- */ - slots = kvm_memslots_for_spte_role(kvm, sp->role); - -@@ -1592,7 +1578,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) - flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp); - - if (is_tdp_mmu_enabled(kvm)) -- flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); -+ flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush); - - return flush; - } -@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - - #define RMAP_RECYCLE_THRESHOLD 1000 - --static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) -+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) - { - struct kvm_memory_slot *slot; -- struct kvm_rmap_head *rmap_head; - struct kvm_mmu_page *sp; -+ struct kvm_rmap_head *rmap_head; -+ int rmap_count; - - sp = sptep_to_sp(spte); -+ kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - rmap_head = gfn_to_rmap(gfn, sp->role.level, slot); -+ rmap_count = pte_list_add(vcpu, spte, rmap_head); - -- kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); -- kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn, -- KVM_PAGES_PER_HPAGE(sp->role.level)); -+ if (rmap_count > RMAP_RECYCLE_THRESHOLD) { -+ kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0)); -+ kvm_flush_remote_tlbs_with_address( -+ vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level)); -+ } - } - - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -@@ -2188,10 +2179,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato - iterator->shadow_addr = root; - iterator->level = vcpu->arch.mmu->shadow_root_level; - -- if (iterator->level == PT64_ROOT_4LEVEL && -+ if (iterator->level >= PT64_ROOT_4LEVEL && - vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && - !vcpu->arch.mmu->direct_map) -- --iterator->level; -+ iterator->level = PT32E_ROOT_LEVEL; - - if (iterator->level == PT32E_ROOT_LEVEL) { - /* -@@ -2366,6 +2357,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, - { - bool list_unstable; - -+ lockdep_assert_held_write(&kvm->mmu_lock); - trace_kvm_mmu_prepare_zap_page(sp); - ++kvm->stat.mmu_shadow_zapped; - *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); -@@ -2718,7 +2710,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - bool host_writable) - { - int was_rmapped = 0; -- int rmap_count; - int set_spte_ret; - int ret = RET_PF_FIXED; - bool flush = false; -@@ -2778,9 +2769,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, - - if (!was_rmapped) { - kvm_update_page_stats(vcpu->kvm, level, 1); -- rmap_count = rmap_add(vcpu, sptep, gfn); -- if (rmap_count > RMAP_RECYCLE_THRESHOLD) -- rmap_recycle(vcpu, sptep, gfn); -+ rmap_add(vcpu, sptep, gfn); - } - - return ret; -@@ -3314,6 +3303,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, - return; - - sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); -+ if (WARN_ON(!sp)) -+ return; - - if (is_tdp_mmu_page(sp)) - kvm_tdp_mmu_put_root(kvm, sp, false); -@@ -3579,7 +3570,7 @@ set_root_pgd: - out_unlock: - write_unlock(&vcpu->kvm->mmu_lock); - -- return 0; -+ return r; - } - - static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu) -@@ -3889,12 +3880,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) - walk_shadow_page_lockless_end(vcpu); - } - -+static u32 alloc_apf_token(struct kvm_vcpu *vcpu) -+{ -+ /* make sure the token value is not 0 */ -+ u32 id = 
vcpu->arch.apf.id; -+ -+ if (id << 12 == 0) -+ vcpu->arch.apf.id = 1; -+ -+ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; -+} -+ - static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - gfn_t gfn) - { - struct kvm_arch_async_pf arch; - -- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; -+ arch.token = alloc_apf_token(vcpu); - arch.gfn = gfn; - arch.direct_map = vcpu->arch.mmu->direct_map; - arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); -@@ -3956,6 +3958,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, - - *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, - write, writable, hva); -+ return false; - - out_retry: - *r = RET_PF_RETRY; -@@ -4005,16 +4008,17 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - - if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva)) - goto out_unlock; -- r = make_mmu_pages_available(vcpu); -- if (r) -- goto out_unlock; - -- if (is_tdp_mmu_fault) -+ if (is_tdp_mmu_fault) { - r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level, - pfn, prefault); -- else -+ } else { -+ r = make_mmu_pages_available(vcpu); -+ if (r) -+ goto out_unlock; - r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn, - prefault, is_tdp); -+ } - - out_unlock: - if (is_tdp_mmu_fault) -@@ -4679,6 +4683,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu, - /* PKEY and LA57 are active iff long mode is active. */ - ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs); - ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs); -+ ext.efer_lma = ____is_efer_lma(regs); - } - - ext.valid = 1; -@@ -4851,7 +4856,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, - struct kvm_mmu *context = &vcpu->arch.guest_mmu; - struct kvm_mmu_role_regs regs = { - .cr0 = cr0, -- .cr4 = cr4, -+ .cr4 = cr4 & ~X86_CR4_PKE, - .efer = efer, - }; - union kvm_mmu_role new_role; -@@ -4915,7 +4920,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, - context->direct_map = false; - - update_permission_bitmask(context, true); -- update_pkru_bitmask(context); -+ context->pkru_mask = 0; - reset_rsvds_bits_mask_ept(vcpu, context, execonly); - reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); - } -@@ -5368,7 +5373,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - - void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) - { -- kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); -+ kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); - ++vcpu->stat.invlpg; - } - EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); -@@ -5381,14 +5386,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) - uint i; - - if (pcid == kvm_get_active_pcid(vcpu)) { -- mmu->invlpg(vcpu, gva, mmu->root_hpa); -+ if (mmu->invlpg) -+ mmu->invlpg(vcpu, gva, mmu->root_hpa); - tlb_flush = true; - } - - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - if (VALID_PAGE(mmu->prev_roots[i].hpa) && - pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { -- mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); -+ if (mmu->invlpg) -+ mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); - tlb_flush = true; - } - } -@@ -5473,8 +5480,8 @@ slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot, - } - - static __always_inline bool --slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot, -- slot_level_handler fn, bool flush_on_yield) 
-+slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot, -+ slot_level_handler fn, bool flush_on_yield) - { - return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, - PG_LEVEL_4K, flush_on_yield); -@@ -5575,6 +5582,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm) - { - struct kvm_mmu_page *sp, *node; - int nr_zapped, batch = 0; -+ bool unstable; - - restart: - list_for_each_entry_safe_reverse(sp, node, -@@ -5606,11 +5614,12 @@ restart: - goto restart; - } - -- if (__kvm_mmu_prepare_zap_page(kvm, sp, -- &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { -- batch += nr_zapped; -+ unstable = __kvm_mmu_prepare_zap_page(kvm, sp, -+ &kvm->arch.zapped_obsolete_pages, &nr_zapped); -+ batch += nr_zapped; -+ -+ if (unstable) - goto restart; -- } - } - - /* -@@ -5758,13 +5767,11 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) - for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) - flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, - gfn_end, flush); -- if (flush) -- kvm_flush_remote_tlbs_with_address(kvm, gfn_start, -- gfn_end - gfn_start); - } - - if (flush) -- kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end); -+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start, -+ gfn_end - gfn_start); - - kvm_dec_notifier_count(kvm, gfn_start, gfn_end); - -@@ -5856,21 +5863,21 @@ restart: - void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot) - { -- bool flush = false; -- - if (kvm_memslots_have_rmaps(kvm)) { - write_lock(&kvm->mmu_lock); -- flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true); -- if (flush) -+ /* -+ * Zap only 4k SPTEs since the legacy MMU only supports dirty -+ * logging at a 4k granularity and never creates collapsible -+ * 2m SPTEs during dirty logging. -+ */ -+ if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true)) - kvm_arch_flush_remote_tlbs_memslot(kvm, slot); - write_unlock(&kvm->mmu_lock); - } - - if (is_tdp_mmu_enabled(kvm)) { - read_lock(&kvm->mmu_lock); -- flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush); -- if (flush) -- kvm_arch_flush_remote_tlbs_memslot(kvm, slot); -+ kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); - read_unlock(&kvm->mmu_lock); - } - } -@@ -5897,8 +5904,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, - - if (kvm_memslots_have_rmaps(kvm)) { - write_lock(&kvm->mmu_lock); -- flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, -- false); -+ /* -+ * Clear dirty bits only on 4k SPTEs since the legacy MMU only -+ * support dirty logging at a 4k granularity. -+ */ -+ flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false); - write_unlock(&kvm->mmu_lock); - } - -@@ -6091,12 +6101,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) - return 0; - } - --int kvm_mmu_module_init(void) -+/* -+ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as -+ * its default value of -1 is technically undefined behavior for a boolean. -+ */ -+void __init kvm_mmu_x86_module_init(void) - { -- int ret = -ENOMEM; -- - if (nx_huge_pages == -1) - __set_nx_huge_pages(get_nx_auto_mode()); -+} -+ -+/* -+ * The bulk of the MMU initialization is deferred until the vendor module is -+ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need -+ * to be reset when a potentially different vendor module is loaded. 
-+ */ -+int kvm_mmu_vendor_module_init(void) -+{ -+ int ret = -ENOMEM; - - /* - * MMU roles use union aliasing which is, generally speaking, an -@@ -6168,7 +6190,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) - mmu_free_memory_caches(vcpu); - } - --void kvm_mmu_module_exit(void) -+void kvm_mmu_vendor_module_exit(void) - { - mmu_destroy_caches(); - percpu_counter_destroy(&kvm_total_used_mmu_pages); -diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c -index 21427e84a82ef..630ae70bb6bd3 100644 ---- a/arch/x86/kvm/mmu/page_track.c -+++ b/arch/x86/kvm/mmu/page_track.c -@@ -36,8 +36,8 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, - - for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - slot->arch.gfn_track[i] = -- kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]), -- GFP_KERNEL_ACCOUNT); -+ __vcalloc(npages, sizeof(*slot->arch.gfn_track[i]), -+ GFP_KERNEL_ACCOUNT); - if (!slot->arch.gfn_track[i]) - goto track_free; - } -diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h -index 913d52a7923e6..a1811f51eda92 100644 ---- a/arch/x86/kvm/mmu/paging_tmpl.h -+++ b/arch/x86/kvm/mmu/paging_tmpl.h -@@ -34,9 +34,8 @@ - #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #ifdef CONFIG_X86_64 - #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL -- #define CMPXCHG cmpxchg -+ #define CMPXCHG "cmpxchgq" - #else -- #define CMPXCHG cmpxchg64 - #define PT_MAX_FULL_LEVELS 2 - #endif - #elif PTTYPE == 32 -@@ -52,7 +51,7 @@ - #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT - #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT - #define PT_HAVE_ACCESSED_DIRTY(mmu) true -- #define CMPXCHG cmpxchg -+ #define CMPXCHG "cmpxchgl" - #elif PTTYPE == PTTYPE_EPT - #define pt_element_t u64 - #define guest_walker guest_walkerEPT -@@ -65,7 +64,9 @@ - #define PT_GUEST_DIRTY_SHIFT 9 - #define PT_GUEST_ACCESSED_SHIFT 8 - #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) -- #define CMPXCHG cmpxchg64 -+ #ifdef CONFIG_X86_64 -+ #define CMPXCHG "cmpxchgq" -+ #endif - #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL - #else - #error Invalid PTTYPE value -@@ -147,43 +148,39 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - pt_element_t __user *ptep_user, unsigned index, - pt_element_t orig_pte, pt_element_t new_pte) - { -- int npages; -- pt_element_t ret; -- pt_element_t *table; -- struct page *page; -- -- npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); -- if (likely(npages == 1)) { -- table = kmap_atomic(page); -- ret = CMPXCHG(&table[index], orig_pte, new_pte); -- kunmap_atomic(table); -- -- kvm_release_page_dirty(page); -- } else { -- struct vm_area_struct *vma; -- unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; -- unsigned long pfn; -- unsigned long paddr; -- -- mmap_read_lock(current->mm); -- vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); -- if (!vma || !(vma->vm_flags & VM_PFNMAP)) { -- mmap_read_unlock(current->mm); -- return -EFAULT; -- } -- pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; -- paddr = pfn << PAGE_SHIFT; -- table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); -- if (!table) { -- mmap_read_unlock(current->mm); -- return -EFAULT; -- } -- ret = CMPXCHG(&table[index], orig_pte, new_pte); -- memunmap(table); -- mmap_read_unlock(current->mm); -- } -+ int r = -EFAULT; -+ -+ if (!user_access_begin(ptep_user, sizeof(pt_element_t))) -+ return -EFAULT; -+ -+#ifdef CMPXCHG -+ asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n" -+ "mov $0, %[r]\n" -+ "setnz %b[r]\n" -+ "2:" -+ 
_ASM_EXTABLE_UA(1b, 2b) -+ : [ptr] "+m" (*ptep_user), -+ [old] "+a" (orig_pte), -+ [r] "+q" (r) -+ : [new] "r" (new_pte) -+ : "memory"); -+#else -+ asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n" -+ "movl $0, %[r]\n" -+ "jz 2f\n" -+ "incl %[r]\n" -+ "2:" -+ _ASM_EXTABLE_UA(1b, 2b) -+ : [ptr] "+m" (*ptep_user), -+ [old] "+A" (orig_pte), -+ [r] "+rm" (r) -+ : [new_lo] "b" ((u32)new_pte), -+ [new_hi] "c" ((u32)(new_pte >> 32)) -+ : "memory"); -+#endif - -- return (ret != orig_pte); -+ user_access_end(); -+ return r; - } - - static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, -diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h -index eb7b227fc6cfe..31d6456d8ac33 100644 ---- a/arch/x86/kvm/mmu/spte.h -+++ b/arch/x86/kvm/mmu/spte.h -@@ -310,12 +310,7 @@ static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, - static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, - u64 spte, int level) - { -- /* -- * Use a bitwise-OR instead of a logical-OR to aggregate the reserved -- * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc -- * (this is extremely unlikely to be short-circuited as true). -- */ -- return __is_bad_mt_xwr(rsvd_check, spte) | -+ return __is_bad_mt_xwr(rsvd_check, spte) || - __is_rsvd_bits_set(rsvd_check, spte, level); - } - -diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c -index b3ed302c1a359..caa96c270b954 100644 ---- a/arch/x86/kvm/mmu/tdp_iter.c -+++ b/arch/x86/kvm/mmu/tdp_iter.c -@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level) - */ - void tdp_iter_restart(struct tdp_iter *iter) - { -+ iter->yielded = false; - iter->yielded_gfn = iter->next_last_level_gfn; - iter->level = iter->root_level; - -@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter) - */ - void tdp_iter_next(struct tdp_iter *iter) - { -+ if (iter->yielded) { -+ tdp_iter_restart(iter); -+ return; -+ } -+ - if (try_step_down(iter)) - return; - -diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h -index b1748b988d3ae..e19cabbcb65c8 100644 ---- a/arch/x86/kvm/mmu/tdp_iter.h -+++ b/arch/x86/kvm/mmu/tdp_iter.h -@@ -45,6 +45,12 @@ struct tdp_iter { - * iterator walks off the end of the paging structure. - */ - bool valid; -+ /* -+ * True if KVM dropped mmu_lock and yielded in the middle of a walk, in -+ * which case tdp_iter_next() needs to restart the walk at the root -+ * level instead of advancing to the next entry. -+ */ -+ bool yielded; - }; - - /* -diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c -index 64ccfc1fa5535..7a64fb2380448 100644 ---- a/arch/x86/kvm/mmu/tdp_mmu.c -+++ b/arch/x86/kvm/mmu/tdp_mmu.c -@@ -10,7 +10,7 @@ - #include - #include - --static bool __read_mostly tdp_mmu_enabled = true; -+static bool __read_mostly tdp_mmu_enabled = false; - module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644); - - /* Initializes the TDP MMU for the VM, if enabled. */ -@@ -99,15 +99,18 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, - } - - /* -- * Finds the next valid root after root (or the first valid root if root -- * is NULL), takes a reference on it, and returns that next root. If root -- * is not NULL, this thread should have already taken a reference on it, and -- * that reference will be dropped. If no valid root is found, this -- * function will return NULL. -+ * Returns the next root after @prev_root (or the first root if @prev_root is -+ * NULL). 
A reference to the returned root is acquired, and the reference to -+ * @prev_root is released (the caller obviously must hold a reference to -+ * @prev_root if it's non-NULL). -+ * -+ * If @only_valid is true, invalid roots are skipped. -+ * -+ * Returns NULL if the end of tdp_mmu_roots was reached. - */ - static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, - struct kvm_mmu_page *prev_root, -- bool shared) -+ bool shared, bool only_valid) - { - struct kvm_mmu_page *next_root; - -@@ -121,9 +124,14 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, - next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, - typeof(*next_root), link); - -- while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root)) -+ while (next_root) { -+ if ((!only_valid || !next_root->role.invalid) && -+ kvm_tdp_mmu_get_root(kvm, next_root)) -+ break; -+ - next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, - &next_root->link, typeof(*next_root), link); -+ } - - rcu_read_unlock(); - -@@ -143,13 +151,19 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, - * mode. In the unlikely event that this thread must free a root, the lock - * will be temporarily dropped and reacquired in write mode. - */ --#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ -- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \ -- _root; \ -- _root = tdp_mmu_next_root(_kvm, _root, _shared)) \ -- if (kvm_mmu_page_as_id(_root) != _as_id) { \ -+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\ -+ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \ -+ _root; \ -+ _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \ -+ if (kvm_mmu_page_as_id(_root) != _as_id) { \ - } else - -+#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ -+ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true) -+ -+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \ -+ __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, false) -+ - #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \ - list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \ - lockdep_is_held_type(&kvm->mmu_lock, 0) || \ -@@ -199,7 +213,10 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) - - role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); - -- /* Check for an existing root before allocating a new one. */ -+ /* -+ * Check for an existing root before allocating a new one. Note, the -+ * role check prevents consuming an invalid root. 
-+ */ - for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { - if (root->role.word == role.word && - kvm_tdp_mmu_get_root(kvm, root)) -@@ -316,9 +333,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, - struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); - int level = sp->role.level; - gfn_t base_gfn = sp->gfn; -- u64 old_child_spte; -- u64 *sptep; -- gfn_t gfn; - int i; - - trace_kvm_mmu_prepare_zap_page(sp); -@@ -326,8 +340,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, - tdp_mmu_unlink_page(kvm, sp, shared); - - for (i = 0; i < PT64_ENT_PER_PAGE; i++) { -- sptep = rcu_dereference(pt) + i; -- gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); -+ u64 *sptep = rcu_dereference(pt) + i; -+ gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); -+ u64 old_child_spte; - - if (shared) { - /* -@@ -373,7 +388,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, - shared); - } - -- kvm_flush_remote_tlbs_with_address(kvm, gfn, -+ kvm_flush_remote_tlbs_with_address(kvm, base_gfn, - KVM_PAGES_PER_HPAGE(level + 1)); - - call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); -@@ -503,6 +518,8 @@ static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) - { -+ WARN_ON_ONCE(iter->yielded); -+ - lockdep_assert_held_read(&kvm->mmu_lock); - - /* -@@ -613,6 +630,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, - u64 new_spte, bool record_acc_track, - bool record_dirty_log) - { -+ WARN_ON_ONCE(iter->yielded); -+ - lockdep_assert_held_write(&kvm->mmu_lock); - - /* -@@ -678,18 +697,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, - * If this function should yield and flush is set, it will perform a remote - * TLB flush before yielding. - * -- * If this function yields, it will also reset the tdp_iter's walk over the -- * paging structure and the calling function should skip to the next -- * iteration to allow the iterator to continue its traversal from the -- * paging structure root. -+ * If this function yields, iter->yielded is set and the caller must skip to -+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk -+ * over the paging structures to allow the iterator to continue its traversal -+ * from the paging structure root. - * -- * Return true if this function yielded and the iterator's traversal was reset. -- * Return false if a yield was not needed. -+ * Returns true if this function yielded. - */ --static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, -- struct tdp_iter *iter, bool flush, -- bool shared) -+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, -+ struct tdp_iter *iter, -+ bool flush, bool shared) - { -+ WARN_ON(iter->yielded); -+ - /* Ensure forward progress has been made before yielding. 
*/ - if (iter->next_last_level_gfn == iter->yielded_gfn) - return false; -@@ -709,12 +729,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, - - WARN_ON(iter->gfn > iter->next_last_level_gfn); - -- tdp_iter_restart(iter); -- -- return true; -+ iter->yielded = true; - } - -- return false; -+ return iter->yielded; - } - - /* -@@ -1080,13 +1098,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, - bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, - bool flush) - { -- struct kvm_mmu_page *root; -- -- for_each_tdp_mmu_root(kvm, root, range->slot->as_id) -- flush |= zap_gfn_range(kvm, root, range->start, range->end, -- range->may_block, flush, false); -- -- return flush; -+ return __kvm_tdp_mmu_zap_gfn_range(kvm, range->slot->as_id, range->start, -+ range->end, range->may_block, flush); - } - - typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter, -@@ -1270,7 +1283,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, - - lockdep_assert_held_read(&kvm->mmu_lock); - -- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) -+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) - spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, - slot->base_gfn + slot->npages, min_level); - -@@ -1298,6 +1311,9 @@ retry: - if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) - continue; - -+ if (!is_shadow_present_pte(iter.old_spte)) -+ continue; -+ - if (spte_ad_need_write_protect(iter.old_spte)) { - if (is_writable_pte(iter.old_spte)) - new_spte = iter.old_spte & ~PT_WRITABLE_MASK; -@@ -1341,7 +1357,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, - - lockdep_assert_held_read(&kvm->mmu_lock); - -- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) -+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) - spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, - slot->base_gfn + slot->npages); - -@@ -1415,10 +1431,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, - * Clear leaf entries which could be replaced by large mappings, for - * GFNs within the slot. - */ --static bool zap_collapsible_spte_range(struct kvm *kvm, -+static void zap_collapsible_spte_range(struct kvm *kvm, - struct kvm_mmu_page *root, -- const struct kvm_memory_slot *slot, -- bool flush) -+ const struct kvm_memory_slot *slot) - { - gfn_t start = slot->base_gfn; - gfn_t end = start + slot->npages; -@@ -1429,10 +1444,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm, - - tdp_root_for_each_pte(iter, root, start, end) { - retry: -- if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) { -- flush = false; -+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) - continue; -- } - - if (!is_shadow_present_pte(iter.old_spte) || - !is_last_spte(iter.old_spte, iter.level)) -@@ -1444,6 +1457,7 @@ retry: - pfn, PG_LEVEL_NUM)) - continue; - -+ /* Note, a successful atomic zap also does a remote TLB flush. */ - if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { - /* - * The iter must explicitly re-read the SPTE because -@@ -1452,30 +1466,24 @@ retry: - iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); - goto retry; - } -- flush = true; - } - - rcu_read_unlock(); -- -- return flush; - } - - /* - * Clear non-leaf entries (and free associated page tables) which could - * be replaced by large mappings, for GFNs within the slot. 
- */ --bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, -- const struct kvm_memory_slot *slot, -- bool flush) -+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, -+ const struct kvm_memory_slot *slot) - { - struct kvm_mmu_page *root; - - lockdep_assert_held_read(&kvm->mmu_lock); - -- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) -- flush = zap_collapsible_spte_range(kvm, root, slot, flush); -- -- return flush; -+ for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) -+ zap_collapsible_spte_range(kvm, root, slot); - } - - /* -@@ -1500,12 +1508,12 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, - !is_last_spte(iter.old_spte, iter.level)) - continue; - -- if (!is_writable_pte(iter.old_spte)) -- break; -- - new_spte = iter.old_spte & - ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask); - -+ if (new_spte == iter.old_spte) -+ break; -+ - tdp_mmu_set_spte(kvm, &iter, new_spte); - spte_set = true; - } -diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h -index 358f447d40120..39468b637d2e4 100644 ---- a/arch/x86/kvm/mmu/tdp_mmu.h -+++ b/arch/x86/kvm/mmu/tdp_mmu.h -@@ -10,9 +10,6 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); - __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm, - struct kvm_mmu_page *root) - { -- if (root->role.invalid) -- return false; -- - return refcount_inc_not_zero(&root->tdp_mmu_root_count); - } - -@@ -66,9 +63,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, - struct kvm_memory_slot *slot, - gfn_t gfn, unsigned long mask, - bool wrprot); --bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, -- const struct kvm_memory_slot *slot, -- bool flush); -+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, -+ const struct kvm_memory_slot *slot); - - bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, - struct kvm_memory_slot *slot, gfn_t gfn, -diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c -index 0772bad9165c5..62333f9756a36 100644 ---- a/arch/x86/kvm/pmu.c -+++ b/arch/x86/kvm/pmu.c -@@ -95,9 +95,8 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event, - } - - static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, -- unsigned config, bool exclude_user, -- bool exclude_kernel, bool intr, -- bool in_tx, bool in_tx_cp) -+ u64 config, bool exclude_user, -+ bool exclude_kernel, bool intr) - { - struct perf_event *event; - struct perf_event_attr attr = { -@@ -113,16 +112,14 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, - - attr.sample_period = get_sample_period(pmc, pmc->counter); - -- if (in_tx) -- attr.config |= HSW_IN_TX; -- if (in_tx_cp) { -+ if ((attr.config & HSW_IN_TX_CHECKPOINTED) && -+ guest_cpuid_is_intel(pmc->vcpu)) { - /* - * HSW_IN_TX_CHECKPOINTED is not supported with nonzero - * period. Just clear the sample period so at least - * allocating the counter doesn't fail. 
- */ - attr.sample_period = 0; -- attr.config |= HSW_IN_TX_CHECKPOINTED; - } - - event = perf_event_create_kernel_counter(&attr, -1, current, -@@ -173,11 +170,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) - - void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) - { -- unsigned config, type = PERF_TYPE_RAW; -- u8 event_select, unit_mask; -+ u64 config; -+ u32 type = PERF_TYPE_RAW; - struct kvm *kvm = pmc->vcpu->kvm; - struct kvm_pmu_event_filter *filter; - int i; -+ struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu); - bool allow_event = true; - - if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) -@@ -206,23 +204,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) - if (!allow_event) - return; - -- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; -- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; -- - if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | - ARCH_PERFMON_EVENTSEL_INV | - ARCH_PERFMON_EVENTSEL_CMASK | - HSW_IN_TX | - HSW_IN_TX_CHECKPOINTED))) { -- config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc), -- event_select, -- unit_mask); -+ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); - if (config != PERF_COUNT_HW_MAX) - type = PERF_TYPE_HARDWARE; - } - - if (type == PERF_TYPE_RAW) -- config = eventsel & X86_RAW_EVENT_MASK; -+ config = eventsel & pmu->raw_event_mask; - - if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) - return; -@@ -233,9 +226,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) - pmc_reprogram_counter(pmc, type, config, - !(eventsel & ARCH_PERFMON_EVENTSEL_USR), - !(eventsel & ARCH_PERFMON_EVENTSEL_OS), -- eventsel & ARCH_PERFMON_EVENTSEL_INT, -- (eventsel & HSW_IN_TX), -- (eventsel & HSW_IN_TX_CHECKPOINTED)); -+ eventsel & ARCH_PERFMON_EVENTSEL_INT); - } - EXPORT_SYMBOL_GPL(reprogram_gp_counter); - -@@ -271,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx) - kvm_x86_ops.pmu_ops->find_fixed_event(idx), - !(en_field & 0x2), /* exclude user */ - !(en_field & 0x1), /* exclude kernel */ -- pmi, false, false); -+ pmi); - } - EXPORT_SYMBOL_GPL(reprogram_fixed_counter); - -diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h -index 0e4f2b1fa9fbd..c206decb39fab 100644 ---- a/arch/x86/kvm/pmu.h -+++ b/arch/x86/kvm/pmu.h -@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping { - }; - - struct kvm_pmu_ops { -- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select, -- u8 unit_mask); -+ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc); - unsigned (*find_fixed_event)(int idx); - bool (*pmc_is_enabled)(struct kvm_pmc *pmc); - struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); -@@ -142,6 +141,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value) - return sample_period; - } - -+static inline void pmc_update_sample_period(struct kvm_pmc *pmc) -+{ -+ if (!pmc->perf_event || pmc->is_paused) -+ return; -+ -+ perf_event_period(pmc->perf_event, -+ get_sample_period(pmc, pmc->counter)); -+} -+ - void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel); - void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx); - void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx); -diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h -index a19d473d01847..7eeade35a425b 100644 ---- a/arch/x86/kvm/reverse_cpuid.h -+++ b/arch/x86/kvm/reverse_cpuid.h -@@ -48,6 +48,7 @@ static const struct cpuid_reg reverse_cpuid[] = { - [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, - [CPUID_12_EAX] = {0x00000012, 0, 
CPUID_EAX}, - [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, -+ [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, - }; - - /* -diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c -index 8052d92069e01..b595a33860d70 100644 ---- a/arch/x86/kvm/svm/avic.c -+++ b/arch/x86/kvm/svm/avic.c -@@ -318,20 +318,24 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu) - trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index); - - switch (id) { -+ case AVIC_IPI_FAILURE_INVALID_TARGET: - case AVIC_IPI_FAILURE_INVALID_INT_TYPE: - /* -- * AVIC hardware handles the generation of -- * IPIs when the specified Message Type is Fixed -- * (also known as fixed delivery mode) and -- * the Trigger Mode is edge-triggered. The hardware -- * also supports self and broadcast delivery modes -- * specified via the Destination Shorthand(DSH) -- * field of the ICRL. Logical and physical APIC ID -- * formats are supported. All other IPI types cause -- * a #VMEXIT, which needs to emulated. -+ * Emulate IPIs that are not handled by AVIC hardware, which -+ * only virtualizes Fixed, Edge-Triggered INTRs, and falls over -+ * if _any_ targets are invalid, e.g. if the logical mode mask -+ * is a superset of running vCPUs. -+ * -+ * The exit is a trap, e.g. ICR holds the correct value and RIP -+ * has been advanced, KVM is responsible only for emulating the -+ * IPI. Sadly, hardware may sometimes leave the BUSY flag set, -+ * in which case KVM needs to emulate the ICR write as well in -+ * order to clear the BUSY flag. - */ -- kvm_lapic_reg_write(apic, APIC_ICR2, icrh); -- kvm_lapic_reg_write(apic, APIC_ICR, icrl); -+ if (icrl & APIC_ICR_BUSY) -+ kvm_apic_write_nodecode(vcpu, APIC_ICR); -+ else -+ kvm_apic_send_ipi(apic, icrl, icrh); - break; - case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: - /* -@@ -341,10 +345,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu) - */ - avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh); - break; -- case AVIC_IPI_FAILURE_INVALID_TARGET: -- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", -- index, vcpu->vcpu_id, icrh, icrl); -- break; - case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: - WARN_ONCE(1, "Invalid backing page\n"); - break; -@@ -801,7 +801,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, - { - struct kvm_kernel_irq_routing_entry *e; - struct kvm_irq_routing_table *irq_rt; -- int idx, ret = -EINVAL; -+ int idx, ret = 0; - - if (!kvm_arch_has_assigned_device(kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP)) -@@ -812,7 +812,13 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, - - idx = srcu_read_lock(&kvm->irq_srcu); - irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); -- WARN_ON(guest_irq >= irq_rt->nr_rt_entries); -+ -+ if (guest_irq >= irq_rt->nr_rt_entries || -+ hlist_empty(&irq_rt->map[guest_irq])) { -+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", -+ guest_irq, irq_rt->nr_rt_entries); -+ goto out; -+ } - - hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { - struct vcpu_data vcpu_info; -@@ -943,15 +949,10 @@ out: - void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - { - u64 entry; -- /* ID = 0xff (broadcast), ID > 0xff (reserved) */ - int h_physical_id = kvm_cpu_get_apicid(cpu); - struct vcpu_svm *svm = to_svm(vcpu); - -- /* -- * Since the host physical APIC id is 8 bits, -- * we can support host APIC ID upto 255. 
-- */ -- if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK)) -+ if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK)) - return; - - entry = READ_ONCE(*(svm->avic_physical_id_cache)); -@@ -988,16 +989,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu) - static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) - { - struct vcpu_svm *svm = to_svm(vcpu); -+ int cpu = get_cpu(); - -+ WARN_ON(cpu != vcpu->cpu); - svm->avic_is_running = is_run; - -- if (!kvm_vcpu_apicv_active(vcpu)) -- return; -- -- if (is_run) -- avic_vcpu_load(vcpu, vcpu->cpu); -- else -- avic_vcpu_put(vcpu); -+ if (kvm_vcpu_apicv_active(vcpu)) { -+ if (is_run) -+ avic_vcpu_load(vcpu, cpu); -+ else -+ avic_vcpu_put(vcpu); -+ } -+ put_cpu(); - } - - void svm_vcpu_blocking(struct kvm_vcpu *vcpu) -diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c -index 510b833cbd399..e0b4f88b04b3e 100644 ---- a/arch/x86/kvm/svm/nested.c -+++ b/arch/x86/kvm/svm/nested.c -@@ -275,7 +275,8 @@ static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu, - return false; - } - -- if (CC(!kvm_is_valid_cr4(vcpu, save->cr4))) -+ /* Note, SVM doesn't have any additional restrictions on CR4. */ -+ if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4))) - return false; - - return true; -@@ -750,9 +751,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) - struct kvm_host_map map; - int rc; - -- /* Triple faults in L2 should never escape. */ -- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); -- - rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); - if (rc) { - if (rc == -EINVAL) -@@ -921,6 +919,9 @@ void svm_free_nested(struct vcpu_svm *svm) - if (!svm->nested.initialized) - return; - -+ if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr)) -+ svm_switch_vmcb(svm, &svm->vmcb01); -+ - svm_vcpu_free_msrpm(svm->nested.msrpm); - svm->nested.msrpm = NULL; - -@@ -939,12 +940,9 @@ void svm_free_nested(struct vcpu_svm *svm) - svm->nested.initialized = false; - } - --/* -- * Forcibly leave nested mode in order to be able to reset the VCPU later on. -- */ --void svm_leave_nested(struct vcpu_svm *svm) -+void svm_leave_nested(struct kvm_vcpu *vcpu) - { -- struct kvm_vcpu *vcpu = &svm->vcpu; -+ struct vcpu_svm *svm = to_svm(vcpu); - - if (is_guest_mode(vcpu)) { - svm->nested.nested_run_pending = 0; -@@ -1313,7 +1311,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, - return -EINVAL; - - if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) { -- svm_leave_nested(svm); -+ svm_leave_nested(vcpu); - svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); - return 0; - } -@@ -1357,18 +1355,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, - !nested_vmcb_valid_sregs(vcpu, save)) - goto out_free; - -- /* -- * While the nested guest CR3 is already checked and set by -- * KVM_SET_SREGS, it was set when nested state was yet loaded, -- * thus MMU might not be initialized correctly. -- * Set it again to fix this. -- */ -- -- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, -- nested_npt_enabled(svm), false); -- if (WARN_ON_ONCE(ret)) -- goto out_free; -- - - /* - * All checks done, we can enter guest mode. 
Userspace provides -@@ -1378,7 +1364,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, - */ - - if (is_guest_mode(vcpu)) -- svm_leave_nested(svm); -+ svm_leave_nested(vcpu); - else - svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; - -@@ -1394,6 +1380,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, - - svm_switch_vmcb(svm, &svm->nested.vmcb02); - nested_vmcb02_prepare_control(svm); -+ -+ /* -+ * While the nested guest CR3 is already checked and set by -+ * KVM_SET_SREGS, it was set when nested state was yet loaded, -+ * thus MMU might not be initialized correctly. -+ * Set it again to fix this. -+ */ -+ -+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, -+ nested_npt_enabled(svm), false); -+ if (WARN_ON_ONCE(ret)) -+ goto out_free; -+ -+ - kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); - ret = 0; - out_free: -@@ -1432,6 +1432,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) - } - - struct kvm_x86_nested_ops svm_nested_ops = { -+ .leave_nested = svm_leave_nested, - .check_events = svm_check_nested_events, - .triple_fault = nested_svm_triple_fault, - .get_nested_state_pages = svm_get_nested_state_pages, -diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c -index fdf587f19c5fb..d35c94e13afb0 100644 ---- a/arch/x86/kvm/svm/pmu.c -+++ b/arch/x86/kvm/svm/pmu.c -@@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = { - [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - }; - -+/* duplicated from amd_f17h_perfmon_event_map. */ -+static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = { -+ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, -+ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, -+ [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES }, -+ [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES }, -+ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, -+ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, -+ [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, -+ [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, -+}; -+ -+/* amd_pmc_perf_hw_id depends on these being the same size */ -+static_assert(ARRAY_SIZE(amd_event_mapping) == -+ ARRAY_SIZE(amd_f17h_event_mapping)); -+ - static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) - { - struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); -@@ -134,21 +150,27 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, - return &pmu->gp_counters[msr_to_index(msr)]; - } - --static unsigned amd_find_arch_event(struct kvm_pmu *pmu, -- u8 event_select, -- u8 unit_mask) -+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) - { -+ struct kvm_event_hw_type_mapping *event_mapping; -+ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; -+ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - int i; - -+ if (guest_cpuid_family(pmc->vcpu) >= 0x17) -+ event_mapping = amd_f17h_event_mapping; -+ else -+ event_mapping = amd_event_mapping; -+ - for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) -- if (amd_event_mapping[i].eventsel == event_select -- && amd_event_mapping[i].unit_mask == unit_mask) -+ if (event_mapping[i].eventsel == event_select -+ && event_mapping[i].unit_mask == unit_mask) - break; - - if (i == ARRAY_SIZE(amd_event_mapping)) - return PERF_COUNT_HW_MAX; - -- return amd_event_mapping[i].event_type; -+ return event_mapping[i].event_type; - } - - /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */ -@@ -256,17 +278,16 @@ static int 
amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); - if (pmc) { - pmc->counter += data - pmc_read_counter(pmc); -+ pmc_update_sample_period(pmc); - return 0; - } - /* MSR_EVNTSELn */ - pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); - if (pmc) { -- if (data == pmc->eventsel) -- return 0; -- if (!(data & pmu->reserved_bits)) { -+ data &= ~pmu->reserved_bits; -+ if (data != pmc->eventsel) - reprogram_gp_counter(pmc, data); -- return 0; -- } -+ return 0; - } - - return 1; -@@ -282,7 +303,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) - pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS; - - pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; -- pmu->reserved_bits = 0xffffffff00200000ull; -+ pmu->reserved_bits = 0xfffffff000280000ull; -+ pmu->raw_event_mask = AMD64_RAW_EVENT_MASK; - pmu->version = 1; - /* not applicable to AMD; but clean them to prevent any fall out */ - pmu->counter_bitmask[KVM_PMC_FIXED] = 0; -@@ -320,7 +342,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) - } - - struct kvm_pmu_ops amd_pmu_ops = { -- .find_arch_event = amd_find_arch_event, -+ .pmc_perf_hw_id = amd_pmc_perf_hw_id, - .find_fixed_event = amd_find_fixed_event, - .pmc_is_enabled = amd_pmc_is_enabled, - .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, -diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c -index 7e34d7163adab..93d73b55ae3e6 100644 ---- a/arch/x86/kvm/svm/sev.c -+++ b/arch/x86/kvm/svm/sev.c -@@ -676,7 +676,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) - if (params.len > SEV_FW_BLOB_MAX_SIZE) - return -EINVAL; - -- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); -+ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT); - if (!blob) - return -ENOMEM; - -@@ -796,7 +796,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, - if (!IS_ALIGNED(dst_paddr, 16) || - !IS_ALIGNED(paddr, 16) || - !IS_ALIGNED(size, 16)) { -- tpage = (void *)alloc_page(GFP_KERNEL); -+ tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!tpage) - return -ENOMEM; - -@@ -832,7 +832,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, - - /* If source buffer is not aligned then use an intermediate buffer */ - if (!IS_ALIGNED((unsigned long)vaddr, 16)) { -- src_tpage = alloc_page(GFP_KERNEL); -+ src_tpage = alloc_page(GFP_KERNEL_ACCOUNT); - if (!src_tpage) - return -ENOMEM; - -@@ -853,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, - if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) { - int dst_offset; - -- dst_tpage = alloc_page(GFP_KERNEL); -+ dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT); - if (!dst_tpage) { - ret = -ENOMEM; - goto e_free; -@@ -1082,7 +1082,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) - if (params.len > SEV_FW_BLOB_MAX_SIZE) - return -EINVAL; - -- blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT); -+ blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT); - if (!blob) - return -ENOMEM; - -@@ -1164,7 +1164,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) - return -EINVAL; - - /* allocate the memory to hold the session data blob */ -- session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); -+ session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT); - if (!session_data) - return -ENOMEM; - -@@ -1277,7 +1277,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) - - /* Check if we are crossing the page boundary */ - 
offset = params.guest_uaddr & (PAGE_SIZE - 1); -- if ((params.guest_len + offset > PAGE_SIZE)) -+ if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) - return -EINVAL; - - /* Pin guest memory */ -@@ -1288,11 +1288,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) - - /* allocate memory for header and transport buffer */ - ret = -ENOMEM; -- hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); -+ hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); - if (!hdr) - goto e_unpin; - -- trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT); -+ trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); - if (!trans_data) - goto e_free_hdr; - -@@ -1457,7 +1457,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) - - /* Check if we are crossing the page boundary */ - offset = params.guest_uaddr & (PAGE_SIZE - 1); -- if ((params.guest_len + offset > PAGE_SIZE)) -+ if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) - return -EINVAL; - - hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); -@@ -1787,7 +1787,12 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) - mutex_unlock(&source_kvm->lock); - mutex_lock(&kvm->lock); - -- if (sev_guest(kvm)) { -+ /* -+ * Disallow out-of-band SEV/SEV-ES init if the target is already an -+ * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being -+ * created after SEV/SEV-ES initialization, e.g. to init intercepts. -+ */ -+ if (sev_guest(kvm) || kvm->created_vcpus) { - ret = -EINVAL; - goto e_mirror_unlock; - } -@@ -1800,6 +1805,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) - mirror_sev->fd = source_sev.fd; - mirror_sev->es_active = source_sev.es_active; - mirror_sev->handle = source_sev.handle; -+ INIT_LIST_HEAD(&mirror_sev->regions_list); - /* - * Do not copy ap_jump_table. Since the mirror does not share the same - * KVM contexts as the original, and they may have different -@@ -1984,11 +1990,14 @@ static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va, - unsigned long len) - { - /* -- * If hardware enforced cache coherency for encrypted mappings of the -- * same physical page is supported, nothing to do. -+ * If CPU enforced cache coherency for encrypted mappings of the -+ * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache -+ * flush is still needed in order to work properly with DMA devices. 
- */ -- if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) -+ if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) { -+ clflush_cache_range(va, PAGE_SIZE); - return; -+ } - - /* - * If the VM Page Flush MSR is supported, use it to flush the page -@@ -2028,6 +2037,14 @@ static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va, - wbinvd_on_all_cpus(); - } - -+void sev_guest_memory_reclaimed(struct kvm *kvm) -+{ -+ if (!sev_guest(kvm)) -+ return; -+ -+ wbinvd_on_all_cpus(); -+} -+ - void sev_free_vcpu(struct kvm_vcpu *vcpu) - { - struct vcpu_svm *svm; -@@ -2311,7 +2328,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) - } - - #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE) --static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) -+static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) - { - struct vmcb_control_area *control = &svm->vmcb->control; - struct ghcb *ghcb = svm->ghcb; -@@ -2322,14 +2339,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) - scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); - if (!scratch_gpa_beg) { - pr_err("vmgexit: scratch gpa not provided\n"); -- return false; -+ return -EINVAL; - } - - scratch_gpa_end = scratch_gpa_beg + len; - if (scratch_gpa_end < scratch_gpa_beg) { - pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n", - len, scratch_gpa_beg); -- return false; -+ return -EINVAL; - } - - if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { -@@ -2347,7 +2364,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) - scratch_gpa_end > ghcb_scratch_end) { - pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", - scratch_gpa_beg, scratch_gpa_end); -- return false; -+ return -EINVAL; - } - - scratch_va = (void *)svm->ghcb; -@@ -2360,18 +2377,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) - if (len > GHCB_SCRATCH_AREA_LIMIT) { - pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n", - len, GHCB_SCRATCH_AREA_LIMIT); -- return false; -+ return -EINVAL; - } - scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT); - if (!scratch_va) -- return false; -+ return -ENOMEM; - - if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { - /* Unable to copy scratch area from guest */ - pr_err("vmgexit: kvm_read_guest for scratch area failed\n"); - - kfree(scratch_va); -- return false; -+ return -EFAULT; - } - - /* -@@ -2387,7 +2404,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) - svm->ghcb_sa = scratch_va; - svm->ghcb_sa_len = len; - -- return true; -+ return 0; - } - - static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, -@@ -2526,10 +2543,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) - ghcb_set_sw_exit_info_1(ghcb, 0); - ghcb_set_sw_exit_info_2(ghcb, 0); - -- ret = -EINVAL; - switch (exit_code) { - case SVM_VMGEXIT_MMIO_READ: -- if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) -+ ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); -+ if (ret) - break; - - ret = kvm_sev_es_mmio_read(vcpu, -@@ -2538,7 +2555,8 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) - svm->ghcb_sa); - break; - case SVM_VMGEXIT_MMIO_WRITE: -- if (!setup_vmgexit_scratch(svm, false, control->exit_info_2)) -+ ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); -+ if (ret) - break; - - ret = kvm_sev_es_mmio_write(vcpu, -@@ -2581,6 +2599,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) - 
vcpu_unimpl(vcpu, - "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", - control->exit_info_1, control->exit_info_2); -+ ret = -EINVAL; - break; - default: - ret = svm_invoke_exit_handler(vcpu, exit_code); -@@ -2593,6 +2612,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) - { - int count; - int bytes; -+ int r; - - if (svm->vmcb->control.exit_info_2 > INT_MAX) - return -EINVAL; -@@ -2601,8 +2621,9 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) - if (unlikely(check_mul_overflow(count, size, &bytes))) - return -EINVAL; - -- if (!setup_vmgexit_scratch(svm, in, bytes)) -- return -EINVAL; -+ r = setup_vmgexit_scratch(svm, in, bytes); -+ if (r) -+ return r; - - return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in); - } -diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index 989685098b3ea..8e9a6c41f9eea 100644 ---- a/arch/x86/kvm/svm/svm.c -+++ b/arch/x86/kvm/svm/svm.c -@@ -281,7 +281,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) - - if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { - if (!(efer & EFER_SVME)) { -- svm_leave_nested(svm); -+ svm_leave_nested(vcpu); - svm_set_gif(svm, true); - /* #GP intercept is still needed for vmware backdoor */ - if (!enable_vmware_backdoor) -@@ -303,7 +303,11 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) - return ret; - } - -- if (svm_gp_erratum_intercept) -+ /* -+ * Never intercept #GP for SEV guests, KVM can't -+ * decrypt guest memory to workaround the erratum. -+ */ -+ if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm)) - set_exception_intercept(svm, GP_VECTOR); - } - } -@@ -313,12 +317,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) - return 0; - } - --static int is_external_interrupt(u32 info) --{ -- info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID; -- return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); --} -- - static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) - { - struct vcpu_svm *svm = to_svm(vcpu); -@@ -390,6 +388,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) - */ - (void)skip_emulated_instruction(vcpu); - rip = kvm_rip_read(vcpu); -+ -+ if (boot_cpu_has(X86_FEATURE_NRIPS)) -+ svm->vmcb->control.next_rip = rip; -+ - svm->int3_rip = rip + svm->vmcb->save.cs.base; - svm->int3_injected = rip - old_rip; - } -@@ -463,11 +465,24 @@ static int has_svm(void) - return 1; - } - -+void __svm_write_tsc_multiplier(u64 multiplier) -+{ -+ preempt_disable(); -+ -+ if (multiplier == __this_cpu_read(current_tsc_ratio)) -+ goto out; -+ -+ wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); -+ __this_cpu_write(current_tsc_ratio, multiplier); -+out: -+ preempt_enable(); -+} -+ - static void svm_hardware_disable(void) - { - /* Make sure we clean up behind us */ - if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) -- wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); -+ __svm_write_tsc_multiplier(TSC_RATIO_DEFAULT); - - cpu_svm_disable(); - -@@ -509,8 +524,11 @@ static int svm_hardware_enable(void) - wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area)); - - if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { -- wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT); -- __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT); -+ /* -+ * Set the default value, even if we don't use TSC scaling -+ * to avoid having stale value in the msr -+ */ -+ __svm_write_tsc_multiplier(TSC_RATIO_DEFAULT); - } - - -@@ -1123,9 +1141,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) - - static void 
svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) - { -- wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); -+ __svm_write_tsc_multiplier(multiplier); - } - -+ - /* Evaluate instruction intercepts that depend on guest CPUID features. */ - static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, - struct vcpu_svm *svm) -@@ -1176,9 +1195,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu) - * Guest access to VMware backdoor ports could legitimately - * trigger #GP because of TSS I/O permission bitmap. - * We intercept those #GP and allow access to them anyway -- * as VMware does. -+ * as VMware does. Don't intercept #GP for SEV guests as KVM can't -+ * decrypt guest memory to decode the faulting instruction. - */ -- if (enable_vmware_backdoor) -+ if (enable_vmware_backdoor && !sev_guest(vcpu->kvm)) - set_exception_intercept(svm, GP_VECTOR); - - svm_set_intercept(svm, INTERCEPT_INTR); -@@ -1418,6 +1438,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) - */ - svm_clear_current_vmcb(svm->vmcb); - -+ svm_leave_nested(vcpu); - svm_free_nested(svm); - - sev_free_vcpu(vcpu); -@@ -1431,6 +1452,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) - struct vcpu_svm *svm = to_svm(vcpu); - struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); - -+ amd_clear_divider(); -+ - if (sev_es_guest(vcpu->kvm)) - sev_es_unmap_ghcb(svm); - -@@ -1447,13 +1470,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) - vmsave(__sme_page_pa(sd->save_area)); - } - -- if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { -- u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; -- if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { -- __this_cpu_write(current_tsc_ratio, tsc_ratio); -- wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); -- } -- } -+ if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) -+ __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); - - if (likely(tsc_aux_uret_slot >= 0)) - kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); -@@ -1473,7 +1491,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - - if (sd->current_vmcb != svm->vmcb) { - sd->current_vmcb = svm->vmcb; -- indirect_branch_prediction_barrier(); -+ -+ if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) -+ indirect_branch_prediction_barrier(); - } - if (kvm_vcpu_apicv_active(vcpu)) - avic_vcpu_load(vcpu, cpu); -@@ -1517,6 +1537,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) - to_svm(vcpu)->vmcb->save.rflags = rflags; - } - -+static bool svm_get_if_flag(struct kvm_vcpu *vcpu) -+{ -+ struct vmcb *vmcb = to_svm(vcpu)->vmcb; -+ -+ return sev_es_guest(vcpu->kvm) -+ ? 
vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK -+ : kvm_get_rflags(vcpu) & X86_EFLAGS_IF; -+} -+ - static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) - { - switch (reg) { -@@ -1709,10 +1738,16 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) - vmcb_mark_dirty(svm->vmcb, VMCB_DT); - } - -+static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) -+{ -+ return true; -+} -+ - void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - { - struct vcpu_svm *svm = to_svm(vcpu); - u64 hcr0 = cr0; -+ bool old_paging = is_paging(vcpu); - - #ifdef CONFIG_X86_64 - if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { -@@ -1729,8 +1764,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - #endif - vcpu->arch.cr0 = cr0; - -- if (!npt_enabled) -+ if (!npt_enabled) { - hcr0 |= X86_CR0_PG | X86_CR0_WP; -+ if (old_paging != is_paging(vcpu)) -+ svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); -+ } - - /* - * re-enable caching here because the QEMU bios -@@ -1774,8 +1812,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - svm_flush_tlb(vcpu); - - vcpu->arch.cr4 = cr4; -- if (!npt_enabled) -+ if (!npt_enabled) { - cr4 |= X86_CR4_PAE; -+ -+ if (!is_paging(vcpu)) -+ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); -+ } - cr4 |= host_cr4_mce; - to_svm(vcpu)->vmcb->save.cr4 = cr4; - vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); -@@ -2224,10 +2266,6 @@ static int gp_interception(struct kvm_vcpu *vcpu) - if (error_code) - goto reinject; - -- /* All SVM instructions expect page aligned RAX */ -- if (svm->vmcb->save.rax & ~PAGE_MASK) -- goto reinject; -- - /* Decode the instruction for usage later */ - if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) - goto reinject; -@@ -2245,8 +2283,13 @@ static int gp_interception(struct kvm_vcpu *vcpu) - if (!is_guest_mode(vcpu)) - return kvm_emulate_instruction(vcpu, - EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); -- } else -+ } else { -+ /* All SVM instructions expect page aligned RAX */ -+ if (svm->vmcb->save.rax & ~PAGE_MASK) -+ goto reinject; -+ - return emulate_svm_instr(vcpu, opcode); -+ } - - reinject: - kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); -@@ -2639,9 +2682,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) - msr->data = 0; - - switch (msr->index) { -- case MSR_F10H_DECFG: -- if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) -- msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; -+ case MSR_AMD64_DE_CFG: -+ if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC)) -+ msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE; - break; - case MSR_IA32_PERF_CAPABILITIES: - return 0; -@@ -2750,7 +2793,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - msr_info->data = 0x1E; - } - break; -- case MSR_F10H_DECFG: -+ case MSR_AMD64_DE_CFG: - msr_info->data = svm->msr_decfg; - break; - default: -@@ -2950,7 +2993,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) - case MSR_VM_IGNNE: - vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); - break; -- case MSR_F10H_DECFG: { -+ case MSR_AMD64_DE_CFG: { - struct kvm_msr_entry msr_entry; - - msr_entry.index = msr->index; -@@ -3332,15 +3375,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) - return 0; - } - -- if (is_external_interrupt(svm->vmcb->control.exit_int_info) && -- exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && -- exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && -- exit_code != SVM_EXIT_INTR && exit_code 
!= SVM_EXIT_NMI) -- printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " -- "exit_code 0x%x\n", -- __func__, svm->vmcb->control.exit_int_info, -- exit_code); -- - if (exit_fastpath != EXIT_FASTPATH_NONE) - return 1; - -@@ -3394,8 +3428,6 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) - { - struct vcpu_svm *svm = to_svm(vcpu); - -- BUG_ON(!(gif_set(svm))); -- - trace_kvm_inj_virq(vcpu->arch.interrupt.nr); - ++vcpu->stat.irq_injections; - -@@ -3485,14 +3517,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) - if (!gif_set(svm)) - return true; - -- if (sev_es_guest(vcpu->kvm)) { -- /* -- * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask -- * bit to determine the state of the IF flag. -- */ -- if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK)) -- return true; -- } else if (is_guest_mode(vcpu)) { -+ if (is_guest_mode(vcpu)) { - /* As long as interrupts are being delivered... */ - if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) - ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) -@@ -3503,7 +3528,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) - if (nested_exit_on_intr(svm)) - return false; - } else { -- if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) -+ if (!svm_get_if_flag(vcpu)) - return true; - } - -@@ -3666,6 +3691,18 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu) - vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; - type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; - -+ /* -+ * If NextRIP isn't enabled, KVM must manually advance RIP prior to -+ * injecting the soft exception/interrupt. That advancement needs to -+ * be unwound if vectoring didn't complete. Note, the new event may -+ * not be the injected event, e.g. if KVM injected an INTn, the INTn -+ * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will -+ * be the reported vectored event, but RIP still needs to be unwound. -+ */ -+ if (int3_injected && type == SVM_EXITINTINFO_TYPE_EXEPT && -+ kvm_is_linear_rip(vcpu, svm->int3_rip)) -+ kvm_rip_write(vcpu, kvm_rip_read(vcpu) - int3_injected); -+ - switch (type) { - case SVM_EXITINTINFO_TYPE_NMI: - vcpu->arch.nmi_injected = true; -@@ -3679,16 +3716,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu) - - /* - * In case of software exceptions, do not reinject the vector, -- * but re-execute the instruction instead. Rewind RIP first -- * if we emulated INT3 before. -+ * but re-execute the instruction instead. - */ -- if (kvm_exception_is_soft(vector)) { -- if (vector == BP_VECTOR && int3_injected && -- kvm_is_linear_rip(vcpu, svm->int3_rip)) -- kvm_rip_write(vcpu, -- kvm_rip_read(vcpu) - int3_injected); -+ if (kvm_exception_is_soft(vector)) - break; -- } -+ - if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { - u32 err = svm->vmcb->control.exit_int_info_err; - kvm_requeue_exception_e(vcpu, vector, err); -@@ -3717,8 +3749,14 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu) - - static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) - { -- if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && -- to_svm(vcpu)->vmcb->control.exit_info_1) -+ struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; -+ -+ /* -+ * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM -+ * can't read guest memory (dereference memslots) to decode the WRMSR. 
-+ */ -+ if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 && -+ nrips && control->next_rip) - return handle_fastpath_set_msr_irqoff(vcpu); - - return EXIT_FASTPATH_NONE; -@@ -4247,6 +4285,8 @@ out: - - static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) - { -+ if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) -+ vcpu->arch.at_instruction_boundary = true; - } - - static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) -@@ -4376,10 +4416,17 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) - * Enter the nested guest now - */ - -+ vmcb_mark_all_dirty(svm->vmcb01.ptr); -+ - vmcb12 = map.hva; - nested_load_control_from_vmcb12(svm, &vmcb12->control); - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); - -+ if (ret) -+ goto unmap_save; -+ -+ svm->nested.nested_run_pending = 1; -+ - unmap_save: - kvm_vcpu_unmap(vcpu, &map_save, true); - unmap_map: -@@ -4405,8 +4452,13 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i - bool smep, smap, is_user; - unsigned long cr4; - -+ /* Emulation is always possible when KVM has access to all guest state. */ -+ if (!sev_guest(vcpu->kvm)) -+ return true; -+ - /* -- * When the guest is an SEV-ES guest, emulation is not possible. -+ * Emulation is impossible for SEV-ES guests as KVM doesn't have access -+ * to guest register state. - */ - if (sev_es_guest(vcpu->kvm)) - return false; -@@ -4454,23 +4506,27 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i - if (likely(!insn || insn_len)) - return true; - -- /* -- * If RIP is invalid, go ahead with emulation which will cause an -- * internal error exit. -- */ -- if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT)) -- return true; -- - cr4 = kvm_read_cr4(vcpu); - smep = cr4 & X86_CR4_SMEP; - smap = cr4 & X86_CR4_SMAP; - is_user = svm_get_cpl(vcpu) == 3; - if (smap && (!smep || is_user)) { -- if (!sev_guest(vcpu->kvm)) -- return true; -- - pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); -- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); -+ -+ /* -+ * If the fault occurred in userspace, arbitrarily inject #GP -+ * to avoid killing the guest and to hopefully avoid confusing -+ * the guest kernel too much, e.g. injecting #PF would not be -+ * coherent with respect to the guest's page tables. Request -+ * triple fault if the fault occurred in the kernel as there's -+ * no fault that KVM can inject without confusing the guest. -+ * In practice, the triple fault is moot as no sane SEV kernel -+ * will execute from user memory while also running with SMAP=1. 
-+ */ -+ if (is_user) -+ kvm_inject_gp(vcpu, 0); -+ else -+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - } - - return false; -@@ -4549,6 +4605,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { - .set_segment = svm_set_segment, - .get_cpl = svm_get_cpl, - .get_cs_db_l_bits = kvm_get_cs_db_l_bits, -+ .is_valid_cr0 = svm_is_valid_cr0, - .set_cr0 = svm_set_cr0, - .is_valid_cr4 = svm_is_valid_cr4, - .set_cr4 = svm_set_cr4, -@@ -4562,6 +4619,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { - .cache_reg = svm_cache_reg, - .get_rflags = svm_get_rflags, - .set_rflags = svm_set_rflags, -+ .get_if_flag = svm_get_if_flag, - - .tlb_flush_all = svm_flush_tlb, - .tlb_flush_current = svm_flush_tlb, -@@ -4592,7 +4650,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { - .load_eoi_exitmap = svm_load_eoi_exitmap, - .hwapic_irr_update = svm_hwapic_irr_update, - .hwapic_isr_update = svm_hwapic_isr_update, -- .sync_pir_to_irr = kvm_lapic_find_highest_irr, - .apicv_post_state_restore = avic_post_state_restore, - - .set_tss_addr = svm_set_tss_addr, -@@ -4635,6 +4692,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { - .mem_enc_op = svm_mem_enc_op, - .mem_enc_reg_region = svm_register_enc_region, - .mem_enc_unreg_region = svm_unregister_enc_region, -+ .guest_memory_reclaimed = sev_guest_memory_reclaimed, - - .vm_copy_enc_context_from = svm_vm_copy_asid_from, - -diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h -index 5d30db599e10d..1d9b1a9e4398f 100644 ---- a/arch/x86/kvm/svm/svm.h -+++ b/arch/x86/kvm/svm/svm.h -@@ -22,6 +22,8 @@ - #include - #include - -+#include "kvm_cache_regs.h" -+ - #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) - - #define IOPM_SIZE PAGE_SIZE * 3 -@@ -461,7 +463,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) - - int enter_svm_guest_mode(struct kvm_vcpu *vcpu, - u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun); --void svm_leave_nested(struct vcpu_svm *svm); -+void svm_leave_nested(struct kvm_vcpu *vcpu); - void svm_free_nested(struct vcpu_svm *svm); - int svm_allocate_nested(struct vcpu_svm *svm); - int nested_svm_vmrun(struct kvm_vcpu *vcpu); -@@ -485,6 +487,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, - int nested_svm_exit_special(struct vcpu_svm *svm); - void nested_load_control_from_vmcb12(struct vcpu_svm *svm, - struct vmcb_control_area *control); -+void __svm_write_tsc_multiplier(u64 multiplier); - void nested_sync_control_from_vmcb02(struct vcpu_svm *svm); - void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm); - void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb); -@@ -497,7 +500,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops; - #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31 - #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) - --#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL) -+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0) - #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12) - #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62) - #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63) -@@ -553,6 +556,8 @@ int svm_register_enc_region(struct kvm *kvm, - int svm_unregister_enc_region(struct kvm *kvm, - struct kvm_enc_region *range); - int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd); -+void sev_guest_memory_reclaimed(struct kvm *kvm); -+ - void pre_sev_run(struct vcpu_svm *svm, int cpu); - void __init sev_set_cpu_caps(void); - void __init 
sev_hardware_setup(void); -diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c -index 98aa981c04ec5..8cdc62c74a964 100644 ---- a/arch/x86/kvm/svm/svm_onhyperv.c -+++ b/arch/x86/kvm/svm/svm_onhyperv.c -@@ -4,7 +4,6 @@ - */ - - #include --#include "kvm_cache_regs.h" - - #include - -diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h -index c53b8bf8d0138..3a0c3814a3770 100644 ---- a/arch/x86/kvm/svm/svm_onhyperv.h -+++ b/arch/x86/kvm/svm/svm_onhyperv.h -@@ -48,7 +48,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb) - hve->hv_enlightenments_control.enlightened_npt_tlb = 1; - } - --static inline void svm_hv_hardware_setup(void) -+static inline __init void svm_hv_hardware_setup(void) - { - if (npt_enabled && - ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) { -@@ -112,7 +112,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb) - { - } - --static inline void svm_hv_hardware_setup(void) -+static inline __init void svm_hv_hardware_setup(void) - { - } - -diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S -index 4fa17df123cd6..f960608555226 100644 ---- a/arch/x86/kvm/svm/vmenter.S -+++ b/arch/x86/kvm/svm/vmenter.S -@@ -110,6 +110,18 @@ SYM_FUNC_START(__svm_vcpu_run) - mov %r15, VCPU_R15(%_ASM_AX) - #endif - -+ /* -+ * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be -+ * untrained as soon as we exit the VM and are back to the -+ * kernel. This should be done before re-enabling interrupts -+ * because interrupt handlers won't sanitize 'ret' if the return is -+ * from the kernel. -+ */ -+ UNTRAIN_RET -+ -+ /* SRSO */ -+ ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT -+ - /* - * Clear all general purpose registers except RSP and RAX to prevent - * speculative use of the guest's values, even those that are reloaded -@@ -148,7 +160,7 @@ SYM_FUNC_START(__svm_vcpu_run) - pop %edi - #endif - pop %_ASM_BP -- ret -+ RET - - 3: cmpb $0, kvm_rebooting - jne 2b -@@ -190,6 +202,15 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) - FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE - #endif - -+ /* -+ * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be -+ * untrained as soon as we exit the VM and are back to the -+ * kernel. This should be done before re-enabling interrupts -+ * because interrupt handlers won't sanitize RET if the return is -+ * from the kernel. 
-+ */ -+ UNTRAIN_RET -+ - pop %_ASM_BX - - #ifdef CONFIG_X86_64 -@@ -202,7 +223,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) - pop %edi - #endif - pop %_ASM_BP -- ret -+ RET - - 3: cmpb $0, kvm_rebooting - jne 2b -diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h -index 03ebe368333ef..c41506ed8c7dd 100644 ---- a/arch/x86/kvm/trace.h -+++ b/arch/x86/kvm/trace.h -@@ -355,25 +355,29 @@ TRACE_EVENT(kvm_inj_virq, - * Tracepoint for kvm interrupt injection: - */ - TRACE_EVENT(kvm_inj_exception, -- TP_PROTO(unsigned exception, bool has_error, unsigned error_code), -- TP_ARGS(exception, has_error, error_code), -+ TP_PROTO(unsigned exception, bool has_error, unsigned error_code, -+ bool reinjected), -+ TP_ARGS(exception, has_error, error_code, reinjected), - - TP_STRUCT__entry( - __field( u8, exception ) - __field( u8, has_error ) - __field( u32, error_code ) -+ __field( bool, reinjected ) - ), - - TP_fast_assign( - __entry->exception = exception; - __entry->has_error = has_error; - __entry->error_code = error_code; -+ __entry->reinjected = reinjected; - ), - -- TP_printk("%s (0x%x)", -+ TP_printk("%s (0x%x)%s", - __print_symbolic(__entry->exception, kvm_trace_sym_exc), - /* FIXME: don't print error_code if not present */ -- __entry->has_error ? __entry->error_code : 0) -+ __entry->has_error ? __entry->error_code : 0, -+ __entry->reinjected ? " [reinjected]" : "") - ); - - /* -diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c -index ba6f99f584ac3..a7ed30d5647af 100644 ---- a/arch/x86/kvm/vmx/evmcs.c -+++ b/arch/x86/kvm/vmx/evmcs.c -@@ -362,6 +362,7 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata) - case MSR_IA32_VMX_PROCBASED_CTLS2: - ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - break; -+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS: - case MSR_IA32_VMX_PINBASED_CTLS: - ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; - break; -diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h -index 152ab0aa82cf6..57451cf622d3e 100644 ---- a/arch/x86/kvm/vmx/evmcs.h -+++ b/arch/x86/kvm/vmx/evmcs.h -@@ -59,7 +59,9 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); - SECONDARY_EXEC_SHADOW_VMCS | \ - SECONDARY_EXEC_TSC_SCALING | \ - SECONDARY_EXEC_PAUSE_LOOP_EXITING) --#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) -+#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \ -+ (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ -+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) - #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) - #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) - -@@ -160,16 +162,6 @@ static inline u16 evmcs_read16(unsigned long field) - return *(u16 *)((char *)current_evmcs + offset); - } - --static inline void evmcs_touch_msr_bitmap(void) --{ -- if (unlikely(!current_evmcs)) -- return; -- -- if (current_evmcs->hv_enlightenments_control.msr_bitmap) -- current_evmcs->hv_clean_fields &= -- ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP; --} -- - static inline void evmcs_load(u64 phys_addr) - { - struct hv_vp_assist_page *vp_ap = -@@ -190,7 +182,6 @@ static inline u64 evmcs_read64(unsigned long field) { return 0; } - static inline u32 evmcs_read32(unsigned long field) { return 0; } - static inline u16 evmcs_read16(unsigned long field) { return 0; } - static inline void evmcs_load(u64 phys_addr) {} --static inline void evmcs_touch_msr_bitmap(void) {} - #endif /* IS_ENABLED(CONFIG_HYPERV) */ - - #define EVMPTR_INVALID (-1ULL) -diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c -index eedcebf580041..e4e4c1d3aa179 100644 ---- 
a/arch/x86/kvm/vmx/nested.c -+++ b/arch/x86/kvm/vmx/nested.c -@@ -523,29 +523,6 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, - return 0; - } - --/* -- * Check if MSR is intercepted for L01 MSR bitmap. -- */ --static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) --{ -- unsigned long *msr_bitmap; -- int f = sizeof(unsigned long); -- -- if (!cpu_has_vmx_msr_bitmap()) -- return true; -- -- msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; -- -- if (msr <= 0x1fff) { -- return !!test_bit(msr, msr_bitmap + 0x800 / f); -- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { -- msr &= 0x1fff; -- return !!test_bit(msr, msr_bitmap + 0xc00 / f); -- } -- -- return true; --} -- - /* - * If a msr is allowed by L0, we should check whether it is allowed by L1. - * The corresponding bit will be cleared unless both of L0 and L1 allow it. -@@ -599,6 +576,34 @@ static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) - } - } - -+#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \ -+static inline \ -+void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \ -+ unsigned long *msr_bitmap_l1, \ -+ unsigned long *msr_bitmap_l0, u32 msr) \ -+{ \ -+ if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \ -+ vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \ -+ vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \ -+ else \ -+ vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \ -+} -+BUILD_NVMX_MSR_INTERCEPT_HELPER(read) -+BUILD_NVMX_MSR_INTERCEPT_HELPER(write) -+ -+static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx, -+ unsigned long *msr_bitmap_l1, -+ unsigned long *msr_bitmap_l0, -+ u32 msr, int types) -+{ -+ if (types & MSR_TYPE_R) -+ nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1, -+ msr_bitmap_l0, msr); -+ if (types & MSR_TYPE_W) -+ nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1, -+ msr_bitmap_l0, msr); -+} -+ - /* - * Merge L0's and L1's MSR bitmap, return false to indicate that - * we do not use the hardware. -@@ -606,10 +611,11 @@ static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) - static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) - { -+ struct vcpu_vmx *vmx = to_vmx(vcpu); - int msr; - unsigned long *msr_bitmap_l1; -- unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; -- struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; -+ unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; -+ struct kvm_host_map *map = &vmx->nested.msr_bitmap_map; - - /* Nothing to do if the MSR bitmap is not in use. */ - if (!cpu_has_vmx_msr_bitmap() || -@@ -660,44 +666,27 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, - } - } - -- /* KVM unconditionally exposes the FS/GS base MSRs to L1. */ -+ /* -+ * Always check vmcs01's bitmap to honor userspace MSR filters and any -+ * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through. 
-+ */ - #ifdef CONFIG_X86_64 -- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, -- MSR_FS_BASE, MSR_TYPE_RW); -+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, -+ MSR_FS_BASE, MSR_TYPE_RW); - -- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, -- MSR_GS_BASE, MSR_TYPE_RW); -+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, -+ MSR_GS_BASE, MSR_TYPE_RW); - -- nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, -- MSR_KERNEL_GS_BASE, MSR_TYPE_RW); -+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, -+ MSR_KERNEL_GS_BASE, MSR_TYPE_RW); - #endif -+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, -+ MSR_IA32_SPEC_CTRL, MSR_TYPE_RW); - -- /* -- * Checking the L0->L1 bitmap is trying to verify two things: -- * -- * 1. L0 gave a permission to L1 to actually passthrough the MSR. This -- * ensures that we do not accidentally generate an L02 MSR bitmap -- * from the L12 MSR bitmap that is too permissive. -- * 2. That L1 or L2s have actually used the MSR. This avoids -- * unnecessarily merging of the bitmap if the MSR is unused. This -- * works properly because we only update the L01 MSR bitmap lazily. -- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only -- * updated to reflect this when L1 (or its L2s) actually write to -- * the MSR. -- */ -- if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL)) -- nested_vmx_disable_intercept_for_msr( -- msr_bitmap_l1, msr_bitmap_l0, -- MSR_IA32_SPEC_CTRL, -- MSR_TYPE_R | MSR_TYPE_W); -+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, -+ MSR_IA32_PRED_CMD, MSR_TYPE_W); - -- if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD)) -- nested_vmx_disable_intercept_for_msr( -- msr_bitmap_l1, msr_bitmap_l0, -- MSR_IA32_PRED_CMD, -- MSR_TYPE_W); -- -- kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false); -+ kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); - - return true; - } -@@ -1191,29 +1180,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, - WARN_ON(!enable_vpid); - - /* -- * If VPID is enabled and used by vmc12, but L2 does not have a unique -- * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate -- * a VPID for L2, flush the current context as the effective ASID is -- * common to both L1 and L2. -- * -- * Defer the flush so that it runs after vmcs02.EPTP has been set by -- * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid -- * redundant flushes further down the nested pipeline. -- * -- * If a TLB flush isn't required due to any of the above, and vpid12 is -- * changing then the new "virtual" VPID (vpid12) will reuse the same -- * "real" VPID (vpid02), and so needs to be flushed. There's no direct -- * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for -- * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate -- * guest-physical mappings, so there is no need to sync the nEPT MMU. -+ * VPID is enabled and in use by vmcs12. If vpid12 is changing, then -+ * emulate a guest TLB flush as KVM does not track vpid12 history nor -+ * is the VPID incorporated into the MMU context. I.e. KVM must assume -+ * that the new vpid12 has never been used and thus represents a new -+ * guest ASID that cannot have entries in the TLB. 
- */ -- if (!nested_has_guest_tlb_tag(vcpu)) { -- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); -- } else if (is_vmenter && -- vmcs12->virtual_processor_id != vmx->nested.last_vpid) { -+ if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { - vmx->nested.last_vpid = vmcs12->virtual_processor_id; -- vpid_sync_context(nested_get_vpid02(vcpu)); -+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); -+ return; - } -+ -+ /* -+ * If VPID is enabled, used by vmc12, and vpid12 is not changing but -+ * does not have a unique TLB tag (ASID), i.e. EPT is disabled and -+ * KVM was unable to allocate a VPID for L2, flush the current context -+ * as the effective ASID is common to both L1 and L2. -+ */ -+ if (!nested_has_guest_tlb_tag(vcpu)) -+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); - } - - static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) -@@ -1231,7 +1217,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) - BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | - /* reserved */ - BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); -- u64 vmx_basic = vmx->nested.msrs.basic; -+ u64 vmx_basic = vmcs_config.nested.basic; - - if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) - return -EINVAL; -@@ -1254,36 +1240,42 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) - return 0; - } - --static int --vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -+static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, -+ u32 **low, u32 **high) - { -- u64 supported; -- u32 *lowp, *highp; -- - switch (msr_index) { - case MSR_IA32_VMX_TRUE_PINBASED_CTLS: -- lowp = &vmx->nested.msrs.pinbased_ctls_low; -- highp = &vmx->nested.msrs.pinbased_ctls_high; -+ *low = &msrs->pinbased_ctls_low; -+ *high = &msrs->pinbased_ctls_high; - break; - case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: -- lowp = &vmx->nested.msrs.procbased_ctls_low; -- highp = &vmx->nested.msrs.procbased_ctls_high; -+ *low = &msrs->procbased_ctls_low; -+ *high = &msrs->procbased_ctls_high; - break; - case MSR_IA32_VMX_TRUE_EXIT_CTLS: -- lowp = &vmx->nested.msrs.exit_ctls_low; -- highp = &vmx->nested.msrs.exit_ctls_high; -+ *low = &msrs->exit_ctls_low; -+ *high = &msrs->exit_ctls_high; - break; - case MSR_IA32_VMX_TRUE_ENTRY_CTLS: -- lowp = &vmx->nested.msrs.entry_ctls_low; -- highp = &vmx->nested.msrs.entry_ctls_high; -+ *low = &msrs->entry_ctls_low; -+ *high = &msrs->entry_ctls_high; - break; - case MSR_IA32_VMX_PROCBASED_CTLS2: -- lowp = &vmx->nested.msrs.secondary_ctls_low; -- highp = &vmx->nested.msrs.secondary_ctls_high; -+ *low = &msrs->secondary_ctls_low; -+ *high = &msrs->secondary_ctls_high; - break; - default: - BUG(); - } -+} -+ -+static int -+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -+{ -+ u32 *lowp, *highp; -+ u64 supported; -+ -+ vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp); - - supported = vmx_control_msr(*lowp, *highp); - -@@ -1295,6 +1287,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) - if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) - return -EINVAL; - -+ vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); - *lowp = data; - *highp = data >> 32; - return 0; -@@ -1308,10 +1301,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) - BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | - /* reserved */ - GENMASK_ULL(13, 9) | BIT_ULL(31); -- u64 vmx_misc; -- -- vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, -- 
vmx->nested.msrs.misc_high); -+ u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, -+ vmcs_config.nested.misc_high); - - if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) - return -EINVAL; -@@ -1339,10 +1330,8 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) - - static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) - { -- u64 vmx_ept_vpid_cap; -- -- vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, -- vmx->nested.msrs.vpid_caps); -+ u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps, -+ vmcs_config.nested.vpid_caps); - - /* Every bit is either reserved or a feature bit. */ - if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) -@@ -1353,20 +1342,21 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) - return 0; - } - --static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -+static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index) - { -- u64 *msr; -- - switch (msr_index) { - case MSR_IA32_VMX_CR0_FIXED0: -- msr = &vmx->nested.msrs.cr0_fixed0; -- break; -+ return &msrs->cr0_fixed0; - case MSR_IA32_VMX_CR4_FIXED0: -- msr = &vmx->nested.msrs.cr4_fixed0; -- break; -+ return &msrs->cr4_fixed0; - default: - BUG(); - } -+} -+ -+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) -+{ -+ const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index); - - /* - * 1 bits (which indicates bits which "must-be-1" during VMX operation) -@@ -1375,7 +1365,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) - if (!is_bitwise_subset(data, *msr, -1ULL)) - return -EINVAL; - -- *msr = data; -+ *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; - return 0; - } - -@@ -1436,7 +1426,7 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) - vmx->nested.msrs.vmcs_enum = data; - return 0; - case MSR_IA32_VMX_VMFUNC: -- if (data & ~vmx->nested.msrs.vmfunc_controls) -+ if (data & ~vmcs_config.nested.vmfunc_controls) - return -EINVAL; - vmx->nested.msrs.vmfunc_controls = data; - return 0; -@@ -2283,7 +2273,6 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0 - SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_APIC_REGISTER_VIRT | - SECONDARY_EXEC_ENABLE_VMFUNC | -- SECONDARY_EXEC_TSC_SCALING | - SECONDARY_EXEC_DESC); - - if (nested_cpu_has(vmcs12, -@@ -2324,9 +2313,14 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0 - * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate - * on the related bits (if supported by the CPU) in the hope that - * we can avoid VMWrites during vmx_set_efer(). -+ * -+ * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is -+ * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to -+ * do the same for L2. 
- */ - exec_control = __vm_entry_controls_get(vmcs01); -- exec_control |= vmcs12->vm_entry_controls; -+ exec_control |= (vmcs12->vm_entry_controls & -+ ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL); - exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER); - if (cpu_has_load_ia32_efer()) { - if (guest_efer & EFER_LMA) -@@ -2622,9 +2616,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; - - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && -+ intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) && - WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, -- vmcs12->guest_ia32_perf_global_ctrl))) -+ vmcs12->guest_ia32_perf_global_ctrl))) { -+ *entry_failure_code = ENTRY_FAIL_DEFAULT; - return -EINVAL; -+ } - - kvm_rsp_write(vcpu, vmcs12->guest_rsp); - kvm_rip_write(vcpu, vmcs12->guest_rip); -@@ -2865,6 +2862,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, - return 0; - } - -+static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, -+ struct vmcs12 *vmcs12) -+{ -+#ifdef CONFIG_X86_64 -+ if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != -+ !!(vcpu->arch.efer & EFER_LMA))) -+ return -EINVAL; -+#endif -+ return 0; -+} -+ - static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) - { -@@ -2889,18 +2897,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, - return -EINVAL; - - #ifdef CONFIG_X86_64 -- ia32e = !!(vcpu->arch.efer & EFER_LMA); -+ ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); - #else - ia32e = false; - #endif - - if (ia32e) { -- if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || -- CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) -+ if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) - return -EINVAL; - } else { -- if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || -- CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || -+ if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || - CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || - CC((vmcs12->host_rip) >> 32)) - return -EINVAL; -@@ -2985,7 +2991,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, - enum vm_entry_failure_code *entry_failure_code) - { -- bool ia32e; -+ bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); - - *entry_failure_code = ENTRY_FAIL_DEFAULT; - -@@ -3011,6 +3017,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, - vmcs12->guest_ia32_perf_global_ctrl))) - return -EINVAL; - -+ if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) -+ return -EINVAL; -+ -+ if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || -+ CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) -+ return -EINVAL; -+ - /* - * If the load IA32_EFER VM-entry control is 1, the following checks - * are performed on the field for the IA32_EFER MSR: -@@ -3022,7 +3035,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, - */ - if (to_vmx(vcpu)->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { -- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; - if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || - CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || - CC(((vmcs12->guest_cr0 & X86_CR0_PG) && -@@ -3080,7 +3092,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) - } - - vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, 
-- vmx->loaded_vmcs->launched); -+ __vmx_vcpu_run_flags(vmx)); - - if (vmx->msr_autoload.host.nr) - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); -@@ -3360,18 +3372,19 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, - }; - u32 failed_index; - -- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) -- kvm_vcpu_flush_tlb_current(vcpu); -+ kvm_service_local_tlb_flush_requests(vcpu); - - evaluate_pending_interrupts = exec_controls_get(vmx) & - (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); - if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) - evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - -- if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) -+ if (!vmx->nested.nested_run_pending || -+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) - vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - if (kvm_mpx_supported() && -- !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) -+ (!vmx->nested.nested_run_pending || -+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) - vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); - - /* -@@ -3570,6 +3583,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) - if (nested_vmx_check_controls(vcpu, vmcs12)) - return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - -+ if (nested_vmx_check_address_space_size(vcpu, vmcs12)) -+ return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); -+ - if (nested_vmx_check_host_state(vcpu, vmcs12)) - return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); - -@@ -3679,12 +3695,34 @@ vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) - } - - static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, -- struct vmcs12 *vmcs12) -+ struct vmcs12 *vmcs12, -+ u32 vm_exit_reason, u32 exit_intr_info) - { - u32 idt_vectoring; - unsigned int nr; - -- if (vcpu->arch.exception.injected) { -+ /* -+ * Per the SDM, VM-Exits due to double and triple faults are never -+ * considered to occur during event delivery, even if the double/triple -+ * fault is the result of an escalating vectoring issue. -+ * -+ * Note, the SDM qualifies the double fault behavior with "The original -+ * event results in a double-fault exception". It's unclear why the -+ * qualification exists since exits due to double fault can occur only -+ * while vectoring a different exception (injected events are never -+ * subject to interception), i.e. there's _always_ an original event. -+ * -+ * The SDM also uses NMI as a confusing example for the "original event -+ * causes the VM exit directly" clause. NMI isn't special in any way, -+ * the same rule applies to all events that cause an exit directly. -+ * NMI is an odd choice for the example because NMIs can only occur on -+ * instruction boundaries, i.e. they _can't_ occur during vectoring. 
-+ */ -+ if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT || -+ ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI && -+ is_double_fault(exit_intr_info))) { -+ vmcs12->idt_vectoring_info_field = 0; -+ } else if (vcpu->arch.exception.injected) { - nr = vcpu->arch.exception.nr; - idt_vectoring = nr | VECTORING_INFO_VALID_MASK; - -@@ -3717,6 +3755,8 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, - idt_vectoring |= INTR_TYPE_EXT_INTR; - - vmcs12->idt_vectoring_info_field = idt_vectoring; -+ } else { -+ vmcs12->idt_vectoring_info_field = 0; - } - } - -@@ -3792,7 +3832,16 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, - u32 intr_info = nr | INTR_INFO_VALID_MASK; - - if (vcpu->arch.exception.has_error_code) { -- vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; -+ /* -+ * Intel CPUs do not generate error codes with bits 31:16 set, -+ * and more importantly VMX disallows setting bits 31:16 in the -+ * injected error code for VM-Entry. Drop the bits to mimic -+ * hardware and avoid inducing failure on nested VM-Entry if L1 -+ * chooses to inject the exception back to L2. AMD CPUs _do_ -+ * generate "full" 32-bit error codes, so KVM allows userspace -+ * to inject exception error codes with bits 31:16 set. -+ */ -+ vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code; - intr_info |= INTR_INFO_DELIVER_CODE_MASK; - } - -@@ -4186,12 +4235,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - if (to_vmx(vcpu)->exit_reason.enclave_mode) - vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; - vmcs12->exit_qualification = exit_qualification; -- vmcs12->vm_exit_intr_info = exit_intr_info; -- -- vmcs12->idt_vectoring_info_field = 0; -- vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); -- vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - -+ /* -+ * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched -+ * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other -+ * exit info fields are unmodified. -+ */ - if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { - vmcs12->launch_state = 1; - -@@ -4203,7 +4252,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - * Transfer the event that L0 or L1 may wanted to inject into - * L2 to IDT_VECTORING_INFO_FIELD. - */ -- vmcs12_save_pending_event(vcpu, vmcs12); -+ vmcs12_save_pending_event(vcpu, vmcs12, -+ vm_exit_reason, exit_intr_info); -+ -+ vmcs12->vm_exit_intr_info = exit_intr_info; -+ vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); -+ vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); - - /* - * According to spec, there's no need to store the guest's -@@ -4217,14 +4271,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - nested_vmx_abort(vcpu, - VMX_ABORT_SAVE_GUEST_MSR_FAIL); - } -- -- /* -- * Drop what we picked up for L2 via vmx_complete_interrupts. It is -- * preserved above and would only end up incorrectly in L1. 
-- */ -- vcpu->arch.nmi_injected = false; -- kvm_clear_exception_queue(vcpu); -- kvm_clear_interrupt_queue(vcpu); - } - - /* -@@ -4296,7 +4342,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, - vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); - vcpu->arch.pat = vmcs12->host_ia32_pat; - } -- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) -+ if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && -+ intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu))) - WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, - vmcs12->host_ia32_perf_global_ctrl)); - -@@ -4502,9 +4549,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, - /* trying to cancel vmlaunch/vmresume is a bug */ - WARN_ON_ONCE(vmx->nested.nested_run_pending); - -- /* Similarly, triple faults in L2 should never escape. */ -- WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)); -- - if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { - /* - * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map -@@ -4515,9 +4559,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, - (void)nested_get_evmcs_page(vcpu); - } - -- /* Service the TLB flush request for L2 before switching to L1. */ -- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) -- kvm_vcpu_flush_tlb_current(vcpu); -+ /* Service pending TLB flush requests for L2 before switching to L1. */ -+ kvm_service_local_tlb_flush_requests(vcpu); - - /* - * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between -@@ -4567,8 +4610,30 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, - WARN_ON_ONCE(nested_early_check); - } - -+ /* -+ * Drop events/exceptions that were queued for re-injection to L2 -+ * (picked up via vmx_complete_interrupts()), as well as exceptions -+ * that were pending for L2. Note, this must NOT be hoisted above -+ * prepare_vmcs12(), events/exceptions queued for re-injection need to -+ * be captured in vmcs12 (see vmcs12_save_pending_event()). -+ */ -+ vcpu->arch.nmi_injected = false; -+ kvm_clear_exception_queue(vcpu); -+ kvm_clear_interrupt_queue(vcpu); -+ - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - -+ /* -+ * If IBRS is advertised to the vCPU, KVM must flush the indirect -+ * branch predictors when transitioning from L2 to L1, as L1 expects -+ * hardware (KVM in this case) to provide separate predictor modes. -+ * Bare metal isolates VMX root (host) from VMX non-root (guest), but -+ * doesn't isolate different VMCSs, i.e. in this case, doesn't provide -+ * separate modes for L2 vs L1. 
-+ */ -+ if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) -+ indirect_branch_prediction_barrier(); -+ - /* Update any VMCS fields that might have changed while L2 ran */ - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); -@@ -4603,6 +4668,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, - kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - } - -+ if (vmx->nested.update_vmcs01_apicv_status) { -+ vmx->nested.update_vmcs01_apicv_status = false; -+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); -+ } -+ - if ((vm_exit_reason != -1) && - (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) - vmx->nested.need_vmcs12_to_shadow_sync = true; -@@ -4917,20 +4987,36 @@ static int handle_vmon(struct kvm_vcpu *vcpu) - | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; - - /* -- * The Intel VMX Instruction Reference lists a bunch of bits that are -- * prerequisite to running VMXON, most notably cr4.VMXE must be set to -- * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this). -- * Otherwise, we should fail with #UD. But most faulting conditions -- * have already been checked by hardware, prior to the VM-exit for -- * VMXON. We do test guest cr4.VMXE because processor CR4 always has -- * that bit set to 1 in non-root mode. -+ * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter -+ * the guest and so cannot rely on hardware to perform the check, -+ * which has higher priority than VM-Exit (see Intel SDM's pseudocode -+ * for VMXON). -+ * -+ * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 -+ * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't -+ * force any of the relevant guest state. For a restricted guest, KVM -+ * does force CR0.PE=1, but only to also force VM86 in order to emulate -+ * Real Mode, and so there's no need to check CR0.PE manually. - */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } - -- /* CPL=0 must be checked manually. */ -+ /* -+ * The CPL is checked for "not in VMX operation" and for "in VMX root", -+ * and has higher priority than the VM-Fail due to being post-VMXON, -+ * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, -+ * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits -+ * from L2 to L1, i.e. there's no need to check for the vCPU being in -+ * VMX non-root. -+ * -+ * Forwarding the VM-Exit unconditionally, i.e. without performing the -+ * #UD checks (see above), is functionally ok because KVM doesn't allow -+ * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's -+ * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are -+ * missed by hardware due to shadowing CR0 and/or CR4. -+ */ - if (vmx_get_cpl(vcpu)) { - kvm_inject_gp(vcpu, 0); - return 1; -@@ -4939,6 +5025,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu) - if (vmx->nested.vmxon) - return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); - -+ /* -+ * Invalid CR0/CR4 generates #GP. These checks are performed if and -+ * only if the vCPU isn't already in VMX operation, i.e. effectively -+ * have lower priority than the VM-Fail above. 
-+ */ -+ if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || -+ !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { -+ kvm_inject_gp(vcpu, 0); -+ return 1; -+ } -+ - if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) - != VMXON_NEEDED_FEATURES) { - kvm_inject_gp(vcpu, 0); -@@ -6218,9 +6315,6 @@ out: - return kvm_state.size; - } - --/* -- * Forcibly leave nested mode in order to be able to reset the VCPU later on. -- */ - void vmx_leave_nested(struct kvm_vcpu *vcpu) - { - if (is_guest_mode(vcpu)) { -@@ -6589,7 +6683,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) - SECONDARY_EXEC_ENABLE_INVPCID | - SECONDARY_EXEC_RDSEED_EXITING | - SECONDARY_EXEC_XSAVES | -- SECONDARY_EXEC_TSC_SCALING; -+ SECONDARY_EXEC_TSC_SCALING | -+ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; - - /* - * We can emulate "VMCS shadowing," even if the hardware -@@ -6697,6 +6792,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); - rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); - -+ if (vmx_umip_emulated()) -+ msrs->cr4_fixed1 |= X86_CR4_UMIP; -+ - msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); - } - -@@ -6750,6 +6848,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) - } - - struct kvm_x86_nested_ops vmx_nested_ops = { -+ .leave_nested = vmx_leave_nested, - .check_events = vmx_check_nested_events, - .hv_timer_pending = nested_vmx_preemption_timer_pending, - .triple_fault = nested_vmx_triple_fault, -diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h -index b69a80f43b37e..2d0ac8a86d4a4 100644 ---- a/arch/x86/kvm/vmx/nested.h -+++ b/arch/x86/kvm/vmx/nested.h -@@ -280,7 +280,8 @@ static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) - u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; - u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; - -- return fixed_bits_valid(val, fixed0, fixed1); -+ return fixed_bits_valid(val, fixed0, fixed1) && -+ __kvm_is_valid_cr4(vcpu, val); - } - - /* No difference in the restrictions on guest and host CR4 in VMX operation. 
*/ -diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c -index 10cc4f65c4efd..e624a39365ecb 100644 ---- a/arch/x86/kvm/vmx/pmu_intel.c -+++ b/arch/x86/kvm/vmx/pmu_intel.c -@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) - reprogram_counter(pmu, bit); - } - --static unsigned intel_find_arch_event(struct kvm_pmu *pmu, -- u8 event_select, -- u8 unit_mask) -+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) - { -+ struct kvm_pmu *pmu = pmc_to_pmu(pmc); -+ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; -+ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - int i; - - for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) -@@ -103,6 +104,9 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) - { - struct kvm_pmu *pmu = pmc_to_pmu(pmc); - -+ if (!intel_pmu_has_perf_global_ctrl(pmu)) -+ return true; -+ - return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); - } - -@@ -218,7 +222,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) - case MSR_CORE_PERF_GLOBAL_STATUS: - case MSR_CORE_PERF_GLOBAL_CTRL: - case MSR_CORE_PERF_GLOBAL_OVF_CTRL: -- ret = pmu->version > 1; -+ return intel_pmu_has_perf_global_ctrl(pmu); - break; - default: - ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || -@@ -395,12 +399,13 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - struct kvm_pmc *pmc; - u32 msr = msr_info->index; - u64 data = msr_info->data; -+ u64 reserved_bits; - - switch (msr) { - case MSR_CORE_PERF_FIXED_CTR_CTRL: - if (pmu->fixed_ctr_ctrl == data) - return 0; -- if (!(data & 0xfffffffffffff444ull)) { -+ if (!(data & pmu->fixed_ctr_ctrl_mask)) { - reprogram_fixed_counters(pmu, data); - return 0; - } -@@ -437,20 +442,20 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - !(msr & MSR_PMC_FULL_WIDTH_BIT)) - data = (s64)(s32)data; - pmc->counter += data - pmc_read_counter(pmc); -- if (pmc->perf_event && !pmc->is_paused) -- perf_event_period(pmc->perf_event, -- get_sample_period(pmc, data)); -+ pmc_update_sample_period(pmc); - return 0; - } else if ((pmc = get_fixed_pmc(pmu, msr))) { - pmc->counter += data - pmc_read_counter(pmc); -- if (pmc->perf_event && !pmc->is_paused) -- perf_event_period(pmc->perf_event, -- get_sample_period(pmc, data)); -+ pmc_update_sample_period(pmc); - return 0; - } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { - if (data == pmc->eventsel) - return 0; -- if (!(data & pmu->reserved_bits)) { -+ reserved_bits = pmu->reserved_bits; -+ if ((pmc->idx == 2) && -+ (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) -+ reserved_bits ^= HSW_IN_TX_CHECKPOINTED; -+ if (!(data & reserved_bits)) { - reprogram_gp_counter(pmc, data); - return 0; - } -@@ -470,6 +475,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) - struct kvm_cpuid_entry2 *entry; - union cpuid10_eax eax; - union cpuid10_edx edx; -+ int i; - - pmu->nr_arch_gp_counters = 0; - pmu->nr_arch_fixed_counters = 0; -@@ -477,6 +483,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) - pmu->counter_bitmask[KVM_PMC_FIXED] = 0; - pmu->version = 0; - pmu->reserved_bits = 0xffffffff00200000ull; -+ pmu->raw_event_mask = X86_RAW_EVENT_MASK; -+ pmu->global_ctrl_mask = ~0ull; -+ pmu->global_ovf_ctrl_mask = ~0ull; -+ pmu->fixed_ctr_ctrl_mask = ~0ull; - - entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); - if (!entry) -@@ -510,6 +520,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) - ((u64)1 << edx.split.bit_width_fixed) - 1; - } - -+ for (i = 
0; i < pmu->nr_arch_fixed_counters; i++) -+ pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); - pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | - (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); - pmu->global_ctrl_mask = ~pmu->global_ctrl; -@@ -523,8 +535,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) - entry = kvm_find_cpuid_entry(vcpu, 7, 0); - if (entry && - (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && -- (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) -- pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; -+ (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) { -+ pmu->reserved_bits ^= HSW_IN_TX; -+ pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); -+ } - - bitmap_set(pmu->all_valid_pmc_idx, - 0, pmu->nr_arch_gp_counters); -@@ -706,7 +720,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) - } - - struct kvm_pmu_ops intel_pmu_ops = { -- .find_arch_event = intel_find_arch_event, -+ .pmc_perf_hw_id = intel_pmc_perf_hw_id, - .find_fixed_event = intel_find_fixed_event, - .pmc_is_enabled = intel_pmc_is_enabled, - .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, -diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c -index 5f81ef092bd43..46fb83d6a286e 100644 ---- a/arch/x86/kvm/vmx/posted_intr.c -+++ b/arch/x86/kvm/vmx/posted_intr.c -@@ -5,6 +5,7 @@ - #include - - #include "lapic.h" -+#include "irq.h" - #include "posted_intr.h" - #include "trace.h" - #include "vmx.h" -@@ -14,7 +15,7 @@ - * can find which vCPU should be waken up. - */ - static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); --static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); -+static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock); - - static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) - { -@@ -50,7 +51,7 @@ void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) - - /* The full case. 
*/ - do { -- old.control = new.control = pi_desc->control; -+ old.control = new.control = READ_ONCE(pi_desc->control); - - dest = cpu_physical_id(cpu); - -@@ -77,13 +78,18 @@ after_clear_sn: - pi_set_on(pi_desc); - } - -+static bool vmx_can_use_vtd_pi(struct kvm *kvm) -+{ -+ return irqchip_in_kernel(kvm) && enable_apicv && -+ kvm_arch_has_assigned_device(kvm) && -+ irq_remapping_cap(IRQ_POSTING_CAP); -+} -+ - void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) - { - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - -- if (!kvm_arch_has_assigned_device(vcpu->kvm) || -- !irq_remapping_cap(IRQ_POSTING_CAP) || -- !kvm_vcpu_apicv_active(vcpu)) -+ if (!vmx_can_use_vtd_pi(vcpu->kvm)) - return; - - /* Set SN when the vCPU is preempted */ -@@ -98,7 +104,7 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) - unsigned int dest; - - do { -- old.control = new.control = pi_desc->control; -+ old.control = new.control = READ_ONCE(pi_desc->control); - WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, - "Wakeup handler not enabled while the VCPU is blocked\n"); - -@@ -115,9 +121,9 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) - new.control) != old.control); - - if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { -- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); -+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_del(&vcpu->blocked_vcpu_list); -- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); -+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - vcpu->pre_pcpu = -1; - } - } -@@ -141,24 +147,23 @@ int pi_pre_block(struct kvm_vcpu *vcpu) - struct pi_desc old, new; - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - -- if (!kvm_arch_has_assigned_device(vcpu->kvm) || -- !irq_remapping_cap(IRQ_POSTING_CAP) || -- !kvm_vcpu_apicv_active(vcpu)) -+ if (!vmx_can_use_vtd_pi(vcpu->kvm) || -+ vmx_interrupt_blocked(vcpu)) - return 0; - - WARN_ON(irqs_disabled()); - local_irq_disable(); - if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { - vcpu->pre_pcpu = vcpu->cpu; -- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); -+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - list_add_tail(&vcpu->blocked_vcpu_list, - &per_cpu(blocked_vcpu_on_cpu, - vcpu->pre_pcpu)); -- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); -+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); - } - - do { -- old.control = new.control = pi_desc->control; -+ old.control = new.control = READ_ONCE(pi_desc->control); - - WARN((pi_desc->sn == 1), - "Warning: SN field of posted-interrupts " -@@ -211,7 +216,7 @@ void pi_wakeup_handler(void) - struct kvm_vcpu *vcpu; - int cpu = smp_processor_id(); - -- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); -+ raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), - blocked_vcpu_list) { - struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); -@@ -219,13 +224,13 @@ void pi_wakeup_handler(void) - if (pi_test_on(pi_desc) == 1) - kvm_vcpu_kick(vcpu); - } -- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); -+ raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - } - - void __init pi_init_cpu(int cpu) - { - INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); -- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); -+ raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); - } - - bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu) -@@ -270,9 +275,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, 
uint32_t guest_irq, - struct vcpu_data vcpu_info; - int idx, ret = 0; - -- if (!kvm_arch_has_assigned_device(kvm) || -- !irq_remapping_cap(IRQ_POSTING_CAP) || -- !kvm_vcpu_apicv_active(kvm->vcpus[0])) -+ if (!vmx_can_use_vtd_pi(kvm)) - return 0; - - idx = srcu_read_lock(&kvm->irq_srcu); -diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h -new file mode 100644 -index 0000000000000..edc3f16cc1896 ---- /dev/null -+++ b/arch/x86/kvm/vmx/run_flags.h -@@ -0,0 +1,8 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef __KVM_X86_VMX_RUN_FLAGS_H -+#define __KVM_X86_VMX_RUN_FLAGS_H -+ -+#define VMX_RUN_VMRESUME (1 << 0) -+#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1) -+ -+#endif /* __KVM_X86_VMX_RUN_FLAGS_H */ -diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c -index 6693ebdc07701..b8cf9a59c145e 100644 ---- a/arch/x86/kvm/vmx/sgx.c -+++ b/arch/x86/kvm/vmx/sgx.c -@@ -188,8 +188,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, - /* Enforce CPUID restriction on max enclave size. */ - max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 : - sgx_12_0->edx; -- if (size >= BIT_ULL(max_size_log2)) -+ if (size >= BIT_ULL(max_size_log2)) { - kvm_inject_gp(vcpu, 0); -+ return 1; -+ } - - /* - * sgx_virt_ecreate() returns: -diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h -index 6e5de2e2b0da6..4de2a6e3b1900 100644 ---- a/arch/x86/kvm/vmx/vmcs.h -+++ b/arch/x86/kvm/vmx/vmcs.h -@@ -104,6 +104,11 @@ static inline bool is_breakpoint(u32 intr_info) - return is_exception_n(intr_info, BP_VECTOR); - } - -+static inline bool is_double_fault(u32 intr_info) -+{ -+ return is_exception_n(intr_info, DF_VECTOR); -+} -+ - static inline bool is_page_fault(u32 intr_info) - { - return is_exception_n(intr_info, PF_VECTOR); -diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S -index 3a6461694fc25..982138bebb70f 100644 ---- a/arch/x86/kvm/vmx/vmenter.S -+++ b/arch/x86/kvm/vmx/vmenter.S -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include "run_flags.h" - - #define WORD_SIZE (BITS_PER_LONG / 8) - -@@ -30,73 +31,12 @@ - - .section .noinstr.text, "ax" - --/** -- * vmx_vmenter - VM-Enter the current loaded VMCS -- * -- * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME -- * -- * Returns: -- * %RFLAGS.CF is set on VM-Fail Invalid -- * %RFLAGS.ZF is set on VM-Fail Valid -- * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit -- * -- * Note that VMRESUME/VMLAUNCH fall-through and return directly if -- * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump -- * to vmx_vmexit. -- */ --SYM_FUNC_START_LOCAL(vmx_vmenter) -- /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ -- je 2f -- --1: vmresume -- ret -- --2: vmlaunch -- ret -- --3: cmpb $0, kvm_rebooting -- je 4f -- ret --4: ud2 -- -- _ASM_EXTABLE(1b, 3b) -- _ASM_EXTABLE(2b, 3b) -- --SYM_FUNC_END(vmx_vmenter) -- --/** -- * vmx_vmexit - Handle a VMX VM-Exit -- * -- * Returns: -- * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit -- * -- * This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump -- * here after hardware loads the host's state, i.e. this is the destination -- * referred to by VMCS.HOST_RIP. -- */ --SYM_FUNC_START(vmx_vmexit) --#ifdef CONFIG_RETPOLINE -- ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE -- /* Preserve guest's RAX, it's used to stuff the RSB. */ -- push %_ASM_AX -- -- /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! 
*/ -- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE -- -- /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */ -- or $1, %_ASM_AX -- -- pop %_ASM_AX --.Lvmexit_skip_rsb: --#endif -- ret --SYM_FUNC_END(vmx_vmexit) -- - /** - * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode -- * @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp) -+ * @vmx: struct vcpu_vmx * - * @regs: unsigned long * (to guest registers) -- * @launched: %true if the VMCS has been launched -+ * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH -+ * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl - * - * Returns: - * 0 on VM-Exit, 1 on VM-Fail -@@ -115,24 +55,29 @@ SYM_FUNC_START(__vmx_vcpu_run) - #endif - push %_ASM_BX - -+ /* Save @vmx for SPEC_CTRL handling */ -+ push %_ASM_ARG1 -+ -+ /* Save @flags for SPEC_CTRL handling */ -+ push %_ASM_ARG3 -+ - /* - * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and - * @regs is needed after VM-Exit to save the guest's register values. - */ - push %_ASM_ARG2 - -- /* Copy @launched to BL, _ASM_ARG3 is volatile. */ -+ /* Copy @flags to BL, _ASM_ARG3 is volatile. */ - mov %_ASM_ARG3B, %bl - -- /* Adjust RSP to account for the CALL to vmx_vmenter(). */ -- lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2 -+ lea (%_ASM_SP), %_ASM_ARG2 - call vmx_update_host_rsp - - /* Load @regs to RAX. */ - mov (%_ASM_SP), %_ASM_AX - - /* Check if vmlaunch or vmresume is needed */ -- testb %bl, %bl -+ testb $VMX_RUN_VMRESUME, %bl - - /* Load guest registers. Don't clobber flags. */ - mov VCPU_RCX(%_ASM_AX), %_ASM_CX -@@ -154,11 +99,36 @@ SYM_FUNC_START(__vmx_vcpu_run) - /* Load guest RAX. This kills the @regs pointer! */ - mov VCPU_RAX(%_ASM_AX), %_ASM_AX - -- /* Enter guest mode */ -- call vmx_vmenter -+ /* Check EFLAGS.ZF from 'testb' above */ -+ jz .Lvmlaunch -+ -+ /* -+ * After a successful VMRESUME/VMLAUNCH, control flow "magically" -+ * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting. -+ * So this isn't a typical function and objtool needs to be told to -+ * save the unwind state here and restore it below. -+ */ -+ UNWIND_HINT_SAVE -+ -+/* -+ * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at -+ * the 'vmx_vmexit' label below. -+ */ -+.Lvmresume: -+ vmresume -+ jmp .Lvmfail -+ -+.Lvmlaunch: -+ vmlaunch -+ jmp .Lvmfail -+ -+ _ASM_EXTABLE(.Lvmresume, .Lfixup) -+ _ASM_EXTABLE(.Lvmlaunch, .Lfixup) - -- /* Jump on VM-Fail. */ -- jbe 2f -+SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) -+ -+ /* Restore unwind state from before the VMRESUME/VMLAUNCH. */ -+ UNWIND_HINT_RESTORE - - /* Temporarily save guest's RAX. */ - push %_ASM_AX -@@ -185,21 +155,23 @@ SYM_FUNC_START(__vmx_vcpu_run) - mov %r15, VCPU_R15(%_ASM_AX) - #endif - -- /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */ -- xor %eax, %eax -+ /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */ -+ xor %ebx, %ebx - -+.Lclear_regs: - /* -- * Clear all general purpose registers except RSP and RAX to prevent -+ * Clear all general purpose registers except RSP and RBX to prevent - * speculative use of the guest's values, even those that are reloaded - * via the stack. In theory, an L1 cache miss when restoring registers - * could lead to speculative execution with the guest's values. - * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially - * free. RSP and RAX are exempt as RSP is restored by hardware during -- * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail. 
-+ * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return -+ * value. - */ --1: xor %ecx, %ecx -+ xor %eax, %eax -+ xor %ecx, %ecx - xor %edx, %edx -- xor %ebx, %ebx - xor %ebp, %ebp - xor %esi, %esi - xor %edi, %edi -@@ -216,8 +188,32 @@ SYM_FUNC_START(__vmx_vcpu_run) - - /* "POP" @regs. */ - add $WORD_SIZE, %_ASM_SP -- pop %_ASM_BX - -+ /* -+ * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before -+ * the first unbalanced RET after vmexit! -+ * -+ * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB -+ * entries and (in some cases) RSB underflow. -+ * -+ * eIBRS has its own protection against poisoned RSB, so it doesn't -+ * need the RSB filling sequence. But it does need to be enabled, and a -+ * single call to retire, before the first unbalanced RET. -+ */ -+ -+ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\ -+ X86_FEATURE_RSB_VMEXIT_LITE -+ -+ -+ pop %_ASM_ARG2 /* @flags */ -+ pop %_ASM_ARG1 /* @vmx */ -+ -+ call vmx_spec_ctrl_restore_host -+ -+ /* Put return value in AX */ -+ mov %_ASM_BX, %_ASM_AX -+ -+ pop %_ASM_BX - #ifdef CONFIG_X86_64 - pop %r12 - pop %r13 -@@ -228,11 +224,17 @@ SYM_FUNC_START(__vmx_vcpu_run) - pop %edi - #endif - pop %_ASM_BP -- ret -+ RET -+ -+.Lfixup: -+ cmpb $0, kvm_rebooting -+ jne .Lvmfail -+ ud2 -+.Lvmfail: -+ /* VM-Fail: set return value to 1 */ -+ mov $1, %_ASM_BX -+ jmp .Lclear_regs - -- /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */ --2: mov $1, %eax -- jmp 1b - SYM_FUNC_END(__vmx_vcpu_run) - - -@@ -293,7 +295,7 @@ SYM_FUNC_START(vmread_error_trampoline) - pop %_ASM_AX - pop %_ASM_BP - -- ret -+ RET - SYM_FUNC_END(vmread_error_trampoline) - - SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) -@@ -326,5 +328,5 @@ SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff) - */ - mov %_ASM_BP, %_ASM_SP - pop %_ASM_BP -- ret -+ RET - SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff) -diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c -index 7d595effb66f0..89744ee06101a 100644 ---- a/arch/x86/kvm/vmx/vmx.c -+++ b/arch/x86/kvm/vmx/vmx.c -@@ -226,6 +226,9 @@ static const struct { - #define L1D_CACHE_ORDER 4 - static void *vmx_l1d_flush_pages; - -+/* Control for disabling CPU Fill buffer clear */ -+static bool __read_mostly vmx_fb_clear_ctrl_available; -+ - static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) - { - struct page *page; -@@ -357,6 +360,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) - return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); - } - -+static void vmx_setup_fb_clear_ctrl(void) -+{ -+ u64 msr; -+ -+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && -+ !boot_cpu_has_bug(X86_BUG_MDS) && -+ !boot_cpu_has_bug(X86_BUG_TAA)) { -+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); -+ if (msr & ARCH_CAP_FB_CLEAR_CTRL) -+ vmx_fb_clear_ctrl_available = true; -+ } -+} -+ -+static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) -+{ -+ u64 msr; -+ -+ if (!vmx->disable_fb_clear) -+ return; -+ -+ msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL); -+ msr |= FB_CLEAR_DIS; -+ native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); -+ /* Cache the MSR value to avoid reading it later */ -+ vmx->msr_ia32_mcu_opt_ctrl = msr; -+} -+ -+static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) -+{ -+ if (!vmx->disable_fb_clear) -+ return; -+ -+ vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; -+ native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); -+} -+ -+static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx 
*vmx) -+{ -+ vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; -+ -+ /* -+ * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS -+ * at VMEntry. Skip the MSR read/write when a guest has no use case to -+ * execute VERW. -+ */ -+ if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || -+ ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && -+ (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && -+ (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && -+ (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && -+ (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) -+ vmx->disable_fb_clear = false; -+} -+ - static const struct kernel_param_ops vmentry_l1d_flush_ops = { - .set = vmentry_l1d_flush_set, - .get = vmentry_l1d_flush_get, -@@ -769,24 +826,30 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) - /* - * Check if MSR is intercepted for currently loaded MSR bitmap. - */ --static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) -+static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) - { -- unsigned long *msr_bitmap; -- int f = sizeof(unsigned long); -- -- if (!cpu_has_vmx_msr_bitmap()) -+ if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS)) - return true; - -- msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; -+ return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr); -+} - -- if (msr <= 0x1fff) { -- return !!test_bit(msr, msr_bitmap + 0x800 / f); -- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { -- msr &= 0x1fff; -- return !!test_bit(msr, msr_bitmap + 0xc00 / f); -- } -+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx) -+{ -+ unsigned int flags = 0; - -- return true; -+ if (vmx->loaded_vmcs->launched) -+ flags |= VMX_RUN_VMRESUME; -+ -+ /* -+ * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free -+ * to change it directly without causing a vmexit. In that case read -+ * it after vmexit and store it in vmx->spec_ctrl. -+ */ -+ if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) -+ flags |= VMX_RUN_SAVE_SPEC_CTRL; -+ -+ return flags; - } - - static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, -@@ -1269,8 +1332,10 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, - - /* - * No indirect branch prediction barrier needed when switching -- * the active VMCS within a guest, e.g. on nested VM-Enter. -- * The L1 VMM can protect itself with retpolines, IBPB or IBRS. -+ * the active VMCS within a vCPU, unless IBRS is advertised to -+ * the vCPU. To minimize the number of IBPBs executed, KVM -+ * performs IBPB on nested VM-Exit (a single nested transition -+ * may switch the active VMCS multiple times). - */ - if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) - indirect_branch_prediction_barrier(); -@@ -1351,6 +1416,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) - struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long old_rflags; - -+ /* -+ * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU -+ * is an unrestricted guest in order to mark L2 as needing emulation -+ * if L1 runs L2 as a restricted guest. 
-+ */ - if (is_unrestricted_guest(vcpu)) { - kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); - vmx->rflags = rflags; -@@ -1370,6 +1440,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) - vmx->emulation_required = vmx_emulation_required(vcpu); - } - -+static bool vmx_get_if_flag(struct kvm_vcpu *vcpu) -+{ -+ return vmx_get_rflags(vcpu) & X86_EFLAGS_IF; -+} -+ - u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) - { - u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); -@@ -1608,7 +1683,17 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) - kvm_deliver_exception_payload(vcpu); - - if (has_error_code) { -- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); -+ /* -+ * Despite the error code being architecturally defined as 32 -+ * bits, and the VMCS field being 32 bits, Intel CPUs and thus -+ * VMX don't actually supporting setting bits 31:16. Hardware -+ * will (should) never provide a bogus error code, but AMD CPUs -+ * do generate error codes with bits 31:16 set, and so KVM's -+ * ABI lets userspace shove in arbitrary 32-bit values. Drop -+ * the upper bits to avoid VM-Fail, losing information that -+ * does't really exist is preferable to killing the VM. -+ */ -+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code); - intr_info |= INTR_INFO_DELIVER_CODE_MASK; - } - -@@ -2234,6 +2319,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - ret = kvm_set_msr_common(vcpu, msr_info); - } - -+ /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ -+ if (msr_index == MSR_IA32_ARCH_CAPABILITIES) -+ vmx_update_fb_clear_dis(vcpu, vmx); -+ - return ret; - } - -@@ -2655,15 +2744,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) - if (!loaded_vmcs->msr_bitmap) - goto out_vmcs; - memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); -- -- if (IS_ENABLED(CONFIG_HYPERV) && -- static_branch_unlikely(&enable_evmcs) && -- (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { -- struct hv_enlightened_vmcs *evmcs = -- (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; -- -- evmcs->hv_enlightenments_control.msr_bitmap = 1; -- } - } - - memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); -@@ -2814,6 +2894,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu) - struct vcpu_vmx *vmx = to_vmx(vcpu); - struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); - -+ /* -+ * KVM should never use VM86 to virtualize Real Mode when L2 is active, -+ * as using VM86 is unnecessary if unrestricted guest is enabled, and -+ * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0 -+ * should VM-Fail and KVM should reject userspace attempts to stuff -+ * CR0.PG=0 when L2 is active. 
-+ */ -+ WARN_ON_ONCE(is_guest_mode(vcpu)); -+ - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); - vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); -@@ -2927,6 +3016,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) - } - } - -+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) -+{ -+ if (is_guest_mode(vcpu)) -+ return nested_get_vpid02(vcpu); -+ return to_vmx(vcpu)->vpid; -+} -+ - static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) - { - struct kvm_mmu *mmu = vcpu->arch.mmu; -@@ -2939,31 +3035,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) - if (enable_ept) - ept_sync_context(construct_eptp(vcpu, root_hpa, - mmu->shadow_root_level)); -- else if (!is_guest_mode(vcpu)) -- vpid_sync_context(to_vmx(vcpu)->vpid); - else -- vpid_sync_context(nested_get_vpid02(vcpu)); -+ vpid_sync_context(vmx_get_current_vpid(vcpu)); - } - - static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) - { - /* -- * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in -+ * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in - * vmx_flush_tlb_guest() for an explanation of why this is ok. - */ -- vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr); -+ vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); - } - - static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) - { - /* -- * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0 -- * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit -- * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is -+ * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a -+ * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are -+ * required to flush GVA->{G,H}PA mappings from the TLB if vpid is - * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), - * i.e. no explicit INVVPID is necessary. - */ -- vpid_sync_context(to_vmx(vcpu)->vpid); -+ vpid_sync_context(vmx_get_current_vpid(vcpu)); - } - - void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) -@@ -2999,6 +3093,17 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu) - #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ - CPU_BASED_CR3_STORE_EXITING) - -+static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) -+{ -+ if (is_guest_mode(vcpu)) -+ return nested_guest_cr0_valid(vcpu, cr0); -+ -+ if (to_vmx(vcpu)->nested.vmxon) -+ return nested_host_cr0_valid(vcpu, cr0); -+ -+ return true; -+} -+ - void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - { - struct vcpu_vmx *vmx = to_vmx(vcpu); -@@ -3008,7 +3113,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); - - hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); -- if (is_unrestricted_guest(vcpu)) -+ if (enable_unrestricted_guest) - hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; - else { - hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; -@@ -3036,7 +3141,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - } - #endif - -- if (enable_ept && !is_unrestricted_guest(vcpu)) { -+ if (enable_ept && !enable_unrestricted_guest) { - /* - * Ensure KVM has an up-to-date snapshot of the guest's CR3. If - * the below code _enables_ CR3 exiting, vmx_cache_reg() will -@@ -3135,8 +3240,8 @@ static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - { - /* - * We operate under the default treatment of SMM, so VMX cannot be -- * enabled under SMM. 
Note, whether or not VMXE is allowed at all is -- * handled by kvm_is_valid_cr4(). -+ * enabled under SMM. Note, whether or not VMXE is allowed at all, -+ * i.e. is a reserved bit, is handled by common x86 code. - */ - if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu)) - return false; -@@ -3159,7 +3264,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - unsigned long hw_cr4; - - hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); -- if (is_unrestricted_guest(vcpu)) -+ if (enable_unrestricted_guest) - hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; - else if (vmx->rmode.vm86_active) - hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; -@@ -3179,7 +3284,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - vcpu->arch.cr4 = cr4; - kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); - -- if (!is_unrestricted_guest(vcpu)) { -+ if (!enable_unrestricted_guest) { - if (enable_ept) { - if (!is_paging(vcpu)) { - hw_cr4 &= ~X86_CR4_PAE; -@@ -3274,18 +3379,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) - { - u32 ar; - -- if (var->unusable || !var->present) -- ar = 1 << 16; -- else { -- ar = var->type & 15; -- ar |= (var->s & 1) << 4; -- ar |= (var->dpl & 3) << 5; -- ar |= (var->present & 1) << 7; -- ar |= (var->avl & 1) << 12; -- ar |= (var->l & 1) << 13; -- ar |= (var->db & 1) << 14; -- ar |= (var->g & 1) << 15; -- } -+ ar = var->type & 15; -+ ar |= (var->s & 1) << 4; -+ ar |= (var->dpl & 3) << 5; -+ ar |= (var->present & 1) << 7; -+ ar |= (var->avl & 1) << 12; -+ ar |= (var->l & 1) << 13; -+ ar |= (var->db & 1) << 14; -+ ar |= (var->g & 1) << 15; -+ ar |= (var->unusable || !var->present) << 16; - - return ar; - } -@@ -3695,44 +3797,20 @@ void free_vpid(int vpid) - spin_unlock(&vmx_vpid_lock); - } - --static void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr) --{ -- int f = sizeof(unsigned long); -- -- if (msr <= 0x1fff) -- __clear_bit(msr, msr_bitmap + 0x000 / f); -- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -- __clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f); --} -- --static void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr) -+static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) - { -- int f = sizeof(unsigned long); -- -- if (msr <= 0x1fff) -- __clear_bit(msr, msr_bitmap + 0x800 / f); -- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -- __clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); --} -- --static void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr) --{ -- int f = sizeof(unsigned long); -- -- if (msr <= 0x1fff) -- __set_bit(msr, msr_bitmap + 0x000 / f); -- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -- __set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f); --} -- --static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr) --{ -- int f = sizeof(unsigned long); -+ /* -+ * When KVM is a nested hypervisor on top of Hyper-V and uses -+ * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR -+ * bitmap has changed. 
-+ */ -+ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) { -+ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; - -- if (msr <= 0x1fff) -- __set_bit(msr, msr_bitmap + 0x800 / f); -- else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -- __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); -+ if (evmcs->hv_enlightenments_control.msr_bitmap) -+ evmcs->hv_clean_fields &= -+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP; -+ } - } - - void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) -@@ -3743,8 +3821,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) - if (!cpu_has_vmx_msr_bitmap()) - return; - -- if (static_branch_unlikely(&enable_evmcs)) -- evmcs_touch_msr_bitmap(); -+ vmx_msr_bitmap_l01_changed(vmx); - - /* - * Mark the desired intercept state in shadow bitmap, this is needed -@@ -3788,8 +3865,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) - if (!cpu_has_vmx_msr_bitmap()) - return; - -- if (static_branch_unlikely(&enable_evmcs)) -- evmcs_touch_msr_bitmap(); -+ vmx_msr_bitmap_l01_changed(vmx); - - /* - * Mark the desired intercept state in shadow bitmap, this is needed -@@ -4012,8 +4088,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) - if (pi_test_and_set_on(&vmx->pi_desc)) - return 0; - -- if (vcpu != kvm_get_running_vcpu() && -- !kvm_vcpu_trigger_posted_interrupt(vcpu, false)) -+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) - kvm_vcpu_kick(vcpu); - - return 0; -@@ -4140,6 +4215,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) - { - struct vcpu_vmx *vmx = to_vmx(vcpu); - -+ if (is_guest_mode(vcpu)) { -+ vmx->nested.update_vmcs01_apicv_status = true; -+ return; -+ } -+ - pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); - if (cpu_has_secondary_exec_ctrls()) { - if (kvm_vcpu_apicv_active(vcpu)) -@@ -4487,6 +4567,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) - kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - - vpid_sync_context(vmx->vpid); -+ -+ vmx_update_fb_clear_dis(vcpu, vmx); - } - - static void vmx_enable_irq_window(struct kvm_vcpu *vcpu) -@@ -4833,8 +4915,33 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) - dr6 = vmx_get_exit_qual(vcpu); - if (!(vcpu->guest_debug & - (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { -+ /* -+ * If the #DB was due to ICEBP, a.k.a. INT1, skip the -+ * instruction. ICEBP generates a trap-like #DB, but -+ * despite its interception control being tied to #DB, -+ * is an instruction intercept, i.e. the VM-Exit occurs -+ * on the ICEBP itself. Note, skipping ICEBP also -+ * clears STI and MOVSS blocking. -+ * -+ * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS -+ * if single-step is enabled in RFLAGS and STI or MOVSS -+ * blocking is active, as the CPU doesn't set the bit -+ * on VM-Exit due to #DB interception. VM-Entry has a -+ * consistency check that a single-step #DB is pending -+ * in this scenario as the previous instruction cannot -+ * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV -+ * don't modify RFLAGS), therefore the one instruction -+ * delay when activating single-step breakpoints must -+ * have already expired. Note, the CPU sets/clears BS -+ * as appropriate for all other VM-Exits types. 
-+ */ - if (is_icebp(intr_info)) - WARN_ON(!skip_emulated_instruction(vcpu)); -+ else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) && -+ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & -+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS))) -+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, -+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS); - - kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); - return 1; -@@ -4940,18 +5047,11 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) - val = (val & ~vmcs12->cr0_guest_host_mask) | - (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); - -- if (!nested_guest_cr0_valid(vcpu, val)) -- return 1; -- - if (kvm_set_cr0(vcpu, val)) - return 1; - vmcs_writel(CR0_READ_SHADOW, orig_val); - return 0; - } else { -- if (to_vmx(vcpu)->nested.vmxon && -- !nested_host_cr0_valid(vcpu, val)) -- return 1; -- - return kvm_set_cr0(vcpu, val); - } - } -@@ -5907,18 +6007,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) - vmx_flush_pml_buffer(vcpu); - - /* -- * We should never reach this point with a pending nested VM-Enter, and -- * more specifically emulation of L2 due to invalid guest state (see -- * below) should never happen as that means we incorrectly allowed a -- * nested VM-Enter with an invalid vmcs12. -+ * KVM should never reach this point with a pending nested VM-Enter. -+ * More specifically, short-circuiting VM-Entry to emulate L2 due to -+ * invalid guest state should never happen as that means KVM knowingly -+ * allowed a nested VM-Enter with an invalid vmcs12. More below. - */ - if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm)) - return -EIO; - -- /* If guest state is invalid, start emulating */ -- if (vmx->emulation_required) -- return handle_invalid_guest_state(vcpu); -- - if (is_guest_mode(vcpu)) { - /* - * PML is never enabled when running L2, bail immediately if a -@@ -5940,10 +6036,30 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) - */ - nested_mark_vmcs12_pages_dirty(vcpu); - -+ /* -+ * Synthesize a triple fault if L2 state is invalid. In normal -+ * operation, nested VM-Enter rejects any attempt to enter L2 -+ * with invalid state. However, those checks are skipped if -+ * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If -+ * L2 state is invalid, it means either L1 modified SMRAM state -+ * or userspace provided bad state. Synthesize TRIPLE_FAULT as -+ * doing so is architecturally allowed in the RSM case, and is -+ * the least awful solution for the userspace case without -+ * risking false positives. -+ */ -+ if (vmx->emulation_required) { -+ nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); -+ return 1; -+ } -+ - if (nested_vmx_reflect_vmexit(vcpu)) - return 1; - } - -+ /* If guest state is invalid, start emulating. L2 is handled above. */ -+ if (vmx->emulation_required) -+ return handle_invalid_guest_state(vcpu); -+ - if (exit_reason.failed_vmentry) { - dump_vmcs(vcpu); - vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; -@@ -6288,9 +6404,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) - { - struct vcpu_vmx *vmx = to_vmx(vcpu); - int max_irr; -- bool max_irr_updated; -+ bool got_posted_interrupt; - -- if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm)) -+ if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) - return -EIO; - - if (pi_test_on(&vmx->pi_desc)) { -@@ -6300,22 +6416,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) - * But on x86 this is just a compiler barrier anyway. 
- */ - smp_mb__after_atomic(); -- max_irr_updated = -+ got_posted_interrupt = - kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); -- -- /* -- * If we are running L2 and L1 has a new pending interrupt -- * which can be injected, this may cause a vmexit or it may -- * be injected into L2. Either way, this interrupt will be -- * processed via KVM_REQ_EVENT, not RVI, because we do not use -- * virtual interrupt delivery to inject L1 interrupts into L2. -- */ -- if (is_guest_mode(vcpu) && max_irr_updated) -- kvm_make_request(KVM_REQ_EVENT, vcpu); - } else { - max_irr = kvm_lapic_find_highest_irr(vcpu); -+ got_posted_interrupt = false; - } -- vmx_hwapic_irr_update(vcpu, max_irr); -+ -+ /* -+ * Newly recognized interrupts are injected via either virtual interrupt -+ * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is -+ * disabled in two cases: -+ * -+ * 1) If L2 is running and the vCPU has a new pending interrupt. If L1 -+ * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a -+ * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected -+ * into L2, but KVM doesn't use virtual interrupt delivery to inject -+ * interrupts into L2, and so KVM_REQ_EVENT is again needed. -+ * -+ * 2) If APICv is disabled for this vCPU, assigned devices may still -+ * attempt to post interrupts. The posted interrupt vector will cause -+ * a VM-Exit and the subsequent entry will call sync_pir_to_irr. -+ */ -+ if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu)) -+ vmx_set_rvi(max_irr); -+ else if (got_posted_interrupt) -+ kvm_make_request(KVM_REQ_EVENT, vcpu); -+ - return max_irr; - } - -@@ -6375,6 +6502,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) - return; - - handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); -+ vcpu->arch.at_instruction_boundary = true; - } - - static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) -@@ -6576,6 +6704,31 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) - } - } - -+void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, -+ unsigned int flags) -+{ -+ u64 hostval = this_cpu_read(x86_spec_ctrl_current); -+ -+ if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) -+ return; -+ -+ if (flags & VMX_RUN_SAVE_SPEC_CTRL) -+ vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); -+ -+ /* -+ * If the guest/host SPEC_CTRL values differ, restore the host value. -+ * -+ * For legacy IBRS, the IBRS bit always needs to be written after -+ * transitioning from a less privileged predictor mode, regardless of -+ * whether the guest/host values differ. 
-+ */ -+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) || -+ vmx->spec_ctrl != hostval) -+ native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval); -+ -+ barrier_nospec(); -+} -+ - static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) - { - switch (to_vmx(vcpu)->exit_reason.basic) { -@@ -6589,7 +6742,8 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) - } - - static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, -- struct vcpu_vmx *vmx) -+ struct vcpu_vmx *vmx, -+ unsigned long flags) - { - kvm_guest_enter_irqoff(); - -@@ -6598,15 +6752,22 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, - vmx_l1d_flush(vcpu); - else if (static_branch_unlikely(&mds_user_clear)) - mds_clear_cpu_buffers(); -+ else if (static_branch_unlikely(&mmio_stale_data_clear) && -+ kvm_arch_has_assigned_device(vcpu->kvm)) -+ mds_clear_cpu_buffers(); -+ -+ vmx_disable_fb_clear(vmx); - - if (vcpu->arch.cr2 != native_read_cr2()) - native_write_cr2(vcpu->arch.cr2); - - vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, -- vmx->loaded_vmcs->launched); -+ flags); - - vcpu->arch.cr2 = native_read_cr2(); - -+ vmx_enable_fb_clear(vmx); -+ - kvm_guest_exit_irqoff(); - } - -@@ -6626,9 +6787,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) - * consistency check VM-Exit due to invalid guest state and bail. - */ - if (unlikely(vmx->emulation_required)) { -- -- /* We don't emulate invalid state of a nested guest */ -- vmx->fail = is_guest_mode(vcpu); -+ vmx->fail = 0; - - vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; - vmx->exit_reason.failed_vmentry = 1; -@@ -6703,27 +6862,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) - x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); - - /* The actual VMENTER/EXIT is in the .noinstr.text section. */ -- vmx_vcpu_enter_exit(vcpu, vmx); -- -- /* -- * We do not use IBRS in the kernel. If this vCPU has used the -- * SPEC_CTRL MSR it may have left it on; save the value and -- * turn it off. This is much more efficient than blindly adding -- * it to the atomic save/restore list. Especially as the former -- * (Saving guest MSRs on vmexit) doesn't even exist in KVM. -- * -- * For non-nested case: -- * If the L01 MSR bitmap does not intercept the MSR, then we need to -- * save it. -- * -- * For nested case: -- * If the L02 MSR bitmap does not intercept the MSR, then we need to -- * save it. -- */ -- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) -- vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); -- -- x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); -+ vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx)); - - /* All fields are clean at this point */ - if (static_branch_unlikely(&enable_evmcs)) { -@@ -6853,6 +6992,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) - if (err < 0) - goto free_pml; - -+ /* -+ * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a -+ * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the -+ * feature only for vmcs01, KVM currently isn't equipped to realize any -+ * performance benefits from enabling it for vmcs02. 
-+ */ -+ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && -+ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { -+ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; -+ -+ evmcs->hv_enlightenments_control.msr_bitmap = 1; -+ } -+ - /* The MSR bitmap starts with all ones */ - bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); - bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); -@@ -7359,6 +7511,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, - /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ - break; - -+ case x86_intercept_pause: -+ /* -+ * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides -+ * with vanilla NOPs in the emulator. Apply the interception -+ * check only to actual PAUSE instructions. Don't check -+ * PAUSE-loop-exiting, software can't expect a given PAUSE to -+ * exit, i.e. KVM is within its rights to allow L2 to execute -+ * the PAUSE. -+ */ -+ if ((info->rep_prefix != REPE_PREFIX) || -+ !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING)) -+ return X86EMUL_CONTINUE; -+ -+ break; -+ - /* TODO: check more intercepts... */ - default: - break; -@@ -7463,17 +7630,11 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu) - if (pi_pre_block(vcpu)) - return 1; - -- if (kvm_lapic_hv_timer_in_use(vcpu)) -- kvm_lapic_switch_to_sw_timer(vcpu); -- - return 0; - } - - static void vmx_post_block(struct kvm_vcpu *vcpu) - { -- if (kvm_x86_ops.set_hv_timer) -- kvm_lapic_switch_to_hv_timer(vcpu); -- - pi_post_block(vcpu); - } - -@@ -7524,6 +7685,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) - if (ret) - return ret; - -+ vmx->nested.nested_run_pending = 1; - vmx->nested.smm.guest_mode = false; - } - return 0; -@@ -7551,6 +7713,8 @@ static void vmx_migrate_timers(struct kvm_vcpu *vcpu) - - static void hardware_unsetup(void) - { -+ kvm_set_posted_intr_wakeup_handler(NULL); -+ - if (nested) - nested_vmx_hardware_unsetup(); - -@@ -7593,6 +7757,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { - .set_segment = vmx_set_segment, - .get_cpl = vmx_get_cpl, - .get_cs_db_l_bits = vmx_get_cs_db_l_bits, -+ .is_valid_cr0 = vmx_is_valid_cr0, - .set_cr0 = vmx_set_cr0, - .is_valid_cr4 = vmx_is_valid_cr4, - .set_cr4 = vmx_set_cr4, -@@ -7606,6 +7771,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { - .cache_reg = vmx_cache_reg, - .get_rflags = vmx_get_rflags, - .set_rflags = vmx_set_rflags, -+ .get_if_flag = vmx_get_if_flag, - - .tlb_flush_all = vmx_flush_tlb_all, - .tlb_flush_current = vmx_flush_tlb_current, -@@ -7781,6 +7947,11 @@ static __init int hardware_setup(void) - if (!cpu_has_virtual_nmis()) - enable_vnmi = 0; - -+#ifdef CONFIG_X86_SGX_KVM -+ if (!cpu_has_vmx_encls_vmexit()) -+ enable_sgx = false; -+#endif -+ - /* - * set_apic_access_page_addr() is used to reload apic access - * page upon invalidation. 
No need to do anything if not -@@ -7809,10 +7980,10 @@ static __init int hardware_setup(void) - ple_window_shrink = 0; - } - -- if (!cpu_has_vmx_apicv()) { -+ if (!cpu_has_vmx_apicv()) - enable_apicv = 0; -+ if (!enable_apicv) - vmx_x86_ops.sync_pir_to_irr = NULL; -- } - - if (cpu_has_vmx_tsc_scaling()) { - kvm_has_tsc_control = true; -@@ -7879,8 +8050,6 @@ static __init int hardware_setup(void) - vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit; - } - -- kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); -- - kvm_mce_cap_supported |= MCG_LMCE_P; - - if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST) -@@ -7904,6 +8073,9 @@ static __init int hardware_setup(void) - r = alloc_kvm_area(); - if (r) - nested_vmx_hardware_unsetup(); -+ -+ kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler); -+ - return r; - } - -@@ -7912,6 +8084,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = { - .disabled_by_bios = vmx_disabled_by_bios, - .check_processor_compatibility = vmx_check_processor_compat, - .hardware_setup = hardware_setup, -+ .intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest, - - .runtime_ops = &vmx_x86_ops, - }; -@@ -8020,6 +8193,8 @@ static int __init vmx_init(void) - return r; - } - -+ vmx_setup_fb_clear_ctrl(); -+ - for_each_possible_cpu(cpu) { - INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); - -diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h -index 592217fd7d920..20f1213a93685 100644 ---- a/arch/x86/kvm/vmx/vmx.h -+++ b/arch/x86/kvm/vmx/vmx.h -@@ -13,6 +13,7 @@ - #include "vmcs.h" - #include "vmx_ops.h" - #include "cpuid.h" -+#include "run_flags.h" - - #define MSR_TYPE_R 1 - #define MSR_TYPE_W 2 -@@ -91,6 +92,18 @@ union vmx_exit_reason { - u32 full; - }; - -+static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu) -+{ -+ /* -+ * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is -+ * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is -+ * greater than zero. However, KVM only exposes and emulates the MSR -+ * to/for the guest if the guest PMU supports at least "Architectural -+ * Performance Monitoring Version 2". -+ */ -+ return pmu->version > 1; -+} -+ - #define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc) - #define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records) - -@@ -164,6 +177,7 @@ struct nested_vmx { - bool change_vmcs01_virtual_apic_mode; - bool reload_vmcs01_apic_access_page; - bool update_vmcs01_cpu_dirty_logging; -+ bool update_vmcs01_apicv_status; - - /* - * Enlightened VMCS has been enabled. 
It does not mean that L1 has to -@@ -325,6 +339,8 @@ struct vcpu_vmx { - u64 msr_ia32_feature_control_valid_bits; - /* SGX Launch Control public key hash */ - u64 msr_ia32_sgxlepubkeyhash[4]; -+ u64 msr_ia32_mcu_opt_ctrl; -+ bool disable_fb_clear; - - struct pt_desc pt_desc; - struct lbr_desc lbr_desc; -@@ -379,7 +395,10 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu); - struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr); - void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu); - void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); --bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched); -+void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags); -+unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx); -+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, -+ unsigned int flags); - int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr); - void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); - -@@ -400,6 +419,69 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, - - void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu); - -+static inline bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ return test_bit(msr, msr_bitmap + 0x000 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f); -+ return true; -+} -+ -+static inline bool vmx_test_msr_bitmap_write(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ return test_bit(msr, msr_bitmap + 0x800 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); -+ return true; -+} -+ -+static inline void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ __clear_bit(msr, msr_bitmap + 0x000 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ __clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f); -+} -+ -+static inline void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ __clear_bit(msr, msr_bitmap + 0x800 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ __clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); -+} -+ -+static inline void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ __set_bit(msr, msr_bitmap + 0x000 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ __set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f); -+} -+ -+static inline void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr) -+{ -+ int f = sizeof(unsigned long); -+ -+ if (msr <= 0x1fff) -+ __set_bit(msr, msr_bitmap + 0x800 / f); -+ else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) -+ __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f); -+} -+ -+ - static inline u8 vmx_get_rvi(void) - { - return vmcs_read16(GUEST_INTR_STATUS) & 0xff; -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index bfe0de3008a60..a26200c3e82b5 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -184,6 +184,10 @@ module_param(force_emulation_prefix, bool, S_IRUGO); - int __read_mostly pi_inject_timer = -1; - module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR); - -+/* Enable/disable SMT_RSB bug mitigation */ -+bool __read_mostly mitigate_smt_rsb; -+module_param(mitigate_smt_rsb, bool, 
0444); -+ - /* - * Restoring the host value for MSRs that are only consumed when running in - * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU -@@ -277,6 +281,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { - STATS_DESC_COUNTER(VCPU, nested_run), - STATS_DESC_COUNTER(VCPU, directed_yield_attempted), - STATS_DESC_COUNTER(VCPU, directed_yield_successful), -+ STATS_DESC_COUNTER(VCPU, preemption_reported), -+ STATS_DESC_COUNTER(VCPU, preemption_other), - STATS_DESC_ICOUNTER(VCPU, guest_mode) - }; - -@@ -523,6 +529,7 @@ static int exception_class(int vector) - #define EXCPT_TRAP 1 - #define EXCPT_ABORT 2 - #define EXCPT_INTERRUPT 3 -+#define EXCPT_DB 4 - - static int exception_type(int vector) - { -@@ -533,8 +540,14 @@ static int exception_type(int vector) - - mask = 1 << vector; - -- /* #DB is trap, as instruction watchpoints are handled elsewhere */ -- if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) -+ /* -+ * #DBs can be trap-like or fault-like, the caller must check other CPU -+ * state, e.g. DR6, to determine whether a #DB is a trap or fault. -+ */ -+ if (mask & (1 << DB_VECTOR)) -+ return EXCPT_DB; -+ -+ if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR))) - return EXCPT_TRAP; - - if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) -@@ -599,6 +612,12 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) - } - EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); - -+/* Forcibly leave the nested mode in cases like a vCPU reset */ -+static void kvm_leave_nested(struct kvm_vcpu *vcpu) -+{ -+ kvm_x86_ops.nested_ops->leave_nested(vcpu); -+} -+ - static void kvm_multiple_exception(struct kvm_vcpu *vcpu, - unsigned nr, bool has_error, u32 error_code, - bool has_payload, unsigned long payload, bool reinject) -@@ -848,6 +867,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) - - memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); - kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); -+ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); - vcpu->arch.pdptrs_from_userspace = false; - - out: -@@ -856,6 +876,22 @@ out: - } - EXPORT_SYMBOL_GPL(load_pdptrs); - -+static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) -+{ -+#ifdef CONFIG_X86_64 -+ if (cr0 & 0xffffffff00000000UL) -+ return false; -+#endif -+ -+ if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) -+ return false; -+ -+ if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) -+ return false; -+ -+ return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0); -+} -+ - void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) - { - if ((cr0 ^ old_cr0) & X86_CR0_PG) { -@@ -878,20 +914,13 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) - unsigned long old_cr0 = kvm_read_cr0(vcpu); - unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG; - -- cr0 |= X86_CR0_ET; -- --#ifdef CONFIG_X86_64 -- if (cr0 & 0xffffffff00000000UL) -+ if (!kvm_is_valid_cr0(vcpu, cr0)) - return 1; --#endif - -- cr0 &= ~CR0_RESERVED_BITS; -- -- if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) -- return 1; -+ cr0 |= X86_CR0_ET; - -- if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) -- return 1; -+ /* Write to CR0 reserved bits are ignored, even on Intel. 
*/ -+ cr0 &= ~CR0_RESERVED_BITS; - - #ifdef CONFIG_X86_64 - if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && -@@ -1018,6 +1047,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) - - int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) - { -+ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */ - if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || - __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { - kvm_inject_gp(vcpu, 0); -@@ -1028,7 +1058,7 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) - } - EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); - --bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) -+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - { - if (cr4 & cr4_reserved_bits) - return false; -@@ -1036,9 +1066,15 @@ bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) - if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) - return false; - -- return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); -+ return true; -+} -+EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4); -+ -+static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) -+{ -+ return __kvm_is_valid_cr4(vcpu, cr4) && -+ static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); - } --EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); - - void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) - { -@@ -1091,6 +1127,18 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) - unsigned long roots_to_free = 0; - int i; - -+ /* -+ * MOV CR3 and INVPCID are usually not intercepted when using TDP, but -+ * this is reachable when running EPT=1 and unrestricted_guest=0, and -+ * also via the emulator. KVM's TDP page tables are not in the scope of -+ * the invalidation, but the guest's TLB entries need to be flushed as -+ * the CPU may have cached entries in its TLB for the target PCID. 
-+ */ -+ if (unlikely(tdp_enabled)) { -+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); -+ return; -+ } -+ - /* - * If neither the current CR3 nor any of the prev_roots use the given - * PCID, then nothing needs to be done here because a resync will -@@ -1311,27 +1359,17 @@ static const u32 msrs_to_save_all[] = { - MSR_IA32_UMWAIT_CONTROL, - - MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, -- MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, -+ MSR_ARCH_PERFMON_FIXED_CTR0 + 2, - MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, - MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, - MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, - MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3, - MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5, - MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7, -- MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9, -- MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11, -- MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13, -- MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15, -- MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17, - MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1, - MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3, - MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5, - MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7, -- MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9, -- MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11, -- MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, -- MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, -- MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, - - MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, - MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, -@@ -1435,7 +1473,7 @@ static const u32 msr_based_features_all[] = { - MSR_IA32_VMX_EPT_VPID_CAP, - MSR_IA32_VMX_VMFUNC, - -- MSR_F10H_DECFG, -+ MSR_AMD64_DE_CFG, - MSR_IA32_UCODE_REV, - MSR_IA32_ARCH_CAPABILITIES, - MSR_IA32_PERF_CAPABILITIES, -@@ -1444,12 +1482,32 @@ static const u32 msr_based_features_all[] = { - static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; - static unsigned int num_msr_based_features; - -+/* -+ * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM -+ * does not yet virtualize. 
These include: -+ * 10 - MISC_PACKAGE_CTRLS -+ * 11 - ENERGY_FILTERING_CTL -+ * 12 - DOITM -+ * 18 - FB_CLEAR_CTRL -+ * 21 - XAPIC_DISABLE_STATUS -+ * 23 - OVERCLOCKING_STATUS -+ */ -+ -+#define KVM_SUPPORTED_ARCH_CAP \ -+ (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \ -+ ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ -+ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ -+ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ -+ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO) -+ - static u64 kvm_get_arch_capabilities(void) - { - u64 data = 0; - -- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) -+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { - rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data); -+ data &= KVM_SUPPORTED_ARCH_CAP; -+ } - - /* - * If nx_huge_pages is enabled, KVM's shadow paging will ensure that -@@ -1497,6 +1555,9 @@ static u64 kvm_get_arch_capabilities(void) - */ - } - -+ if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) -+ data |= ARCH_CAP_GDS_NO; -+ - return data; - } - -@@ -1592,8 +1653,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - return r; - } - -- /* Update reserved bits */ -- if ((efer ^ old_efer) & EFER_NX) -+ if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS) - kvm_mmu_reset_context(vcpu); - - return 0; -@@ -3079,17 +3139,20 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - /* only 0 or all 1s can be written to IA32_MCi_CTL - * some Linux kernels though clear bit 10 in bank 4 to - * workaround a BIOS/GART TBL issue on AMD K8s, ignore -- * this to avoid an uncatched #GP in the guest -+ * this to avoid an uncatched #GP in the guest. -+ * -+ * UNIXWARE clears bit 0 of MC1_CTL to ignore -+ * correctable, single-bit ECC data errors. - */ - if ((offset & 0x3) == 0 && -- data != 0 && (data | (1 << 10)) != ~(u64)0) -- return -1; -+ data != 0 && (data | (1 << 10) | 1) != ~(u64)0) -+ return 1; - - /* MCi_STATUS */ - if (!msr_info->host_initiated && - (offset & 0x3) == 1 && data != 0) { - if (!can_set_mci_status(vcpu)) -- return -1; -+ return 1; - } - - vcpu->arch.mce_banks[offset] = data; -@@ -3193,10 +3256,37 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) - static_call(kvm_x86_tlb_flush_guest)(vcpu); - } - -+ -+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) -+{ -+ ++vcpu->stat.tlb_flush; -+ static_call(kvm_x86_tlb_flush_current)(vcpu); -+} -+ -+/* -+ * Service "local" TLB flush requests, which are specific to the current MMU -+ * context. In addition to the generic event handling in vcpu_enter_guest(), -+ * TLB flushes that are targeted at an MMU context also need to be serviced -+ * prior before nested VM-Enter/VM-Exit. 
-+ */ -+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) -+{ -+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) -+ kvm_vcpu_flush_tlb_current(vcpu); -+ -+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) -+ kvm_vcpu_flush_tlb_guest(vcpu); -+} -+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); -+ - static void record_steal_time(struct kvm_vcpu *vcpu) - { -- struct kvm_host_map map; -- struct kvm_steal_time *st; -+ struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; -+ struct kvm_steal_time __user *st; -+ struct kvm_memslots *slots; -+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; -+ u64 steal; -+ u32 version; - - if (kvm_xen_msr_enabled(vcpu->kvm)) { - kvm_xen_runstate_set_running(vcpu); -@@ -3206,47 +3296,85 @@ static void record_steal_time(struct kvm_vcpu *vcpu) - if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) - return; - -- /* -EAGAIN is returned in atomic context so we can just return. */ -- if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, -- &map, &vcpu->arch.st.cache, false)) -+ if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) - return; - -- st = map.hva + -- offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); -+ slots = kvm_memslots(vcpu->kvm); -+ -+ if (unlikely(slots->generation != ghc->generation || -+ gpa != ghc->gpa || -+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { -+ /* We rely on the fact that it fits in a single page. */ -+ BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); -+ -+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || -+ kvm_is_error_hva(ghc->hva) || !ghc->memslot) -+ return; -+ } - -+ st = (struct kvm_steal_time __user *)ghc->hva; - /* - * Doing a TLB flush here, on the guest's behalf, can avoid - * expensive IPIs. - */ - if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { -- u8 st_preempted = xchg(&st->preempted, 0); -+ u8 st_preempted = 0; -+ int err = -EFAULT; -+ -+ if (!user_access_begin(st, sizeof(*st))) -+ return; -+ -+ asm volatile("1: xchgb %0, %2\n" -+ "xor %1, %1\n" -+ "2:\n" -+ _ASM_EXTABLE_UA(1b, 2b) -+ : "+q" (st_preempted), -+ "+&r" (err), -+ "+m" (st->preempted)); -+ if (err) -+ goto out; -+ -+ user_access_end(); -+ -+ vcpu->arch.st.preempted = 0; - - trace_kvm_pv_tlb_flush(vcpu->vcpu_id, - st_preempted & KVM_VCPU_FLUSH_TLB); - if (st_preempted & KVM_VCPU_FLUSH_TLB) - kvm_vcpu_flush_tlb_guest(vcpu); -+ -+ if (!user_access_begin(st, sizeof(*st))) -+ goto dirty; - } else { -- st->preempted = 0; -- } -+ if (!user_access_begin(st, sizeof(*st))) -+ return; - -- vcpu->arch.st.preempted = 0; -+ unsafe_put_user(0, &st->preempted, out); -+ vcpu->arch.st.preempted = 0; -+ } - -- if (st->version & 1) -- st->version += 1; /* first time write, random junk */ -+ unsafe_get_user(version, &st->version, out); -+ if (version & 1) -+ version += 1; /* first time write, random junk */ - -- st->version += 1; -+ version += 1; -+ unsafe_put_user(version, &st->version, out); - - smp_wmb(); - -- st->steal += current->sched_info.run_delay - -+ unsafe_get_user(steal, &st->steal, out); -+ steal += current->sched_info.run_delay - - vcpu->arch.st.last_steal; - vcpu->arch.st.last_steal = current->sched_info.run_delay; -+ unsafe_put_user(steal, &st->steal, out); - -- smp_wmb(); -- -- st->version += 1; -+ version += 1; -+ unsafe_put_user(version, &st->version, out); - -- kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); -+ out: -+ user_access_end(); -+ dirty: -+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); - } - - int kvm_set_msr_common(struct 
kvm_vcpu *vcpu, struct msr_data *msr_info) -@@ -3282,7 +3410,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - - if (!msr_info->host_initiated) - return 1; -- if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent)) -+ if (kvm_get_msr_feature(&msr_ent)) - return 1; - if (data & ~msr_ent.data) - return 1; -@@ -3376,6 +3504,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) - if (data & ~supported_xss) - return 1; - vcpu->arch.ia32_xss = data; -+ kvm_update_cpuid_runtime(vcpu); - break; - case MSR_SMI_COUNT: - if (!msr_info->host_initiated) -@@ -4051,10 +4180,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) - r = KVM_CLOCK_TSC_STABLE; - break; - case KVM_CAP_X86_DISABLE_EXITS: -- r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE | -- KVM_X86_DISABLE_EXITS_CSTATE; -- if(kvm_can_mwait_in_guest()) -- r |= KVM_X86_DISABLE_EXITS_MWAIT; -+ r = KVM_X86_DISABLE_EXITS_PAUSE; -+ -+ if (!mitigate_smt_rsb) { -+ r |= KVM_X86_DISABLE_EXITS_HLT | -+ KVM_X86_DISABLE_EXITS_CSTATE; -+ -+ if (kvm_can_mwait_in_guest()) -+ r |= KVM_X86_DISABLE_EXITS_MWAIT; -+ } - break; - case KVM_CAP_X86_SMM: - /* SMBASE is usually relocated above 1M on modern chipsets, -@@ -4285,44 +4419,70 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) - - static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) - { -- struct kvm_host_map map; -- struct kvm_steal_time *st; -+ struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; -+ struct kvm_steal_time __user *st; -+ struct kvm_memslots *slots; -+ static const u8 preempted = KVM_VCPU_PREEMPTED; -+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; -+ -+ /* -+ * The vCPU can be marked preempted if and only if the VM-Exit was on -+ * an instruction boundary and will not trigger guest emulation of any -+ * kind (see vcpu_run). Vendor specific code controls (conservatively) -+ * when this is true, for example allowing the vCPU to be marked -+ * preempted if and only if the VM-Exit was due to a host interrupt. 
-+ */ -+ if (!vcpu->arch.at_instruction_boundary) { -+ vcpu->stat.preemption_other++; -+ return; -+ } - -+ vcpu->stat.preemption_reported++; - if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) - return; - - if (vcpu->arch.st.preempted) - return; - -- if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, -- &vcpu->arch.st.cache, true)) -+ /* This happens on process exit */ -+ if (unlikely(current->mm != vcpu->kvm->mm)) -+ return; -+ -+ slots = kvm_memslots(vcpu->kvm); -+ -+ if (unlikely(slots->generation != ghc->generation || -+ gpa != ghc->gpa || -+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)) - return; - -- st = map.hva + -- offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); -+ st = (struct kvm_steal_time __user *)ghc->hva; -+ BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); - -- st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; -+ if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) -+ vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; - -- kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); -+ mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); - } - - void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) - { - int idx; - -- if (vcpu->preempted && !vcpu->arch.guest_state_protected) -- vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); -+ if (vcpu->preempted) { -+ if (!vcpu->arch.guest_state_protected) -+ vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); - -- /* -- * Take the srcu lock as memslots will be accessed to check the gfn -- * cache generation against the memslots generation. -- */ -- idx = srcu_read_lock(&vcpu->kvm->srcu); -- if (kvm_xen_msr_enabled(vcpu->kvm)) -- kvm_xen_runstate_set_preempted(vcpu); -- else -- kvm_steal_time_set_preempted(vcpu); -- srcu_read_unlock(&vcpu->kvm->srcu, idx); -+ /* -+ * Take the srcu lock as memslots will be accessed to check the gfn -+ * cache generation against the memslots generation. 
-+ */ -+ idx = srcu_read_lock(&vcpu->kvm->srcu); -+ if (kvm_xen_msr_enabled(vcpu->kvm)) -+ kvm_xen_runstate_set_preempted(vcpu); -+ else -+ kvm_steal_time_set_preempted(vcpu); -+ srcu_read_unlock(&vcpu->kvm->srcu, idx); -+ } - - static_call(kvm_x86_vcpu_put)(vcpu); - vcpu->arch.last_host_tsc = rdtsc(); -@@ -4331,8 +4491,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) - static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, - struct kvm_lapic_state *s) - { -- if (vcpu->arch.apicv_active) -- static_call(kvm_x86_sync_pir_to_irr)(vcpu); -+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); - - return kvm_apic_get_state(vcpu, s); - } -@@ -4642,8 +4801,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, - vcpu->arch.apic->sipi_vector = events->sipi_vector; - - if (events->flags & KVM_VCPUEVENT_VALID_SMM) { -- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) -+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { -+ kvm_leave_nested(vcpu); - kvm_smm_changed(vcpu, events->smi.smm); -+ } - - vcpu->arch.smi_pending = events->smi.pending; - -@@ -4672,12 +4833,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, - { - unsigned long val; - -+ memset(dbgregs, 0, sizeof(*dbgregs)); - memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); - kvm_get_dr(vcpu, 6, &val); - dbgregs->dr6 = val; - dbgregs->dr7 = vcpu->arch.dr7; -- dbgregs->flags = 0; -- memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); - } - - static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, -@@ -5606,15 +5766,26 @@ split_irqchip_unlock: - if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) - break; - -- if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && -- kvm_can_mwait_in_guest()) -- kvm->arch.mwait_in_guest = true; -- if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) -- kvm->arch.hlt_in_guest = true; - if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) - kvm->arch.pause_in_guest = true; -- if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) -- kvm->arch.cstate_in_guest = true; -+ -+#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \ -+ "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests." 
-+ -+ if (!mitigate_smt_rsb) { -+ if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() && -+ (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE)) -+ pr_warn_once(SMT_RSB_MSG); -+ -+ if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && -+ kvm_can_mwait_in_guest()) -+ kvm->arch.mwait_in_guest = true; -+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) -+ kvm->arch.hlt_in_guest = true; -+ if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) -+ kvm->arch.cstate_in_guest = true; -+ } -+ - r = 0; - break; - case KVM_CAP_MSR_PLATFORM_INFO: -@@ -5626,6 +5797,11 @@ split_irqchip_unlock: - r = 0; - break; - case KVM_CAP_X86_USER_SPACE_MSR: -+ r = -EINVAL; -+ if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | -+ KVM_MSR_EXIT_REASON_UNKNOWN | -+ KVM_MSR_EXIT_REASON_FILTER)) -+ break; - kvm->arch.user_space_msr_mask = cap->args[0]; - r = 0; - break; -@@ -5746,23 +5922,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, - return 0; - } - --static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) -+static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, -+ struct kvm_msr_filter *filter) - { -- struct kvm_msr_filter __user *user_msr_filter = argp; - struct kvm_x86_msr_filter *new_filter, *old_filter; -- struct kvm_msr_filter filter; - bool default_allow; - bool empty = true; - int r = 0; - u32 i; - -- if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) -- return -EFAULT; -+ if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) -+ return -EINVAL; - -- for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) -- empty &= !filter.ranges[i].nmsrs; -+ for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) -+ empty &= !filter->ranges[i].nmsrs; - -- default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); -+ default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); - if (empty && !default_allow) - return -EINVAL; - -@@ -5770,8 +5945,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) - if (!new_filter) - return -ENOMEM; - -- for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { -- r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); -+ for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { -+ r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); - if (r) { - kvm_free_msr_filter(new_filter); - return r; -@@ -5794,6 +5969,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) - return 0; - } - -+#ifdef CONFIG_KVM_COMPAT -+/* for KVM_X86_SET_MSR_FILTER */ -+struct kvm_msr_filter_range_compat { -+ __u32 flags; -+ __u32 nmsrs; -+ __u32 base; -+ __u32 bitmap; -+}; -+ -+struct kvm_msr_filter_compat { -+ __u32 flags; -+ struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; -+}; -+ -+#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) -+ -+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, -+ unsigned long arg) -+{ -+ void __user *argp = (void __user *)arg; -+ struct kvm *kvm = filp->private_data; -+ long r = -ENOTTY; -+ -+ switch (ioctl) { -+ case KVM_X86_SET_MSR_FILTER_COMPAT: { -+ struct kvm_msr_filter __user *user_msr_filter = argp; -+ struct kvm_msr_filter_compat filter_compat; -+ struct kvm_msr_filter filter; -+ int i; -+ -+ if (copy_from_user(&filter_compat, user_msr_filter, -+ sizeof(filter_compat))) -+ return -EFAULT; -+ -+ filter.flags = filter_compat.flags; -+ for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { -+ struct kvm_msr_filter_range_compat *cr; -+ -+ cr = &filter_compat.ranges[i]; -+ filter.ranges[i] = (struct kvm_msr_filter_range) { -+ .flags = 
cr->flags, -+ .nmsrs = cr->nmsrs, -+ .base = cr->base, -+ .bitmap = (__u8 *)(ulong)cr->bitmap, -+ }; -+ } -+ -+ r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); -+ break; -+ } -+ } -+ -+ return r; -+} -+#endif -+ - #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER - static int kvm_arch_suspend_notifier(struct kvm *kvm) - { -@@ -6168,9 +6399,16 @@ set_pit2_out: - case KVM_SET_PMU_EVENT_FILTER: - r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); - break; -- case KVM_X86_SET_MSR_FILTER: -- r = kvm_vm_ioctl_set_msr_filter(kvm, argp); -+ case KVM_X86_SET_MSR_FILTER: { -+ struct kvm_msr_filter __user *user_msr_filter = argp; -+ struct kvm_msr_filter filter; -+ -+ if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) -+ return -EFAULT; -+ -+ r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); - break; -+ } - default: - r = -ENOTTY; - } -@@ -6238,12 +6476,12 @@ static void kvm_init_msr_list(void) - intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2) - continue; - break; -- case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17: -+ case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 7: - if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= - min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) - continue; - break; -- case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17: -+ case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 7: - if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= - min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp)) - continue; -@@ -6803,15 +7041,8 @@ static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, - exception, &write_emultor); - } - --#define CMPXCHG_TYPE(t, ptr, old, new) \ -- (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) -- --#ifdef CONFIG_X86_64 --# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) --#else --# define CMPXCHG64(ptr, old, new) \ -- (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) --#endif -+#define emulator_try_cmpxchg_user(t, ptr, old, new) \ -+ (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) - - static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, - unsigned long addr, -@@ -6820,12 +7051,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, - unsigned int bytes, - struct x86_exception *exception) - { -- struct kvm_host_map map; - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - u64 page_line_mask; -+ unsigned long hva; - gpa_t gpa; -- char *kaddr; -- bool exchanged; -+ int r; - - /* guests cmpxchg8b have to be emulated atomically */ - if (bytes > 8 || (bytes & (bytes - 1))) -@@ -6849,31 +7079,32 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, - if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) - goto emul_write; - -- if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) -+ hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); -+ if (kvm_is_error_hva(hva)) - goto emul_write; - -- kaddr = map.hva + offset_in_page(gpa); -+ hva += offset_in_page(gpa); - - switch (bytes) { - case 1: -- exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); -+ r = emulator_try_cmpxchg_user(u8, hva, old, new); - break; - case 2: -- exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); -+ r = emulator_try_cmpxchg_user(u16, hva, old, new); - break; - case 4: -- exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); -+ r = emulator_try_cmpxchg_user(u32, hva, old, new); - break; - case 8: -- exchanged = CMPXCHG64(kaddr, old, new); -+ r = emulator_try_cmpxchg_user(u64, hva, old, new); - break; - 
default: - BUG(); - } - -- kvm_vcpu_unmap(vcpu, &map, true); -- -- if (!exchanged) -+ if (r < 0) -+ goto emul_write; -+ if (r) - return X86EMUL_CMPXCHG_FAILED; - - kvm_page_track_write(vcpu, gpa, new, bytes); -@@ -6948,7 +7179,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, - unsigned short port, void *val, unsigned int count) - { - if (vcpu->arch.pio.count) { -- /* Complete previous iteration. */ -+ /* -+ * Complete a previous iteration that required userspace I/O. -+ * Note, @count isn't guaranteed to match pio.count as userspace -+ * can modify ECX before rerunning the vCPU. Ignore any such -+ * shenanigans as KVM doesn't support modifying the rep count, -+ * and the emulator ensures @count doesn't overflow the buffer. -+ */ - } else { - int r = __emulator_pio_in(vcpu, size, port, count); - if (!r) -@@ -6957,7 +7194,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, - /* Results already available, fall through. */ - } - -- WARN_ON(count != vcpu->arch.pio.count); - complete_emulator_pio_in(vcpu, val); - return 1; - } -@@ -7300,6 +7536,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) - return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); - } - -+static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) -+{ -+ return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); -+} -+ - static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) - { - return kvm_register_read_raw(emul_to_vcpu(ctxt), reg); -@@ -7382,6 +7623,7 @@ static const struct x86_emulate_ops emulate_ops = { - .guest_has_long_mode = emulator_guest_has_long_mode, - .guest_has_movbe = emulator_guest_has_movbe, - .guest_has_fxsr = emulator_guest_has_fxsr, -+ .guest_has_rdpid = emulator_guest_has_rdpid, - .set_nmi_mask = emulator_set_nmi_mask, - .get_hflags = emulator_get_hflags, - .exiting_smm = emulator_exiting_smm, -@@ -7747,7 +7989,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) - } - EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); - --static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) -+static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) - { - if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && - (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { -@@ -7816,25 +8058,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) - } - - /* -- * Decode to be emulated instruction. Return EMULATION_OK if success. -+ * Decode an instruction for emulation. The caller is responsible for handling -+ * code breakpoints. Note, manually detecting code breakpoints is unnecessary -+ * (and wrong) when emulating on an intercepted fault-like exception[*], as -+ * code breakpoints have higher priority and thus have already been done by -+ * hardware. -+ * -+ * [*] Except #MC, which is higher priority, but KVM should never emulate in -+ * response to a machine check. - */ - int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, - void *insn, int insn_len) - { -- int r = EMULATION_OK; - struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; -+ int r; - - init_emulate_ctxt(vcpu); - -- /* -- * We will reenter on the same instruction since we do not set -- * complete_userspace_io. This does not handle watchpoints yet, -- * those would be handled in the emulate_ops. 
-- */ -- if (!(emulation_type & EMULTYPE_SKIP) && -- kvm_vcpu_check_breakpoint(vcpu, &r)) -- return r; -- - r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); - - trace_kvm_emulate_insn_start(vcpu); -@@ -7867,6 +8107,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - if (!(emulation_type & EMULTYPE_NO_DECODE)) { - kvm_clear_exception_queue(vcpu); - -+ /* -+ * Return immediately if RIP hits a code breakpoint, such #DBs -+ * are fault-like and are higher priority than any faults on -+ * the code fetch itself. -+ */ -+ if (!(emulation_type & EMULTYPE_SKIP) && -+ kvm_vcpu_check_code_breakpoint(vcpu, &r)) -+ return r; -+ - r = x86_decode_emulated_instruction(vcpu, emulation_type, - insn, insn_len); - if (r != EMULATION_OK) { -@@ -7879,7 +8128,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - write_fault_to_spt, - emulation_type)) - return 1; -- if (ctxt->have_exception) { -+ -+ if (ctxt->have_exception && -+ !(emulation_type & EMULTYPE_SKIP)) { - /* - * #UD should result in just EMULATION_FAILED, and trap-like - * exception should not be encountered during decode. -@@ -7905,7 +8156,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - * updating interruptibility state and injecting single-step #DBs. - */ - if (emulation_type & EMULTYPE_SKIP) { -- kvm_rip_write(vcpu, ctxt->_eip); -+ if (ctxt->mode != X86EMUL_MODE_PROT64) -+ ctxt->eip = (u32)ctxt->_eip; -+ else -+ ctxt->eip = ctxt->_eip; -+ -+ kvm_rip_write(vcpu, ctxt->eip); - if (ctxt->eflags & X86_EFLAGS_RF) - kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); - return 1; -@@ -7969,6 +8225,9 @@ restart: - writeback = false; - r = 0; - vcpu->arch.complete_userspace_io = complete_emulated_mmio; -+ } else if (vcpu->arch.complete_userspace_io) { -+ writeback = false; -+ r = 0; - } else if (r == EMULATION_RESTART) - goto restart; - else -@@ -7978,6 +8237,12 @@ restart: - unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); - toggle_interruptibility(vcpu, ctxt->interruptibility); - vcpu->arch.emulate_regs_need_sync_to_vcpu = false; -+ -+ /* -+ * Note, EXCPT_DB is assumed to be fault-like as the emulator -+ * only supports code breakpoints and general detect #DB, both -+ * of which are fault-like. 
-+ */ - if (!ctxt->have_exception || - exception_type(ctxt->exception.vector) == EXCPT_TRAP) { - kvm_rip_write(vcpu, ctxt->eip); -@@ -8340,7 +8605,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { - .is_in_guest = kvm_is_in_guest, - .is_user_mode = kvm_is_user_mode, - .get_guest_ip = kvm_get_guest_ip, -- .handle_intel_pt_intr = kvm_handle_intel_pt_intr, -+ .handle_intel_pt_intr = NULL, - }; - - #ifdef CONFIG_X86_64 -@@ -8455,14 +8720,12 @@ int kvm_arch_init(void *opaque) - } - kvm_nr_uret_msrs = 0; - -- r = kvm_mmu_module_init(); -+ r = kvm_mmu_vendor_module_init(); - if (r) - goto out_free_percpu; - - kvm_timer_init(); - -- perf_register_guest_info_callbacks(&kvm_guest_cbs); -- - if (boot_cpu_has(X86_FEATURE_XSAVE)) { - host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); - supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; -@@ -8496,7 +8759,6 @@ void kvm_arch_exit(void) - clear_hv_tscchange_cb(); - #endif - kvm_lapic_exit(); -- perf_unregister_guest_info_callbacks(&kvm_guest_cbs); - - if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) - cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, -@@ -8508,7 +8770,7 @@ void kvm_arch_exit(void) - cancel_work_sync(&pvclock_gtod_work); - #endif - kvm_x86_ops.hardware_enable = NULL; -- kvm_mmu_module_exit(); -+ kvm_mmu_vendor_module_exit(); - free_percpu(user_return_msrs); - kmem_cache_destroy(x86_emulator_cache); - kmem_cache_destroy(x86_fpu_cache); -@@ -8567,6 +8829,13 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, - if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK) - return -KVM_EOPNOTSUPP; - -+ /* -+ * When tsc is in permanent catchup mode guests won't be able to use -+ * pvclock_read_retry loop to get consistent view of pvclock -+ */ -+ if (vcpu->arch.tsc_always_catchup) -+ return -KVM_EOPNOTSUPP; -+ - if (!kvm_get_walltime_and_clockread(&ts, &cycle)) - return -KVM_EOPNOTSUPP; - -@@ -8592,15 +8861,17 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, - */ - static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) - { -- struct kvm_lapic_irq lapic_irq; -- -- lapic_irq.shorthand = APIC_DEST_NOSHORT; -- lapic_irq.dest_mode = APIC_DEST_PHYSICAL; -- lapic_irq.level = 0; -- lapic_irq.dest_id = apicid; -- lapic_irq.msi_redir_hint = false; -+ /* -+ * All other fields are unused for APIC_DM_REMRD, but may be consumed by -+ * common code, e.g. for tracing. Defer initialization to the compiler. -+ */ -+ struct kvm_lapic_irq lapic_irq = { -+ .delivery_mode = APIC_DM_REMRD, -+ .dest_mode = APIC_DEST_PHYSICAL, -+ .shorthand = APIC_DEST_NOSHORT, -+ .dest_id = apicid, -+ }; - -- lapic_irq.delivery_mode = APIC_DM_REMRD; - kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); - } - -@@ -8686,7 +8957,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) - - trace_kvm_hypercall(nr, a0, a1, a2, a3); - -- op_64_bit = is_64_bit_mode(vcpu); -+ op_64_bit = is_64_bit_hypercall(vcpu); - if (!op_64_bit) { - nr &= 0xFFFFFFFF; - a0 &= 0xFFFFFFFF; -@@ -8790,14 +9061,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) - { - struct kvm_run *kvm_run = vcpu->run; - -- /* -- * if_flag is obsolete and useless, so do not bother -- * setting it for SEV-ES guests. Userspace can just -- * use kvm_run->ready_for_interrupt_injection. 
-- */ -- kvm_run->if_flag = !vcpu->arch.guest_state_protected -- && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; -- -+ kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); - kvm_run->cr8 = kvm_get_cr8(vcpu); - kvm_run->apic_base = kvm_get_apic_base(vcpu); - -@@ -8855,6 +9119,11 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu) - - static void kvm_inject_exception(struct kvm_vcpu *vcpu) - { -+ trace_kvm_inj_exception(vcpu->arch.exception.nr, -+ vcpu->arch.exception.has_error_code, -+ vcpu->arch.exception.error_code, -+ vcpu->arch.exception.injected); -+ - if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) - vcpu->arch.exception.error_code = false; - static_call(kvm_x86_queue_exception)(vcpu); -@@ -8912,13 +9181,16 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) - - /* try to inject new event if pending */ - if (vcpu->arch.exception.pending) { -- trace_kvm_inj_exception(vcpu->arch.exception.nr, -- vcpu->arch.exception.has_error_code, -- vcpu->arch.exception.error_code); -- -- vcpu->arch.exception.pending = false; -- vcpu->arch.exception.injected = true; -- -+ /* -+ * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS -+ * value pushed on the stack. Trap-like exception and all #DBs -+ * leave RF as-is (KVM follows Intel's behavior in this regard; -+ * AMD states that code breakpoint #DBs excplitly clear RF=0). -+ * -+ * Note, most versions of Intel's SDM and AMD's APM incorrectly -+ * describe the behavior of General Detect #DBs, which are -+ * fault-like. They do _not_ set RF, a la code breakpoints. -+ */ - if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) - __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | - X86_EFLAGS_RF); -@@ -8932,6 +9204,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) - } - - kvm_inject_exception(vcpu); -+ -+ vcpu->arch.exception.pending = false; -+ vcpu->arch.exception.injected = true; -+ - can_inject = false; - } - -@@ -9359,8 +9635,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) - if (irqchip_split(vcpu->kvm)) - kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); - else { -- if (vcpu->arch.apicv_active) -- static_call(kvm_x86_sync_pir_to_irr)(vcpu); -+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); - if (ioapic_in_kernel(vcpu->kvm)) - kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); - } -@@ -9378,12 +9653,16 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) - if (!kvm_apic_hw_enabled(vcpu->arch.apic)) - return; - -- if (to_hv_vcpu(vcpu)) -+ if (to_hv_vcpu(vcpu)) { - bitmap_or((ulong *)eoi_exit_bitmap, - vcpu->arch.ioapic_handled_vectors, - to_hv_synic(vcpu)->vec_bitmap, 256); -+ static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); -+ return; -+ } - -- static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); -+ static_call(kvm_x86_load_eoi_exitmap)( -+ vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); - } - - void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, -@@ -9400,6 +9679,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); - } - -+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) -+{ -+ static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); -+} -+ - void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) - { - if (!lapic_in_kernel(vcpu)) -@@ -9475,10 +9759,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) - /* Flushing all ASIDs flushes the current ASID... 
*/ - kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); - } -- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) -- kvm_vcpu_flush_tlb_current(vcpu); -- if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) -- kvm_vcpu_flush_tlb_guest(vcpu); -+ kvm_service_local_tlb_flush_requests(vcpu); - - if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { - vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; -@@ -9629,10 +9910,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) - - /* - * This handles the case where a posted interrupt was -- * notified with kvm_vcpu_kick. -+ * notified with kvm_vcpu_kick. Assigned devices can -+ * use the POSTED_INTR_VECTOR even if APICv is disabled, -+ * so do it even if APICv is disabled on this vCPU. - */ -- if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) -- static_call(kvm_x86_sync_pir_to_irr)(vcpu); -+ if (kvm_lapic_enabled(vcpu)) -+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); - - if (kvm_vcpu_exit_request(vcpu)) { - vcpu->mode = OUTSIDE_GUEST_MODE; -@@ -9668,13 +9951,16 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) - if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) - break; - -- if (vcpu->arch.apicv_active) -- static_call(kvm_x86_sync_pir_to_irr)(vcpu); -+ if (kvm_lapic_enabled(vcpu)) -+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); - - if (unlikely(kvm_vcpu_exit_request(vcpu))) { - exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; - break; - } -+ -+ /* Note, VM-Exits that go down the "slow" path are accounted below. */ -+ ++vcpu->stat.exits; - } - - /* -@@ -9772,12 +10058,28 @@ out: - - static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) - { -+ bool hv_timer; -+ - if (!kvm_arch_vcpu_runnable(vcpu) && - (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { -+ /* -+ * Switch to the software timer before halt-polling/blocking as -+ * the guest's timer may be a break event for the vCPU, and the -+ * hypervisor timer runs only when the CPU is in guest mode. -+ * Switch before halt-polling so that KVM recognizes an expired -+ * timer before blocking. -+ */ -+ hv_timer = kvm_lapic_hv_timer_in_use(vcpu); -+ if (hv_timer) -+ kvm_lapic_switch_to_sw_timer(vcpu); -+ - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - kvm_vcpu_block(vcpu); - vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - -+ if (hv_timer) -+ kvm_lapic_switch_to_hv_timer(vcpu); -+ - if (kvm_x86_ops.post_block) - static_call(kvm_x86_post_block)(vcpu); - -@@ -9823,6 +10125,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu) - vcpu->arch.l1tf_flush_l1d = true; - - for (;;) { -+ /* -+ * If another guest vCPU requests a PV TLB flush in the middle -+ * of instruction emulation, the rest of the emulation could -+ * use a stale page translation. Assume that any code after -+ * this point can start executing an instruction. -+ */ -+ vcpu->arch.at_instruction_boundary = false; - if (kvm_vcpu_running(vcpu)) { - r = vcpu_enter_guest(vcpu); - } else { -@@ -10009,6 +10318,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) - r = -EINTR; - goto out; - } -+ /* -+ * It should be impossible for the hypervisor timer to be in -+ * use before KVM has ever run the vCPU. 
-+ */ -+ WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu)); - kvm_vcpu_block(vcpu); - if (kvm_apic_accept_events(vcpu) < 0) { - r = 0; -@@ -10341,7 +10655,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) - return false; - } - -- return kvm_is_valid_cr4(vcpu, sregs->cr4); -+ return kvm_is_valid_cr4(vcpu, sregs->cr4) && -+ kvm_is_valid_cr0(vcpu, sregs->cr0); - } - - static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, -@@ -10707,8 +11022,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) - r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); - if (r < 0) - goto fail_mmu_destroy; -- if (kvm_apicv_activated(vcpu->kvm)) -+ -+ /* -+ * Defer evaluating inhibits until the vCPU is first run, as -+ * this vCPU will not get notified of any changes until this -+ * vCPU is visible to other vCPUs (marked online and added to -+ * the set of vCPUs). Opportunistically mark APICv active as -+ * VMX in particularly is highly unlikely to have inhibits. -+ * Ignore the current per-VM APICv state so that vCPU creation -+ * is guaranteed to run with a deterministic value, the request -+ * will ensure the vCPU gets the correct state before VM-Entry. -+ */ -+ if (enable_apicv) { - vcpu->arch.apicv_active = true; -+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); -+ } - } else - static_branch_inc(&kvm_has_noapic_vcpu); - -@@ -10817,11 +11145,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) - - void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) - { -- struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; - int idx; - -- kvm_release_pfn(cache->pfn, cache->dirty, cache); -- - kvmclock_reset(vcpu); - - static_call(kvm_x86_vcpu_free)(vcpu); -@@ -10850,8 +11175,18 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) - unsigned long new_cr0; - u32 eax, dummy; - -+ /* -+ * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's -+ * possible to INIT the vCPU while L2 is active. Force the vCPU back -+ * into L1 as EFER.SVME is cleared on INIT (along with all other EFER -+ * bits), i.e. virtualization is disabled. 
-+ */ -+ if (is_guest_mode(vcpu)) -+ kvm_leave_nested(vcpu); -+ - kvm_lapic_reset(vcpu, init_event); - -+ WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu)); - vcpu->arch.hflags = 0; - - vcpu->arch.smi_pending = 0; -@@ -10908,7 +11243,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) - - vcpu->arch.msr_misc_features_enables = 0; - -- vcpu->arch.xcr0 = XFEATURE_MASK_FP; -+ __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); -+ __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); - } - - memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); -@@ -10927,8 +11263,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) - eax = 0x600; - kvm_rdx_write(vcpu, eax); - -- vcpu->arch.ia32_xss = 0; -- - static_call(kvm_x86_vcpu_reset)(vcpu, init_event); - - kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); -@@ -11104,6 +11438,10 @@ int kvm_arch_hardware_setup(void *opaque) - memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); - kvm_ops_static_call_update(); - -+ if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) -+ kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr; -+ perf_register_guest_info_callbacks(&kvm_guest_cbs); -+ - if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) - supported_xss = 0; - -@@ -11131,6 +11469,9 @@ int kvm_arch_hardware_setup(void *opaque) - - void kvm_arch_hardware_unsetup(void) - { -+ perf_unregister_guest_info_callbacks(&kvm_guest_cbs); -+ kvm_guest_cbs.handle_intel_pt_intr = NULL; -+ - static_call(kvm_x86_hardware_unsetup)(); - } - -@@ -11420,7 +11761,7 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot, - if (slot->arch.rmap[i]) - continue; - -- slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); -+ slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); - if (!slot->arch.rmap[i]) { - memslot_rmap_free(slot); - return -ENOMEM; -@@ -11501,7 +11842,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm, - - lpages = __kvm_mmu_slot_lpages(slot, npages, level); - -- linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); -+ linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT); - if (!linfo) - goto out_free; - -@@ -12045,9 +12386,9 @@ void kvm_arch_end_assignment(struct kvm *kvm) - } - EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); - --bool kvm_arch_has_assigned_device(struct kvm *kvm) -+bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) - { -- return atomic_read(&kvm->arch.assigned_device_count); -+ return arch_atomic_read(&kvm->arch.assigned_device_count); - } - EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); - -@@ -12509,3 +12850,20 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter); - EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); - EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); - EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); -+ -+static int __init kvm_x86_init(void) -+{ -+ kvm_mmu_x86_module_init(); -+ mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible(); -+ return 0; -+} -+module_init(kvm_x86_init); -+ -+static void __exit kvm_x86_exit(void) -+{ -+ /* -+ * If module_init() is implemented, module_exit() must also be -+ * implemented to allow module unload. 
-+ */ -+} -+module_exit(kvm_x86_exit); -diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h -index 7d66d63dc55a6..cd0c93ec72fad 100644 ---- a/arch/x86/kvm/x86.h -+++ b/arch/x86/kvm/x86.h -@@ -26,7 +26,7 @@ static __always_inline void kvm_guest_enter_irqoff(void) - */ - instrumentation_begin(); - trace_hardirqs_on_prepare(); -- lockdep_hardirqs_on_prepare(CALLER_ADDR0); -+ lockdep_hardirqs_on_prepare(); - instrumentation_end(); - - guest_enter_irqoff(); -@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val, - - #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL - -+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu); - int kvm_check_nested_events(struct kvm_vcpu *vcpu); - - static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) -@@ -153,12 +154,24 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) - { - int cs_db, cs_l; - -+ WARN_ON_ONCE(vcpu->arch.guest_state_protected); -+ - if (!is_long_mode(vcpu)) - return false; - static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); - return cs_l; - } - -+static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu) -+{ -+ /* -+ * If running with protected guest state, the CS register is not -+ * accessible. The hypercall register values will have had to been -+ * provided in 64-bit mode, so assume the guest is in 64-bit. -+ */ -+ return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu); -+} -+ - static inline bool x86_exception_has_error_code(unsigned int vector) - { - static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) | -@@ -173,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) - return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; - } - --static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) --{ -- ++vcpu->stat.tlb_flush; -- static_call(kvm_x86_tlb_flush_current)(vcpu); --} -- - static inline int is_pae(struct kvm_vcpu *vcpu) - { - return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); -@@ -441,7 +448,7 @@ static inline void kvm_machine_check(void) - void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu); - void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu); - int kvm_spec_ctrl_test_value(u64 value); --bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); -+bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); - int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, - struct x86_exception *e); - int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva); -diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c -index 8f62baebd0286..ab9f88de6deb9 100644 ---- a/arch/x86/kvm/xen.c -+++ b/arch/x86/kvm/xen.c -@@ -93,32 +93,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) - void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) - { - struct kvm_vcpu_xen *vx = &v->arch.xen; -+ struct gfn_to_hva_cache *ghc = &vx->runstate_cache; -+ struct kvm_memslots *slots = kvm_memslots(v->kvm); -+ bool atomic = (state == RUNSTATE_runnable); - uint64_t state_entry_time; -- unsigned int offset; -+ int __user *user_state; -+ uint64_t __user *user_times; - - kvm_xen_update_runstate(v, state); - - if (!vx->runstate_set) - return; - -- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c); -+ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) && -+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len)) -+ return; -+ -+ /* We made sure it fits in a single page */ -+ BUG_ON(!ghc->memslot); -+ -+ if (atomic) -+ 
pagefault_disable(); - -- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time); --#ifdef CONFIG_X86_64 - /* -- * The only difference is alignment of uint64_t in 32-bit. -- * So the first field 'state' is accessed directly using -- * offsetof() (where its offset happens to be zero), while the -- * remaining fields which are all uint64_t, start at 'offset' -- * which we tweak here by adding 4. -+ * The only difference between 32-bit and 64-bit versions of the -+ * runstate struct us the alignment of uint64_t in 32-bit, which -+ * means that the 64-bit version has an additional 4 bytes of -+ * padding after the first field 'state'. -+ * -+ * So we use 'int __user *user_state' to point to the state field, -+ * and 'uint64_t __user *user_times' for runstate_entry_time. So -+ * the actual array of time[] in each state starts at user_times[1]. - */ -+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0); -+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0); -+ user_state = (int __user *)ghc->hva; -+ -+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c); -+ -+ user_times = (uint64_t __user *)(ghc->hva + -+ offsetof(struct compat_vcpu_runstate_info, -+ state_entry_time)); -+#ifdef CONFIG_X86_64 - BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) != - offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4); - BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) != - offsetof(struct compat_vcpu_runstate_info, time) + 4); - - if (v->kvm->arch.xen.long_mode) -- offset = offsetof(struct vcpu_runstate_info, state_entry_time); -+ user_times = (uint64_t __user *)(ghc->hva + -+ offsetof(struct vcpu_runstate_info, -+ state_entry_time)); - #endif - /* - * First write the updated state_entry_time at the appropriate -@@ -132,10 +157,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) - BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) != - sizeof(state_entry_time)); - -- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, -- &state_entry_time, offset, -- sizeof(state_entry_time))) -- return; -+ if (__put_user(state_entry_time, user_times)) -+ goto out; - smp_wmb(); - - /* -@@ -149,11 +172,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) - BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) != - sizeof(vx->current_runstate)); - -- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, -- &vx->current_runstate, -- offsetof(struct vcpu_runstate_info, state), -- sizeof(vx->current_runstate))) -- return; -+ if (__put_user(vx->current_runstate, user_state)) -+ goto out; - - /* - * Write the actual runstate times immediately after the -@@ -168,24 +188,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) - BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) != - sizeof(vx->runstate_times)); - -- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, -- &vx->runstate_times[0], -- offset + sizeof(u64), -- sizeof(vx->runstate_times))) -- return; -- -+ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times))) -+ goto out; - smp_wmb(); - - /* - * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's - * runstate_entry_time field. 
- */ -- - state_entry_time &= ~XEN_RUNSTATE_UPDATE; -- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, -- &state_entry_time, offset, -- sizeof(state_entry_time))) -- return; -+ __put_user(state_entry_time, user_times); -+ smp_wmb(); -+ -+ out: -+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); -+ -+ if (atomic) -+ pagefault_enable(); - } - - int __kvm_xen_has_interrupt(struct kvm_vcpu *v) -@@ -299,7 +318,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) - break; - - case KVM_XEN_ATTR_TYPE_SHARED_INFO: -- data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn); -+ data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn; - r = 0; - break; - -@@ -337,6 +356,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) - break; - } - -+ /* It must fit within a single page */ -+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) { -+ r = -EINVAL; -+ break; -+ } -+ - r = kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.xen.vcpu_info_cache, - data->u.gpa, -@@ -354,6 +379,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) - break; - } - -+ /* It must fit within a single page */ -+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) { -+ r = -EINVAL; -+ break; -+ } -+ - r = kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.xen.vcpu_time_info_cache, - data->u.gpa, -@@ -375,6 +406,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) - break; - } - -+ /* It must fit within a single page */ -+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) { -+ r = -EINVAL; -+ break; -+ } -+ - r = kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.xen.runstate_cache, - data->u.gpa, -@@ -698,7 +735,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) - kvm_hv_hypercall_enabled(vcpu)) - return kvm_hv_hypercall(vcpu); - -- longmode = is_64_bit_mode(vcpu); -+ longmode = is_64_bit_hypercall(vcpu); - if (!longmode) { - params[0] = (u32)kvm_rbx_read(vcpu); - params[1] = (u32)kvm_rcx_read(vcpu); -diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h -index cc0cf5f37450b..a7693a286e401 100644 ---- a/arch/x86/kvm/xen.h -+++ b/arch/x86/kvm/xen.h -@@ -97,8 +97,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) - * behalf of the vCPU. Only if the VMM does actually block - * does it need to enter RUNSTATE_blocked. 
- */ -- if (vcpu->preempted) -- kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); -+ if (WARN_ON_ONCE(!vcpu->preempted)) -+ return; -+ -+ kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); - } - - /* 32-bit compatibility definitions, also used natively in 32-bit build */ -diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S -index 16bc9130e7a5e..e768815e58ae4 100644 ---- a/arch/x86/lib/atomic64_386_32.S -+++ b/arch/x86/lib/atomic64_386_32.S -@@ -9,81 +9,83 @@ - #include - - /* if you want SMP support, implement these with real spinlocks */ --.macro LOCK reg -+.macro IRQ_SAVE reg - pushfl - cli - .endm - --.macro UNLOCK reg -+.macro IRQ_RESTORE reg - popfl - .endm - --#define BEGIN(op) \ -+#define BEGIN_IRQ_SAVE(op) \ - .macro endp; \ - SYM_FUNC_END(atomic64_##op##_386); \ - .purgem endp; \ - .endm; \ - SYM_FUNC_START(atomic64_##op##_386); \ -- LOCK v; -+ IRQ_SAVE v; - - #define ENDP endp - --#define RET \ -- UNLOCK v; \ -- ret -- --#define RET_ENDP \ -- RET; \ -- ENDP -+#define RET_IRQ_RESTORE \ -+ IRQ_RESTORE v; \ -+ RET - - #define v %ecx --BEGIN(read) -+BEGIN_IRQ_SAVE(read) - movl (v), %eax - movl 4(v), %edx --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(set) -+BEGIN_IRQ_SAVE(set) - movl %ebx, (v) - movl %ecx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(xchg) -+BEGIN_IRQ_SAVE(xchg) - movl (v), %eax - movl 4(v), %edx - movl %ebx, (v) - movl %ecx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %ecx --BEGIN(add) -+BEGIN_IRQ_SAVE(add) - addl %eax, (v) - adcl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %ecx --BEGIN(add_return) -+BEGIN_IRQ_SAVE(add_return) - addl (v), %eax - adcl 4(v), %edx - movl %eax, (v) - movl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %ecx --BEGIN(sub) -+BEGIN_IRQ_SAVE(sub) - subl %eax, (v) - sbbl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %ecx --BEGIN(sub_return) -+BEGIN_IRQ_SAVE(sub_return) - negl %edx - negl %eax - sbbl $0, %edx -@@ -91,47 +93,52 @@ BEGIN(sub_return) - adcl 4(v), %edx - movl %eax, (v) - movl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(inc) -+BEGIN_IRQ_SAVE(inc) - addl $1, (v) - adcl $0, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(inc_return) -+BEGIN_IRQ_SAVE(inc_return) - movl (v), %eax - movl 4(v), %edx - addl $1, %eax - adcl $0, %edx - movl %eax, (v) - movl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(dec) -+BEGIN_IRQ_SAVE(dec) - subl $1, (v) - sbbl $0, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(dec_return) -+BEGIN_IRQ_SAVE(dec_return) - movl (v), %eax - movl 4(v), %edx - subl $1, %eax - sbbl $0, %edx - movl %eax, (v) - movl %edx, 4(v) --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v - - #define v %esi --BEGIN(add_unless) -+BEGIN_IRQ_SAVE(add_unless) - addl %eax, %ecx - adcl %edx, %edi - addl (v), %eax -@@ -143,7 +150,7 @@ BEGIN(add_unless) - movl %edx, 4(v) - movl $1, %eax - 2: -- RET -+ RET_IRQ_RESTORE - 3: - cmpl %edx, %edi - jne 1b -@@ -153,7 +160,7 @@ ENDP - #undef v - - #define v %esi --BEGIN(inc_not_zero) -+BEGIN_IRQ_SAVE(inc_not_zero) - movl (v), %eax - movl 4(v), %edx - testl %eax, %eax -@@ -165,7 +172,7 @@ BEGIN(inc_not_zero) - movl %edx, 4(v) - movl $1, %eax - 2: -- RET -+ RET_IRQ_RESTORE - 3: - testl %edx, %edx - jne 1b -@@ -174,7 +181,7 @@ ENDP - #undef v - - #define v %esi 
--BEGIN(dec_if_positive) -+BEGIN_IRQ_SAVE(dec_if_positive) - movl (v), %eax - movl 4(v), %edx - subl $1, %eax -@@ -183,5 +190,6 @@ BEGIN(dec_if_positive) - movl %eax, (v) - movl %edx, 4(v) - 1: --RET_ENDP -+ RET_IRQ_RESTORE -+ENDP - #undef v -diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S -index ce6935690766f..90afb488b396a 100644 ---- a/arch/x86/lib/atomic64_cx8_32.S -+++ b/arch/x86/lib/atomic64_cx8_32.S -@@ -18,7 +18,7 @@ - - SYM_FUNC_START(atomic64_read_cx8) - read64 %ecx -- ret -+ RET - SYM_FUNC_END(atomic64_read_cx8) - - SYM_FUNC_START(atomic64_set_cx8) -@@ -28,7 +28,7 @@ SYM_FUNC_START(atomic64_set_cx8) - cmpxchg8b (%esi) - jne 1b - -- ret -+ RET - SYM_FUNC_END(atomic64_set_cx8) - - SYM_FUNC_START(atomic64_xchg_cx8) -@@ -37,7 +37,7 @@ SYM_FUNC_START(atomic64_xchg_cx8) - cmpxchg8b (%esi) - jne 1b - -- ret -+ RET - SYM_FUNC_END(atomic64_xchg_cx8) - - .macro addsub_return func ins insc -@@ -68,7 +68,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8) - popl %esi - popl %ebx - popl %ebp -- ret -+ RET - SYM_FUNC_END(atomic64_\func\()_return_cx8) - .endm - -@@ -93,7 +93,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8) - movl %ebx, %eax - movl %ecx, %edx - popl %ebx -- ret -+ RET - SYM_FUNC_END(atomic64_\func\()_return_cx8) - .endm - -@@ -118,7 +118,7 @@ SYM_FUNC_START(atomic64_dec_if_positive_cx8) - movl %ebx, %eax - movl %ecx, %edx - popl %ebx -- ret -+ RET - SYM_FUNC_END(atomic64_dec_if_positive_cx8) - - SYM_FUNC_START(atomic64_add_unless_cx8) -@@ -149,7 +149,7 @@ SYM_FUNC_START(atomic64_add_unless_cx8) - addl $8, %esp - popl %ebx - popl %ebp -- ret -+ RET - 4: - cmpl %edx, 4(%esp) - jne 2b -@@ -176,5 +176,5 @@ SYM_FUNC_START(atomic64_inc_not_zero_cx8) - movl $1, %eax - 3: - popl %ebx -- ret -+ RET - SYM_FUNC_END(atomic64_inc_not_zero_cx8) -diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S -index 4304320e51f4d..929ad1747dea0 100644 ---- a/arch/x86/lib/checksum_32.S -+++ b/arch/x86/lib/checksum_32.S -@@ -127,7 +127,7 @@ SYM_FUNC_START(csum_partial) - 8: - popl %ebx - popl %esi -- ret -+ RET - SYM_FUNC_END(csum_partial) - - #else -@@ -245,7 +245,7 @@ SYM_FUNC_START(csum_partial) - 90: - popl %ebx - popl %esi -- ret -+ RET - SYM_FUNC_END(csum_partial) - - #endif -@@ -371,7 +371,7 @@ EXC( movb %cl, (%edi) ) - popl %esi - popl %edi - popl %ecx # equivalent to addl $4,%esp -- ret -+ RET - SYM_FUNC_END(csum_partial_copy_generic) - - #else -@@ -447,7 +447,7 @@ EXC( movb %dl, (%edi) ) - popl %esi - popl %edi - popl %ebx -- ret -+ RET - SYM_FUNC_END(csum_partial_copy_generic) - - #undef ROUND -diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S -index c4c7dd115953c..fe59b8ac4fccd 100644 ---- a/arch/x86/lib/clear_page_64.S -+++ b/arch/x86/lib/clear_page_64.S -@@ -17,7 +17,7 @@ SYM_FUNC_START(clear_page_rep) - movl $4096/8,%ecx - xorl %eax,%eax - rep stosq -- ret -+ RET - SYM_FUNC_END(clear_page_rep) - EXPORT_SYMBOL_GPL(clear_page_rep) - -@@ -39,7 +39,7 @@ SYM_FUNC_START(clear_page_orig) - leaq 64(%rdi),%rdi - jnz .Lloop - nop -- ret -+ RET - SYM_FUNC_END(clear_page_orig) - EXPORT_SYMBOL_GPL(clear_page_orig) - -@@ -47,6 +47,6 @@ SYM_FUNC_START(clear_page_erms) - movl $4096,%ecx - xorl %eax,%eax - rep stosb -- ret -+ RET - SYM_FUNC_END(clear_page_erms) - EXPORT_SYMBOL_GPL(clear_page_erms) -diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S -index 3542502faa3b7..33c70c0160ea0 100644 ---- a/arch/x86/lib/cmpxchg16b_emu.S -+++ b/arch/x86/lib/cmpxchg16b_emu.S -@@ -37,11 +37,11 @@ 
SYM_FUNC_START(this_cpu_cmpxchg16b_emu) - - popfq - mov $1, %al -- ret -+ RET - - .Lnot_same: - popfq - xor %al,%al -- ret -+ RET - - SYM_FUNC_END(this_cpu_cmpxchg16b_emu) -diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S -index ca01ed6029f4f..6a912d58fecc3 100644 ---- a/arch/x86/lib/cmpxchg8b_emu.S -+++ b/arch/x86/lib/cmpxchg8b_emu.S -@@ -32,7 +32,7 @@ SYM_FUNC_START(cmpxchg8b_emu) - movl %ecx, 4(%esi) - - popfl -- ret -+ RET - - .Lnot_same: - movl (%esi), %eax -@@ -40,7 +40,7 @@ SYM_FUNC_START(cmpxchg8b_emu) - movl 4(%esi), %edx - - popfl -- ret -+ RET - - SYM_FUNC_END(cmpxchg8b_emu) - EXPORT_SYMBOL(cmpxchg8b_emu) -diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S -index e5f77e2930349..2c623a2bbd26e 100644 ---- a/arch/x86/lib/copy_mc_64.S -+++ b/arch/x86/lib/copy_mc_64.S -@@ -77,7 +77,7 @@ SYM_FUNC_START(copy_mc_fragile) - .L_done_memcpy_trap: - xorl %eax, %eax - .L_done: -- ret -+ RET - SYM_FUNC_END(copy_mc_fragile) - - .section .fixup, "ax" -@@ -132,7 +132,7 @@ SYM_FUNC_START(copy_mc_enhanced_fast_string) - rep movsb - /* Copy successful. Return zero */ - xorl %eax, %eax -- ret -+ RET - SYM_FUNC_END(copy_mc_enhanced_fast_string) - - .section .fixup, "ax" -@@ -145,7 +145,7 @@ SYM_FUNC_END(copy_mc_enhanced_fast_string) - * user-copy routines. - */ - movq %rcx, %rax -- ret -+ RET - - .previous - -diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S -index db4b4f9197c7d..30ea644bf446d 100644 ---- a/arch/x86/lib/copy_page_64.S -+++ b/arch/x86/lib/copy_page_64.S -@@ -17,7 +17,7 @@ SYM_FUNC_START(copy_page) - ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD - movl $4096/8, %ecx - rep movsq -- ret -+ RET - SYM_FUNC_END(copy_page) - EXPORT_SYMBOL(copy_page) - -@@ -85,5 +85,5 @@ SYM_FUNC_START_LOCAL(copy_page_regs) - movq (%rsp), %rbx - movq 1*8(%rsp), %r12 - addq $2*8, %rsp -- ret -+ RET - SYM_FUNC_END(copy_page_regs) -diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S -index 57b79c577496d..84cee84fc658a 100644 ---- a/arch/x86/lib/copy_user_64.S -+++ b/arch/x86/lib/copy_user_64.S -@@ -105,7 +105,7 @@ SYM_FUNC_START(copy_user_generic_unrolled) - jnz 21b - 23: xor %eax,%eax - ASM_CLAC -- ret -+ RET - - .section .fixup,"ax" - 30: shll $6,%ecx -@@ -173,7 +173,7 @@ SYM_FUNC_START(copy_user_generic_string) - movsb - xorl %eax,%eax - ASM_CLAC -- ret -+ RET - - .section .fixup,"ax" - 11: leal (%rdx,%rcx,8),%ecx -@@ -207,7 +207,7 @@ SYM_FUNC_START(copy_user_enhanced_fast_string) - movsb - xorl %eax,%eax - ASM_CLAC -- ret -+ RET - - .section .fixup,"ax" - 12: movl %ecx,%edx /* ecx is zerorest also */ -@@ -239,7 +239,7 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) - 1: rep movsb - 2: mov %ecx,%eax - ASM_CLAC -- ret -+ RET - - /* - * Return zero to pretend that this copy succeeded. 
This -@@ -250,7 +250,7 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) - */ - 3: xorl %eax,%eax - ASM_CLAC -- ret -+ RET - - _ASM_EXTABLE_CPY(1b, 2b) - SYM_CODE_END(.Lcopy_user_handle_tail) -@@ -361,7 +361,7 @@ SYM_FUNC_START(__copy_user_nocache) - xorl %eax,%eax - ASM_CLAC - sfence -- ret -+ RET - - .section .fixup,"ax" - .L_fixup_4x8b_copy: -diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S -index 1fbd8ee9642d1..d9e16a2cf2856 100644 ---- a/arch/x86/lib/csum-copy_64.S -+++ b/arch/x86/lib/csum-copy_64.S -@@ -201,7 +201,7 @@ SYM_FUNC_START(csum_partial_copy_generic) - movq 3*8(%rsp), %r13 - movq 4*8(%rsp), %r15 - addq $5*8, %rsp -- ret -+ RET - .Lshort: - movl %ecx, %r10d - jmp .L1 -diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c -index 65d15df6212d6..0e65d00e2339f 100644 ---- a/arch/x86/lib/delay.c -+++ b/arch/x86/lib/delay.c -@@ -54,8 +54,8 @@ static void delay_loop(u64 __loops) - " jnz 2b \n" - "3: dec %0 \n" - -- : /* we don't need output */ -- :"a" (loops) -+ : "+a" (loops) -+ : - ); - } - -diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c -index be5b5fb1598bd..520897061ee09 100644 ---- a/arch/x86/lib/error-inject.c -+++ b/arch/x86/lib/error-inject.c -@@ -1,5 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0 - -+#include - #include - #include - -@@ -10,7 +11,7 @@ asm( - ".type just_return_func, @function\n" - ".globl just_return_func\n" - "just_return_func:\n" -- " ret\n" -+ ASM_RET - ".size just_return_func, .-just_return_func\n" - ); - -diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S -index fa1bc2104b326..b70d98d79a9da 100644 ---- a/arch/x86/lib/getuser.S -+++ b/arch/x86/lib/getuser.S -@@ -57,7 +57,7 @@ SYM_FUNC_START(__get_user_1) - 1: movzbl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_1) - EXPORT_SYMBOL(__get_user_1) - -@@ -71,7 +71,7 @@ SYM_FUNC_START(__get_user_2) - 2: movzwl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_2) - EXPORT_SYMBOL(__get_user_2) - -@@ -85,7 +85,7 @@ SYM_FUNC_START(__get_user_4) - 3: movl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_4) - EXPORT_SYMBOL(__get_user_4) - -@@ -100,7 +100,7 @@ SYM_FUNC_START(__get_user_8) - 4: movq (%_ASM_AX),%rdx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - #else - LOAD_TASK_SIZE_MINUS_N(7) - cmp %_ASM_DX,%_ASM_AX -@@ -112,7 +112,7 @@ SYM_FUNC_START(__get_user_8) - 5: movl 4(%_ASM_AX),%ecx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - #endif - SYM_FUNC_END(__get_user_8) - EXPORT_SYMBOL(__get_user_8) -@@ -124,7 +124,7 @@ SYM_FUNC_START(__get_user_nocheck_1) - 6: movzbl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_nocheck_1) - EXPORT_SYMBOL(__get_user_nocheck_1) - -@@ -134,7 +134,7 @@ SYM_FUNC_START(__get_user_nocheck_2) - 7: movzwl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_nocheck_2) - EXPORT_SYMBOL(__get_user_nocheck_2) - -@@ -144,7 +144,7 @@ SYM_FUNC_START(__get_user_nocheck_4) - 8: movl (%_ASM_AX),%edx - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_nocheck_4) - EXPORT_SYMBOL(__get_user_nocheck_4) - -@@ -159,7 +159,7 @@ SYM_FUNC_START(__get_user_nocheck_8) - #endif - xor %eax,%eax - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__get_user_nocheck_8) - EXPORT_SYMBOL(__get_user_nocheck_8) - -@@ -169,7 +169,7 @@ SYM_CODE_START_LOCAL(.Lbad_get_user_clac) - bad_get_user: - xor %edx,%edx - mov $(-EFAULT),%_ASM_AX -- ret -+ RET - SYM_CODE_END(.Lbad_get_user_clac) - - #ifdef 
CONFIG_X86_32 -@@ -179,7 +179,7 @@ bad_get_user_8: - xor %edx,%edx - xor %ecx,%ecx - mov $(-EFAULT),%_ASM_AX -- ret -+ RET - SYM_CODE_END(.Lbad_get_user_8_clac) - #endif - -diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S -index dbf8cc97b7f53..12c16c6aa44a3 100644 ---- a/arch/x86/lib/hweight.S -+++ b/arch/x86/lib/hweight.S -@@ -32,7 +32,7 @@ SYM_FUNC_START(__sw_hweight32) - imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 - shrl $24, %eax # w = w_tmp >> 24 - __ASM_SIZE(pop,) %__ASM_REG(dx) -- ret -+ RET - SYM_FUNC_END(__sw_hweight32) - EXPORT_SYMBOL(__sw_hweight32) - -@@ -65,7 +65,7 @@ SYM_FUNC_START(__sw_hweight64) - - popq %rdx - popq %rdi -- ret -+ RET - #else /* CONFIG_X86_32 */ - /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ - pushl %ecx -@@ -77,7 +77,7 @@ SYM_FUNC_START(__sw_hweight64) - addl %ecx, %eax # result - - popl %ecx -- ret -+ RET - #endif - SYM_FUNC_END(__sw_hweight64) - EXPORT_SYMBOL(__sw_hweight64) -diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c -index a1d24fdc07cf0..c8a962c2e653d 100644 ---- a/arch/x86/lib/insn-eval.c -+++ b/arch/x86/lib/insn-eval.c -@@ -412,32 +412,44 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) - #endif /* CONFIG_X86_64 */ - } - --static int get_reg_offset(struct insn *insn, struct pt_regs *regs, -- enum reg_type type) -+static const int pt_regoff[] = { -+ offsetof(struct pt_regs, ax), -+ offsetof(struct pt_regs, cx), -+ offsetof(struct pt_regs, dx), -+ offsetof(struct pt_regs, bx), -+ offsetof(struct pt_regs, sp), -+ offsetof(struct pt_regs, bp), -+ offsetof(struct pt_regs, si), -+ offsetof(struct pt_regs, di), -+#ifdef CONFIG_X86_64 -+ offsetof(struct pt_regs, r8), -+ offsetof(struct pt_regs, r9), -+ offsetof(struct pt_regs, r10), -+ offsetof(struct pt_regs, r11), -+ offsetof(struct pt_regs, r12), -+ offsetof(struct pt_regs, r13), -+ offsetof(struct pt_regs, r14), -+ offsetof(struct pt_regs, r15), -+#else -+ offsetof(struct pt_regs, ds), -+ offsetof(struct pt_regs, es), -+ offsetof(struct pt_regs, fs), -+ offsetof(struct pt_regs, gs), -+#endif -+}; -+ -+int pt_regs_offset(struct pt_regs *regs, int regno) -+{ -+ if ((unsigned)regno < ARRAY_SIZE(pt_regoff)) -+ return pt_regoff[regno]; -+ return -EDOM; -+} -+ -+static int get_regno(struct insn *insn, enum reg_type type) - { -+ int nr_registers = ARRAY_SIZE(pt_regoff); - int regno = 0; - -- static const int regoff[] = { -- offsetof(struct pt_regs, ax), -- offsetof(struct pt_regs, cx), -- offsetof(struct pt_regs, dx), -- offsetof(struct pt_regs, bx), -- offsetof(struct pt_regs, sp), -- offsetof(struct pt_regs, bp), -- offsetof(struct pt_regs, si), -- offsetof(struct pt_regs, di), --#ifdef CONFIG_X86_64 -- offsetof(struct pt_regs, r8), -- offsetof(struct pt_regs, r9), -- offsetof(struct pt_regs, r10), -- offsetof(struct pt_regs, r11), -- offsetof(struct pt_regs, r12), -- offsetof(struct pt_regs, r13), -- offsetof(struct pt_regs, r14), -- offsetof(struct pt_regs, r15), --#endif -- }; -- int nr_registers = ARRAY_SIZE(regoff); - /* - * Don't possibly decode a 32-bit instructions as - * reading a 64-bit-only register. 
-@@ -505,7 +517,18 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, - WARN_ONCE(1, "decoded an instruction with an invalid register"); - return -EINVAL; - } -- return regoff[regno]; -+ return regno; -+} -+ -+static int get_reg_offset(struct insn *insn, struct pt_regs *regs, -+ enum reg_type type) -+{ -+ int regno = get_regno(insn, type); -+ -+ if (regno < 0) -+ return regno; -+ -+ return pt_regs_offset(regs, regno); - } - - /** -@@ -1417,7 +1440,7 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) - } - } - --static int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip) -+int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip) - { - unsigned long seg_base = 0; - -diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c -index c565def611e24..55e371cc69fd5 100644 ---- a/arch/x86/lib/insn.c -+++ b/arch/x86/lib/insn.c -@@ -13,6 +13,7 @@ - #endif - #include /*__ignore_sync_check__ */ - #include /* __ignore_sync_check__ */ -+#include /* __ignore_sync_check__ */ - - #include - #include -@@ -37,10 +38,10 @@ - ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) - - #define __get_next(t, insn) \ -- ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) -+ ({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); }) - - #define __peek_nbyte_next(t, insn, n) \ -- ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); }) -+ ({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); }) - - #define get_next(t, insn) \ - ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); }) -diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S -index cb5a1964506b1..6ff2f56cb0f71 100644 ---- a/arch/x86/lib/iomap_copy_64.S -+++ b/arch/x86/lib/iomap_copy_64.S -@@ -10,6 +10,6 @@ - */ - SYM_FUNC_START(__iowrite32_copy) - movl %edx,%ecx -- rep movsd -- ret -+ rep movsl -+ RET - SYM_FUNC_END(__iowrite32_copy) -diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S -index 1cc9da6e29c79..59cf2343f3d90 100644 ---- a/arch/x86/lib/memcpy_64.S -+++ b/arch/x86/lib/memcpy_64.S -@@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy) - rep movsq - movl %edx, %ecx - rep movsb -- ret -+ RET - SYM_FUNC_END(memcpy) - SYM_FUNC_END_ALIAS(__memcpy) - EXPORT_SYMBOL(memcpy) -@@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms) - movq %rdi, %rax - movq %rdx, %rcx - rep movsb -- ret -+ RET - SYM_FUNC_END(memcpy_erms) - - SYM_FUNC_START_LOCAL(memcpy_orig) -@@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) - movq %r9, 1*8(%rdi) - movq %r10, -2*8(%rdi, %rdx) - movq %r11, -1*8(%rdi, %rdx) -- retq -+ RET - .p2align 4 - .Lless_16bytes: - cmpl $8, %edx -@@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) - movq -1*8(%rsi, %rdx), %r9 - movq %r8, 0*8(%rdi) - movq %r9, -1*8(%rdi, %rdx) -- retq -+ RET - .p2align 4 - .Lless_8bytes: - cmpl $4, %edx -@@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) - movl -4(%rsi, %rdx), %r8d - movl %ecx, (%rdi) - movl %r8d, -4(%rdi, %rdx) -- retq -+ RET - .p2align 4 - .Lless_3bytes: - subl $1, %edx -@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig) - movb %cl, (%rdi) - - .Lend: -- retq -+ RET - SYM_FUNC_END(memcpy_orig) - - .popsection -diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S -index 64801010d312d..4b8ee3a2fcc37 100644 ---- a/arch/x86/lib/memmove_64.S -+++ b/arch/x86/lib/memmove_64.S -@@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove) - /* FSRM implies ERMS => no 
length checks, do the copy directly */ - .Lmemmove_begin_forward: - ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM -- ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS -+ ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS - - /* - * movsq instruction have many startup latency -@@ -205,7 +205,12 @@ SYM_FUNC_START(__memmove) - movb (%rsi), %r11b - movb %r11b, (%rdi) - 13: -- retq -+ RET -+ -+.Lmemmove_erms: -+ movq %rdx, %rcx -+ rep movsb -+ RET - SYM_FUNC_END(__memmove) - SYM_FUNC_END_ALIAS(memmove) - EXPORT_SYMBOL(__memmove) -diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S -index 9827ae267f96e..d624f2bc42f16 100644 ---- a/arch/x86/lib/memset_64.S -+++ b/arch/x86/lib/memset_64.S -@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset) - movl %edx,%ecx - rep stosb - movq %r9,%rax -- ret -+ RET - SYM_FUNC_END(__memset) - SYM_FUNC_END_ALIAS(memset) - EXPORT_SYMBOL(memset) -@@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms) - movq %rdx,%rcx - rep stosb - movq %r9,%rax -- ret -+ RET - SYM_FUNC_END(memset_erms) - - SYM_FUNC_START_LOCAL(memset_orig) -@@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig) - - .Lende: - movq %r10,%rax -- ret -+ RET - - .Lbad_alignment: - cmpq $7,%rdx -diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S -index a2b9caa5274c8..ebd259f314963 100644 ---- a/arch/x86/lib/msr-reg.S -+++ b/arch/x86/lib/msr-reg.S -@@ -35,7 +35,7 @@ SYM_FUNC_START(\op\()_safe_regs) - movl %edi, 28(%r10) - popq %r12 - popq %rbx -- ret -+ RET - 3: - movl $-EIO, %r11d - jmp 2b -@@ -77,7 +77,7 @@ SYM_FUNC_START(\op\()_safe_regs) - popl %esi - popl %ebp - popl %ebx -- ret -+ RET - 3: - movl $-EIO, 4(%esp) - jmp 2b -diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S -index 0ea344c5ea439..ecb2049c1273f 100644 ---- a/arch/x86/lib/putuser.S -+++ b/arch/x86/lib/putuser.S -@@ -52,7 +52,7 @@ SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) - 1: movb %al,(%_ASM_CX) - xor %ecx,%ecx - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__put_user_1) - EXPORT_SYMBOL(__put_user_1) - EXPORT_SYMBOL(__put_user_nocheck_1) -@@ -66,7 +66,7 @@ SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) - 2: movw %ax,(%_ASM_CX) - xor %ecx,%ecx - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__put_user_2) - EXPORT_SYMBOL(__put_user_2) - EXPORT_SYMBOL(__put_user_nocheck_2) -@@ -80,7 +80,7 @@ SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) - 3: movl %eax,(%_ASM_CX) - xor %ecx,%ecx - ASM_CLAC -- ret -+ RET - SYM_FUNC_END(__put_user_4) - EXPORT_SYMBOL(__put_user_4) - EXPORT_SYMBOL(__put_user_nocheck_4) -diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S -index ec9922cba30a4..6f5321b36dbb1 100644 ---- a/arch/x86/lib/retpoline.S -+++ b/arch/x86/lib/retpoline.S -@@ -9,8 +9,9 @@ - #include - #include - #include -+#include - -- .section .text.__x86.indirect_thunk -+ .section .text..__x86.indirect_thunk - - .macro RETPOLINE reg - ANNOTATE_INTRA_FUNCTION_CALL -@@ -23,50 +24,18 @@ - .Ldo_rop_\@: - mov %\reg, (%_ASM_SP) - UNWIND_HINT_FUNC -- ret -+ RET - .endm - - .macro THUNK reg - -- .align 32 -- --SYM_FUNC_START(__x86_indirect_thunk_\reg) -- -- ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ -- __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \ -- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD -- --SYM_FUNC_END(__x86_indirect_thunk_\reg) -- --.endm -- --/* -- * This generates .altinstr_replacement symbols for use by objtool. 
They, -- * however, must not actually live in .altinstr_replacement since that will be -- * discarded after init, but module alternatives will also reference these -- * symbols. -- * -- * Their names matches the "__x86_indirect_" prefix to mark them as retpolines. -- */ --.macro ALT_THUNK reg -- -- .align 1 -- --SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg) -- ANNOTATE_RETPOLINE_SAFE --1: call *%\reg --2: .skip 5-(2b-1b), 0x90 --SYM_FUNC_END(__x86_indirect_alt_call_\reg) -- --STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg) -- --SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg) -- ANNOTATE_RETPOLINE_SAFE --1: jmp *%\reg --2: .skip 5-(2b-1b), 0x90 --SYM_FUNC_END(__x86_indirect_alt_jmp_\reg) -+ .align RETPOLINE_THUNK_SIZE -+SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) -+ UNWIND_HINT_EMPTY - --STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg) -+ ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ -+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \ -+ __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE) - - .endm - -@@ -85,22 +54,214 @@ STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg) - #define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) - #define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) - --#undef GEN -+ .align RETPOLINE_THUNK_SIZE -+SYM_CODE_START(__x86_indirect_thunk_array) -+ - #define GEN(reg) THUNK reg - #include -- - #undef GEN -+ -+ .align RETPOLINE_THUNK_SIZE -+SYM_CODE_END(__x86_indirect_thunk_array) -+ - #define GEN(reg) EXPORT_THUNK(reg) - #include -- - #undef GEN --#define GEN(reg) ALT_THUNK reg --#include - --#undef GEN --#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_call_ ## reg) --#include -+/* -+ * This function name is magical and is used by -mfunction-return=thunk-extern -+ * for the compiler to generate JMPs to it. -+ */ -+#ifdef CONFIG_RETHUNK - --#undef GEN --#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_jmp_ ## reg) --#include -+/* -+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at -+ * special addresses: -+ * -+ * - srso_alias_untrain_ret() is 2M aligned -+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 -+ * and 20 in its virtual address are set (while those bits in the -+ * srso_alias_untrain_ret() function are cleared). -+ * -+ * This guarantees that those two addresses will alias in the branch -+ * target buffer of Zen3/4 generations, leading to any potential -+ * poisoned entries at that BTB slot to get evicted. -+ * -+ * As a result, srso_alias_safe_ret() becomes a safe return. 
-+ */ -+#ifdef CONFIG_CPU_SRSO -+ .section .text..__x86.rethunk_untrain -+ -+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) -+ UNWIND_HINT_FUNC -+ ASM_NOP2 -+ lfence -+ jmp srso_alias_return_thunk -+SYM_FUNC_END(srso_alias_untrain_ret) -+__EXPORT_THUNK(srso_alias_untrain_ret) -+ -+ .section .text..__x86.rethunk_safe -+#else -+/* dummy definition for alternatives */ -+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) -+ ANNOTATE_UNRET_SAFE -+ ret -+ int3 -+SYM_FUNC_END(srso_alias_untrain_ret) -+#endif -+ -+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) -+ lea 8(%_ASM_SP), %_ASM_SP -+ UNWIND_HINT_FUNC -+ ANNOTATE_UNRET_SAFE -+ ret -+ int3 -+SYM_FUNC_END(srso_alias_safe_ret) -+ -+ .section .text..__x86.return_thunk -+ -+SYM_CODE_START(srso_alias_return_thunk) -+ UNWIND_HINT_FUNC -+ ANNOTATE_NOENDBR -+ call srso_alias_safe_ret -+ ud2 -+SYM_CODE_END(srso_alias_return_thunk) -+ -+/* -+ * Some generic notes on the untraining sequences: -+ * -+ * They are interchangeable when it comes to flushing potentially wrong -+ * RET predictions from the BTB. -+ * -+ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the -+ * Retbleed sequence because the return sequence done there -+ * (srso_safe_ret()) is longer and the return sequence must fully nest -+ * (end before) the untraining sequence. Therefore, the untraining -+ * sequence must fully overlap the return sequence. -+ * -+ * Regarding alignment - the instructions which need to be untrained, -+ * must all start at a cacheline boundary for Zen1/2 generations. That -+ * is, instruction sequences starting at srso_safe_ret() and -+ * the respective instruction sequences at retbleed_return_thunk() -+ * must start at a cacheline boundary. -+ */ -+ -+/* -+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture: -+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for -+ * alignment within the BTB. -+ * 2) The instruction at retbleed_untrain_ret must contain, and not -+ * end with, the 0xc3 byte of the RET. -+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread -+ * from re-poisioning the BTB prediction. -+ */ -+ .align 64 -+ .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc -+SYM_FUNC_START_NOALIGN(retbleed_untrain_ret); -+ -+ /* -+ * As executed from retbleed_untrain_ret, this is: -+ * -+ * TEST $0xcc, %bl -+ * LFENCE -+ * JMP retbleed_return_thunk -+ * -+ * Executing the TEST instruction has a side effect of evicting any BTB -+ * prediction (potentially attacker controlled) attached to the RET, as -+ * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. -+ */ -+ .byte 0xf6 -+ -+ /* -+ * As executed from retbleed_return_thunk, this is a plain RET. -+ * -+ * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. -+ * -+ * We subsequently jump backwards and architecturally execute the RET. -+ * This creates a correct BTB prediction (type=ret), but in the -+ * meantime we suffer Straight Line Speculation (because the type was -+ * no branch) which is halted by the INT3. -+ * -+ * With SMT enabled and STIBP active, a sibling thread cannot poison -+ * RET's prediction to a type of its choice, but can evict the -+ * prediction due to competitive sharing. If the prediction is -+ * evicted, retbleed_return_thunk will suffer Straight Line Speculation -+ * which will be contained safely by the INT3. 
-+ */ -+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) -+ ret -+ int3 -+SYM_CODE_END(retbleed_return_thunk) -+ -+ /* -+ * Ensure the TEST decoding / BTB invalidation is complete. -+ */ -+ lfence -+ -+ /* -+ * Jump back and execute the RET in the middle of the TEST instruction. -+ * INT3 is for SLS protection. -+ */ -+ jmp retbleed_return_thunk -+ int3 -+SYM_FUNC_END(retbleed_untrain_ret) -+__EXPORT_THUNK(retbleed_untrain_ret) -+ -+/* -+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() -+ * above. On kernel entry, srso_untrain_ret() is executed which is a -+ * -+ * movabs $0xccccc30824648d48,%rax -+ * -+ * and when the return thunk executes the inner label srso_safe_ret() -+ * later, it is a stack manipulation and a RET which is mispredicted and -+ * thus a "safe" one to use. -+ */ -+ .align 64 -+ .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc -+SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) -+ .byte 0x48, 0xb8 -+ -+/* -+ * This forces the function return instruction to speculate into a trap -+ * (UD2 in srso_return_thunk() below). This RET will then mispredict -+ * and execution will continue at the return site read from the top of -+ * the stack. -+ */ -+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) -+ lea 8(%_ASM_SP), %_ASM_SP -+ ret -+ int3 -+ int3 -+ /* end of movabs */ -+ lfence -+ call srso_safe_ret -+ ud2 -+SYM_CODE_END(srso_safe_ret) -+SYM_FUNC_END(srso_untrain_ret) -+__EXPORT_THUNK(srso_untrain_ret) -+ -+SYM_CODE_START(srso_return_thunk) -+ UNWIND_HINT_FUNC -+ ANNOTATE_NOENDBR -+ call srso_safe_ret -+ ud2 -+SYM_CODE_END(srso_return_thunk) -+ -+SYM_FUNC_START(entry_untrain_ret) -+ ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ -+ "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ -+ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS -+SYM_FUNC_END(entry_untrain_ret) -+__EXPORT_THUNK(entry_untrain_ret) -+ -+SYM_CODE_START(__x86_return_thunk) -+ UNWIND_HINT_FUNC -+ ANNOTATE_NOENDBR -+ ANNOTATE_UNRET_SAFE -+ ret -+ int3 -+SYM_CODE_END(__x86_return_thunk) -+EXPORT_SYMBOL(__x86_return_thunk) -+ -+#endif /* CONFIG_RETHUNK */ -diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c -index 508c81e97ab10..f1c0befb62df5 100644 ---- a/arch/x86/lib/usercopy_64.c -+++ b/arch/x86/lib/usercopy_64.c -@@ -121,7 +121,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size) - - /* cache copy and flush to align dest */ - if (!IS_ALIGNED(dest, 8)) { -- unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest); -+ size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest); - - memcpy((void *) dest, (void *) source, len); - clean_cache_range((void *) dest, len); -diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S -index 951da2ad54bbf..8c270ab415bee 100644 ---- a/arch/x86/math-emu/div_Xsig.S -+++ b/arch/x86/math-emu/div_Xsig.S -@@ -341,7 +341,7 @@ L_exit: - popl %esi - - leave -- ret -+ RET - - - #ifdef PARANOID -diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S -index d047d1816abe9..637439bfefa47 100644 ---- a/arch/x86/math-emu/div_small.S -+++ b/arch/x86/math-emu/div_small.S -@@ -44,5 +44,5 @@ SYM_FUNC_START(FPU_div_small) - popl %esi - - leave -- ret -+ RET - SYM_FUNC_END(FPU_div_small) -diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S -index 4afc7b1fa6e95..54a031b661421 100644 ---- a/arch/x86/math-emu/mul_Xsig.S -+++ b/arch/x86/math-emu/mul_Xsig.S -@@ -62,7 +62,7 @@ SYM_FUNC_START(mul32_Xsig) - - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(mul32_Xsig) - - -@@ -115,7 
+115,7 @@ SYM_FUNC_START(mul64_Xsig) - - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(mul64_Xsig) - - -@@ -175,5 +175,5 @@ SYM_FUNC_START(mul_Xsig_Xsig) - - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(mul_Xsig_Xsig) -diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S -index 702315eecb860..35fd723fc0df8 100644 ---- a/arch/x86/math-emu/polynom_Xsig.S -+++ b/arch/x86/math-emu/polynom_Xsig.S -@@ -133,5 +133,5 @@ L_accum_done: - popl %edi - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(polynomial_Xsig) -diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S -index cad1d60b1e844..594936eeed67a 100644 ---- a/arch/x86/math-emu/reg_norm.S -+++ b/arch/x86/math-emu/reg_norm.S -@@ -72,7 +72,7 @@ L_exit_valid: - L_exit: - popl %ebx - leave -- ret -+ RET - - - L_zero: -@@ -138,7 +138,7 @@ L_exit_nuo_valid: - - popl %ebx - leave -- ret -+ RET - - L_exit_nuo_zero: - movl TAG_Zero,%eax -@@ -146,5 +146,5 @@ L_exit_nuo_zero: - - popl %ebx - leave -- ret -+ RET - SYM_FUNC_END(FPU_normalize_nuo) -diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S -index 4a9fc3cc5a4d4..0bb2a092161af 100644 ---- a/arch/x86/math-emu/reg_round.S -+++ b/arch/x86/math-emu/reg_round.S -@@ -437,7 +437,7 @@ fpu_Arith_exit: - popl %edi - popl %esi - leave -- ret -+ RET - - - /* -diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S -index 9c9e2c810afe8..07247287a3af7 100644 ---- a/arch/x86/math-emu/reg_u_add.S -+++ b/arch/x86/math-emu/reg_u_add.S -@@ -164,6 +164,6 @@ L_exit: - popl %edi - popl %esi - leave -- ret -+ RET - #endif /* PARANOID */ - SYM_FUNC_END(FPU_u_add) -diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S -index e2fb5c2644c55..b5a41e2fc484c 100644 ---- a/arch/x86/math-emu/reg_u_div.S -+++ b/arch/x86/math-emu/reg_u_div.S -@@ -468,7 +468,7 @@ L_exit: - popl %esi - - leave -- ret -+ RET - #endif /* PARANOID */ - - SYM_FUNC_END(FPU_u_div) -diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S -index 0c779c87ac5b3..e2588b24b8c2c 100644 ---- a/arch/x86/math-emu/reg_u_mul.S -+++ b/arch/x86/math-emu/reg_u_mul.S -@@ -144,7 +144,7 @@ L_exit: - popl %edi - popl %esi - leave -- ret -+ RET - #endif /* PARANOID */ - - SYM_FUNC_END(FPU_u_mul) -diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S -index e9bb7c248649f..4c900c29e4ff2 100644 ---- a/arch/x86/math-emu/reg_u_sub.S -+++ b/arch/x86/math-emu/reg_u_sub.S -@@ -270,5 +270,5 @@ L_exit: - popl %edi - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(FPU_u_sub) -diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S -index d9d7de8dbd7b6..126c40473badb 100644 ---- a/arch/x86/math-emu/round_Xsig.S -+++ b/arch/x86/math-emu/round_Xsig.S -@@ -78,7 +78,7 @@ L_exit: - popl %esi - popl %ebx - leave -- ret -+ RET - SYM_FUNC_END(round_Xsig) - - -@@ -138,5 +138,5 @@ L_n_exit: - popl %esi - popl %ebx - leave -- ret -+ RET - SYM_FUNC_END(norm_Xsig) -diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S -index 726af985f7582..f726bf6f6396e 100644 ---- a/arch/x86/math-emu/shr_Xsig.S -+++ b/arch/x86/math-emu/shr_Xsig.S -@@ -45,7 +45,7 @@ SYM_FUNC_START(shr_Xsig) - popl %ebx - popl %esi - leave -- ret -+ RET - - L_more_than_31: - cmpl $64,%ecx -@@ -61,7 +61,7 @@ L_more_than_31: - movl $0,8(%esi) - popl %esi - leave -- ret -+ RET - - L_more_than_63: - cmpl $96,%ecx -@@ -76,7 +76,7 @@ L_more_than_63: - movl %edx,8(%esi) - popl %esi - leave -- ret -+ RET - - L_more_than_95: - xorl %eax,%eax -@@ -85,5 +85,5 
@@ L_more_than_95: - movl %eax,8(%esi) - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(shr_Xsig) -diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S -index 4fc89174caf0c..f608a28a4c43a 100644 ---- a/arch/x86/math-emu/wm_shrx.S -+++ b/arch/x86/math-emu/wm_shrx.S -@@ -55,7 +55,7 @@ SYM_FUNC_START(FPU_shrx) - popl %ebx - popl %esi - leave -- ret -+ RET - - L_more_than_31: - cmpl $64,%ecx -@@ -70,7 +70,7 @@ L_more_than_31: - movl $0,4(%esi) - popl %esi - leave -- ret -+ RET - - L_more_than_63: - cmpl $96,%ecx -@@ -84,7 +84,7 @@ L_more_than_63: - movl %edx,4(%esi) - popl %esi - leave -- ret -+ RET - - L_more_than_95: - xorl %eax,%eax -@@ -92,7 +92,7 @@ L_more_than_95: - movl %eax,4(%esi) - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(FPU_shrx) - - -@@ -146,7 +146,7 @@ SYM_FUNC_START(FPU_shrxs) - popl %ebx - popl %esi - leave -- ret -+ RET - - /* Shift by [0..31] bits */ - Ls_less_than_32: -@@ -163,7 +163,7 @@ Ls_less_than_32: - popl %ebx - popl %esi - leave -- ret -+ RET - - /* Shift by [64..95] bits */ - Ls_more_than_63: -@@ -189,7 +189,7 @@ Ls_more_than_63: - popl %ebx - popl %esi - leave -- ret -+ RET - - Ls_more_than_95: - /* Shift by [96..inf) bits */ -@@ -203,5 +203,5 @@ Ls_more_than_95: - popl %ebx - popl %esi - leave -- ret -+ RET - SYM_FUNC_END(FPU_shrxs) -diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c -index f5e1e60c9095f..6c2f1b76a0b61 100644 ---- a/arch/x86/mm/cpu_entry_area.c -+++ b/arch/x86/mm/cpu_entry_area.c -@@ -110,6 +110,13 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu) - cea_map_stack(NMI); - cea_map_stack(DB); - cea_map_stack(MCE); -+ -+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) { -+ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) { -+ cea_map_stack(VC); -+ cea_map_stack(VC2); -+ } -+ } - } - #else - static inline void percpu_setup_exception_stacks(unsigned int cpu) -diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c -index e1664e9f969c3..a9c7efd4b7946 100644 ---- a/arch/x86/mm/extable.c -+++ b/arch/x86/mm/extable.c -@@ -2,48 +2,50 @@ - #include - #include - #include -+#include - #include - - #include - #include - #include - #include -+#include - --typedef bool (*ex_handler_t)(const struct exception_table_entry *, -- struct pt_regs *, int, unsigned long, -- unsigned long); -+static inline unsigned long *pt_regs_nr(struct pt_regs *regs, int nr) -+{ -+ int reg_offset = pt_regs_offset(regs, nr); -+ static unsigned long __dummy; -+ -+ if (WARN_ON_ONCE(reg_offset < 0)) -+ return &__dummy; -+ -+ return (unsigned long *)((unsigned long)regs + reg_offset); -+} - - static inline unsigned long - ex_fixup_addr(const struct exception_table_entry *x) - { - return (unsigned long)&x->fixup + x->fixup; - } --static inline ex_handler_t --ex_fixup_handler(const struct exception_table_entry *x) --{ -- return (ex_handler_t)((unsigned long)&x->handler + x->handler); --} - --__visible bool ex_handler_default(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_default(const struct exception_table_entry *e, -+ struct pt_regs *regs) - { -- regs->ip = ex_fixup_addr(fixup); -+ if (e->data & EX_FLAG_CLEAR_AX) -+ regs->ax = 0; -+ if (e->data & EX_FLAG_CLEAR_DX) -+ regs->dx = 0; -+ -+ regs->ip = ex_fixup_addr(e); - return true; - } --EXPORT_SYMBOL(ex_handler_default); - --__visible bool ex_handler_fault(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- 
unsigned long fault_addr) -+static bool ex_handler_fault(const struct exception_table_entry *fixup, -+ struct pt_regs *regs, int trapnr) - { -- regs->ip = ex_fixup_addr(fixup); - regs->ax = trapnr; -- return true; -+ return ex_handler_default(fixup, regs); - } --EXPORT_SYMBOL_GPL(ex_handler_fault); - - /* - * Handler for when we fail to restore a task's FPU state. We should never get -@@ -55,10 +57,8 @@ EXPORT_SYMBOL_GPL(ex_handler_fault); - * of vulnerability by restoring from the initial state (essentially, zeroing - * out all the FPU registers) if we can't restore from the task's FPU state. - */ --__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_fprestore(const struct exception_table_entry *fixup, -+ struct pt_regs *regs) - { - regs->ip = ex_fixup_addr(fixup); - -@@ -68,98 +68,77 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, - __restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate()); - return true; - } --EXPORT_SYMBOL_GPL(ex_handler_fprestore); - --__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_uaccess(const struct exception_table_entry *fixup, -+ struct pt_regs *regs, int trapnr) - { - WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); -- regs->ip = ex_fixup_addr(fixup); -- return true; -+ return ex_handler_default(fixup, regs); - } --EXPORT_SYMBOL(ex_handler_uaccess); - --__visible bool ex_handler_copy(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_copy(const struct exception_table_entry *fixup, -+ struct pt_regs *regs, int trapnr) - { - WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); -- regs->ip = ex_fixup_addr(fixup); -- regs->ax = trapnr; -- return true; -+ return ex_handler_fault(fixup, regs, trapnr); - } --EXPORT_SYMBOL(ex_handler_copy); - --__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_msr(const struct exception_table_entry *fixup, -+ struct pt_regs *regs, bool wrmsr, bool safe, int reg) - { -- if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", -- (unsigned int)regs->cx, regs->ip, (void *)regs->ip)) -+ if (__ONCE_LITE_IF(!safe && wrmsr)) { -+ pr_warn("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", -+ (unsigned int)regs->cx, (unsigned int)regs->dx, -+ (unsigned int)regs->ax, regs->ip, (void *)regs->ip); - show_stack_regs(regs); -+ } - -- /* Pretend that the read succeeded and returned 0. 
*/ -- regs->ip = ex_fixup_addr(fixup); -- regs->ax = 0; -- regs->dx = 0; -- return true; --} --EXPORT_SYMBOL(ex_handler_rdmsr_unsafe); -- --__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) --{ -- if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", -- (unsigned int)regs->cx, (unsigned int)regs->dx, -- (unsigned int)regs->ax, regs->ip, (void *)regs->ip)) -+ if (__ONCE_LITE_IF(!safe && !wrmsr)) { -+ pr_warn("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", -+ (unsigned int)regs->cx, regs->ip, (void *)regs->ip); - show_stack_regs(regs); -+ } - -- /* Pretend that the write succeeded. */ -- regs->ip = ex_fixup_addr(fixup); -- return true; -+ if (!wrmsr) { -+ /* Pretend that the read succeeded and returned 0. */ -+ regs->ax = 0; -+ regs->dx = 0; -+ } -+ -+ if (safe) -+ *pt_regs_nr(regs, reg) = -EIO; -+ -+ return ex_handler_default(fixup, regs); - } --EXPORT_SYMBOL(ex_handler_wrmsr_unsafe); - --__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, -- unsigned long fault_addr) -+static bool ex_handler_clear_fs(const struct exception_table_entry *fixup, -+ struct pt_regs *regs) - { - if (static_cpu_has(X86_BUG_NULL_SEG)) - asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS)); - asm volatile ("mov %0, %%fs" : : "rm" (0)); -- return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr); -+ return ex_handler_default(fixup, regs); - } --EXPORT_SYMBOL(ex_handler_clear_fs); - --enum handler_type ex_get_fault_handler_type(unsigned long ip) -+static bool ex_handler_imm_reg(const struct exception_table_entry *fixup, -+ struct pt_regs *regs, int reg, int imm) - { -- const struct exception_table_entry *e; -- ex_handler_t handler; -+ *pt_regs_nr(regs, reg) = (long)imm; -+ return ex_handler_default(fixup, regs); -+} - -- e = search_exception_tables(ip); -- if (!e) -- return EX_HANDLER_NONE; -- handler = ex_fixup_handler(e); -- if (handler == ex_handler_fault) -- return EX_HANDLER_FAULT; -- else if (handler == ex_handler_uaccess || handler == ex_handler_copy) -- return EX_HANDLER_UACCESS; -- else -- return EX_HANDLER_OTHER; -+int ex_get_fixup_type(unsigned long ip) -+{ -+ const struct exception_table_entry *e = search_exception_tables(ip); -+ -+ return e ? 
FIELD_GET(EX_DATA_TYPE_MASK, e->data) : EX_TYPE_NONE; - } - - int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, - unsigned long fault_addr) - { - const struct exception_table_entry *e; -- ex_handler_t handler; -+ int type, reg, imm; - - #ifdef CONFIG_PNPBIOS - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { -@@ -179,8 +158,48 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code, - if (!e) - return 0; - -- handler = ex_fixup_handler(e); -- return handler(e, regs, trapnr, error_code, fault_addr); -+ type = FIELD_GET(EX_DATA_TYPE_MASK, e->data); -+ reg = FIELD_GET(EX_DATA_REG_MASK, e->data); -+ imm = FIELD_GET(EX_DATA_IMM_MASK, e->data); -+ -+ switch (type) { -+ case EX_TYPE_DEFAULT: -+ case EX_TYPE_DEFAULT_MCE_SAFE: -+ return ex_handler_default(e, regs); -+ case EX_TYPE_FAULT: -+ case EX_TYPE_FAULT_MCE_SAFE: -+ return ex_handler_fault(e, regs, trapnr); -+ case EX_TYPE_UACCESS: -+ return ex_handler_uaccess(e, regs, trapnr); -+ case EX_TYPE_COPY: -+ return ex_handler_copy(e, regs, trapnr); -+ case EX_TYPE_CLEAR_FS: -+ return ex_handler_clear_fs(e, regs); -+ case EX_TYPE_FPU_RESTORE: -+ return ex_handler_fprestore(e, regs); -+ case EX_TYPE_BPF: -+ return ex_handler_bpf(e, regs); -+ case EX_TYPE_WRMSR: -+ return ex_handler_msr(e, regs, true, false, reg); -+ case EX_TYPE_RDMSR: -+ return ex_handler_msr(e, regs, false, false, reg); -+ case EX_TYPE_WRMSR_SAFE: -+ return ex_handler_msr(e, regs, true, true, reg); -+ case EX_TYPE_RDMSR_SAFE: -+ return ex_handler_msr(e, regs, false, true, reg); -+ case EX_TYPE_WRMSR_IN_MCE: -+ ex_handler_msr_mce(regs, true); -+ break; -+ case EX_TYPE_RDMSR_IN_MCE: -+ ex_handler_msr_mce(regs, false); -+ break; -+ case EX_TYPE_POP_REG: -+ regs->sp += sizeof(long); -+ fallthrough; -+ case EX_TYPE_IMM_REG: -+ return ex_handler_imm_reg(e, regs, reg, imm); -+ } -+ BUG(); - } - - extern unsigned int early_recursion_flag; -diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c -index 84a2c8c4af735..4bfed53e210ec 100644 ---- a/arch/x86/mm/fault.c -+++ b/arch/x86/mm/fault.c -@@ -32,6 +32,7 @@ - #include /* VMALLOC_START, ... */ - #include /* kvm_handle_async_pf */ - #include /* fixup_vdso_exception() */ -+#include - - #define CREATE_TRACE_POINTS - #include -@@ -631,6 +632,9 @@ static noinline void - page_fault_oops(struct pt_regs *regs, unsigned long error_code, - unsigned long address) - { -+#ifdef CONFIG_VMAP_STACK -+ struct stack_info info; -+#endif - unsigned long flags; - int sig; - -@@ -649,9 +653,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code, - * that we're in vmalloc space to avoid this. - */ - if (is_vmalloc_addr((void *)address) && -- (((unsigned long)current->stack - 1 - address < PAGE_SIZE) || -- address - ((unsigned long)current->stack + THREAD_SIZE) < PAGE_SIZE)) { -- unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *); -+ get_stack_guard_info((void *)address, &info)) { - /* - * We're likely to be running with very little stack space - * left. It's plausible that we'd hit this condition but -@@ -662,13 +664,11 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code, - * and then double-fault, though, because we're likely to - * break the console driver and lose most of the stack dump. 
- */ -- asm volatile ("movq %[stack], %%rsp\n\t" -- "call handle_stack_overflow\n\t" -- "1: jmp 1b" -- : ASM_CALL_CONSTRAINT -- : "D" ("kernel stack overflow (page fault)"), -- "S" (regs), "d" (address), -- [stack] "rm" (stack)); -+ call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*), -+ handle_stack_overflow, -+ ASM_CALL_ARG3, -+ , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info)); -+ - unreachable(); - } - #endif -diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 23a14d82e7838..56d5ab70bfa1c 100644 ---- a/arch/x86/mm/init.c -+++ b/arch/x86/mm/init.c -@@ -9,6 +9,7 @@ - #include - - #include -+#include - #include - #include - #include -@@ -26,6 +27,7 @@ - #include - #include - #include -+#include - - /* - * We need to define the tracepoints somewhere, and tlb.c -@@ -78,10 +80,20 @@ static uint8_t __pte2cachemode_tbl[8] = { - [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, - }; - --/* Check that the write-protect PAT entry is set for write-protect */ -+/* -+ * Check that the write-protect PAT entry is set for write-protect. -+ * To do this without making assumptions how PAT has been set up (Xen has -+ * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache -+ * mode via the __cachemode2pte_tbl[] into protection bits (those protection -+ * bits will select a cache mode of WP or better), and then translate the -+ * protection bits back into the cache mode using __pte2cm_idx() and the -+ * __pte2cachemode_tbl[] array. This will return the really used cache mode. -+ */ - bool x86_has_pat_wp(void) - { -- return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP; -+ uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP]; -+ -+ return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP; - } - - enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) -@@ -251,6 +263,24 @@ static void __init probe_page_size_mask(void) - } - } - -+#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \ -+ .family = 6, \ -+ .model = _model, \ -+ } -+/* -+ * INVLPG may not properly flush Global entries -+ * on these CPUs when PCIDs are enabled. -+ */ -+static const struct x86_cpu_id invlpg_miss_ids[] = { -+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE ), -+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ), -+ INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ), -+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ), -+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P), -+ INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S), -+ {} -+}; -+ - static void setup_pcid(void) - { - if (!IS_ENABLED(CONFIG_X86_64)) -@@ -259,6 +289,12 @@ static void setup_pcid(void) - if (!boot_cpu_has(X86_FEATURE_PCID)) - return; - -+ if (x86_match_cpu(invlpg_miss_ids)) { -+ pr_info("Incomplete global flushes, disabling PCID"); -+ setup_clear_cpu_cap(X86_FEATURE_PCID); -+ return; -+ } -+ - if (boot_cpu_has(X86_FEATURE_PGE)) { - /* - * This can't be cr4_set_bits_and_update_boot() -- the -@@ -787,9 +823,12 @@ void __init poking_init(void) - spinlock_t *ptl; - pte_t *ptep; - -- poking_mm = copy_init_mm(); -+ poking_mm = mm_alloc(); - BUG_ON(!poking_mm); - -+ /* Xen PV guests need the PGD to be pinned. */ -+ paravirt_arch_dup_mmap(NULL, poking_mm); -+ - /* - * Randomize the poking address, but make sure that the following page - * will be mapped at the same PMD. 
We need 2 pages, so find space for 3, -diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c -index 36098226a9573..200ad5ceeb43f 100644 ---- a/arch/x86/mm/init_64.c -+++ b/arch/x86/mm/init_64.c -@@ -646,7 +646,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, - pages++; - spin_lock(&init_mm.page_table_lock); - -- prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE); -+ prot = __pgprot(pgprot_val(prot) | _PAGE_PSE); - - set_pte_init((pte_t *)pud, - pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, -@@ -902,6 +902,8 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end - - static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) - { -+ const unsigned long page = ALIGN_DOWN(start, PMD_SIZE); -+ - vmemmap_flush_unused_pmd(); - - /* -@@ -914,8 +916,7 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long - * Mark with PAGE_UNUSED the unused parts of the new memmap range - */ - if (!IS_ALIGNED(start, PMD_SIZE)) -- memset((void *)start, PAGE_UNUSED, -- start - ALIGN_DOWN(start, PMD_SIZE)); -+ memset((void *)page, PAGE_UNUSED, start - page); - - /* - * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of -diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c -index 60ade7dd71bd9..5dfa40279f0fd 100644 ---- a/arch/x86/mm/ioremap.c -+++ b/arch/x86/mm/ioremap.c -@@ -216,9 +216,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size, - * Mappings have to be page-aligned - */ - offset = phys_addr & ~PAGE_MASK; -- phys_addr &= PHYSICAL_PAGE_MASK; -+ phys_addr &= PAGE_MASK; - size = PAGE_ALIGN(last_addr+1) - phys_addr; - -+ /* -+ * Mask out any bits not part of the actual physical -+ * address, like memory encryption bits. 
-+ */ -+ phys_addr &= PHYSICAL_PAGE_MASK; -+ - retval = memtype_reserve(phys_addr, (u64)phys_addr + size, - pcm, &new_pcm); - if (retval) { -@@ -614,6 +620,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr, - static bool memremap_is_setup_data(resource_size_t phys_addr, - unsigned long size) - { -+ struct setup_indirect *indirect; - struct setup_data *data; - u64 paddr, paddr_next; - -@@ -626,6 +633,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, - - data = memremap(paddr, sizeof(*data), - MEMREMAP_WB | MEMREMAP_DEC); -+ if (!data) { -+ pr_warn("failed to memremap setup_data entry\n"); -+ return false; -+ } - - paddr_next = data->next; - len = data->len; -@@ -635,10 +646,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, - return true; - } - -- if (data->type == SETUP_INDIRECT && -- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { -- paddr = ((struct setup_indirect *)data->data)->addr; -- len = ((struct setup_indirect *)data->data)->len; -+ if (data->type == SETUP_INDIRECT) { -+ memunmap(data); -+ data = memremap(paddr, sizeof(*data) + len, -+ MEMREMAP_WB | MEMREMAP_DEC); -+ if (!data) { -+ pr_warn("failed to memremap indirect setup_data\n"); -+ return false; -+ } -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) { -+ paddr = indirect->addr; -+ len = indirect->len; -+ } - } - - memunmap(data); -@@ -659,22 +681,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, - static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, - unsigned long size) - { -+ struct setup_indirect *indirect; - struct setup_data *data; - u64 paddr, paddr_next; - - paddr = boot_params.hdr.setup_data; - while (paddr) { -- unsigned int len; -+ unsigned int len, size; - - if (phys_addr == paddr) - return true; - - data = early_memremap_decrypted(paddr, sizeof(*data)); -+ if (!data) { -+ pr_warn("failed to early memremap setup_data entry\n"); -+ return false; -+ } -+ -+ size = sizeof(*data); - - paddr_next = data->next; - len = data->len; - -- early_memunmap(data, sizeof(*data)); -+ if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { -+ early_memunmap(data, sizeof(*data)); -+ return true; -+ } -+ -+ if (data->type == SETUP_INDIRECT) { -+ size += len; -+ early_memunmap(data, sizeof(*data)); -+ data = early_memremap_decrypted(paddr, size); -+ if (!data) { -+ pr_warn("failed to early memremap indirect setup_data\n"); -+ return false; -+ } -+ -+ indirect = (struct setup_indirect *)data->data; -+ -+ if (indirect->type != SETUP_INDIRECT) { -+ paddr = indirect->addr; -+ len = indirect->len; -+ } -+ } -+ -+ early_memunmap(data, size); - - if ((phys_addr > paddr) && (phys_addr < (paddr + len))) - return true; -diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c -index 557f0fe25dff4..37db264866b64 100644 ---- a/arch/x86/mm/kaslr.c -+++ b/arch/x86/mm/kaslr.c -@@ -172,10 +172,10 @@ void __meminit init_trampoline_kaslr(void) - set_p4d(p4d_tramp, - __p4d(_KERNPG_TABLE | __pa(pud_page_tramp))); - -- set_pgd(&trampoline_pgd_entry, -- __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp))); -+ trampoline_pgd_entry = -+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)); - } else { -- set_pgd(&trampoline_pgd_entry, -- __pgd(_KERNPG_TABLE | __pa(pud_page_tramp))); -+ trampoline_pgd_entry = -+ __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)); - } - } -diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c -index ff08dc4636347..e29b1418d00c7 100644 ---- a/arch/x86/mm/mem_encrypt.c -+++ 
b/arch/x86/mm/mem_encrypt.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - - #include - #include -diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S -index 17d292b7072f1..9de3d900bc927 100644 ---- a/arch/x86/mm/mem_encrypt_boot.S -+++ b/arch/x86/mm/mem_encrypt_boot.S -@@ -65,7 +65,10 @@ SYM_FUNC_START(sme_encrypt_execute) - movq %rbp, %rsp /* Restore original stack pointer */ - pop %rbp - -+ /* Offset to __x86_return_thunk would be wrong here */ -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - SYM_FUNC_END(sme_encrypt_execute) - - SYM_FUNC_START(__enc_copy) -@@ -151,6 +154,9 @@ SYM_FUNC_START(__enc_copy) - pop %r12 - pop %r15 - -+ /* Offset to __x86_return_thunk would be wrong here */ -+ ANNOTATE_UNRET_SAFE - ret -+ int3 - .L__enc_copy_end: - SYM_FUNC_END(__enc_copy) -diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c -index 470b202084306..c7e9fb1d830d2 100644 ---- a/arch/x86/mm/mem_encrypt_identity.c -+++ b/arch/x86/mm/mem_encrypt_identity.c -@@ -27,6 +27,15 @@ - #undef CONFIG_PARAVIRT_XXL - #undef CONFIG_PARAVIRT_SPINLOCKS - -+/* -+ * This code runs before CPU feature bits are set. By default, the -+ * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if -+ * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5 -+ * is provided to handle this situation and, instead, use a variable that -+ * has been set by the early boot code. -+ */ -+#define USE_EARLY_PGTABLE_L5 -+ - #include - #include - #include -@@ -576,7 +585,8 @@ void __init sme_enable(struct boot_params *bp) - cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | - ((u64)bp->ext_cmd_line_ptr << 32)); - -- cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); -+ if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0) -+ return; - - if (!strncmp(buffer, cmdline_on, sizeof(buffer))) - sme_me_mask = me_mask; -diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c -index 1e9b93b088dbf..e360c6892a584 100644 ---- a/arch/x86/mm/numa.c -+++ b/arch/x86/mm/numa.c -@@ -860,7 +860,7 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable) - return; - } - mask = node_to_cpumask_map[node]; -- if (!mask) { -+ if (!cpumask_available(mask)) { - pr_err("node_to_cpumask_map[%i] NULL\n", node); - dump_stack(); - return; -@@ -906,7 +906,7 @@ const struct cpumask *cpumask_of_node(int node) - dump_stack(); - return cpu_none_mask; - } -- if (node_to_cpumask_map[node] == NULL) { -+ if (!cpumask_available(node_to_cpumask_map[node])) { - printk(KERN_WARNING - "cpumask_of_node(%d): no node_to_cpumask_map!\n", - node); -diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c -index 4ba2a3ee4bce1..d5ef64ddd35e9 100644 ---- a/arch/x86/mm/pat/memtype.c -+++ b/arch/x86/mm/pat/memtype.c -@@ -101,7 +101,7 @@ int pat_debug_enable; - static int __init pat_debug_setup(char *str) - { - pat_debug_enable = 1; -- return 0; -+ return 1; - } - __setup("debugpat", pat_debug_setup); - -diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c -index 59ba2968af1b3..511172d70825c 100644 ---- a/arch/x86/mm/tlb.c -+++ b/arch/x86/mm/tlb.c -@@ -854,13 +854,11 @@ done: - nr_invalidate); - } - --static bool tlb_is_not_lazy(int cpu) -+static bool tlb_is_not_lazy(int cpu, void *data) - { - return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu); - } - --static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); -- - DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); - EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared); - -@@ 
-889,36 +887,11 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, - * up on the new contents of what used to be page tables, while - * doing a speculative memory access. - */ -- if (info->freed_tables) { -+ if (info->freed_tables) - on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true); -- } else { -- /* -- * Although we could have used on_each_cpu_cond_mask(), -- * open-coding it has performance advantages, as it eliminates -- * the need for indirect calls or retpolines. In addition, it -- * allows to use a designated cpumask for evaluating the -- * condition, instead of allocating one. -- * -- * This code works under the assumption that there are no nested -- * TLB flushes, an assumption that is already made in -- * flush_tlb_mm_range(). -- * -- * cond_cpumask is logically a stack-local variable, but it is -- * more efficient to have it off the stack and not to allocate -- * it on demand. Preemption is disabled and this code is -- * non-reentrant. -- */ -- struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); -- int cpu; -- -- cpumask_clear(cond_cpumask); -- -- for_each_cpu(cpu, cpumask) { -- if (tlb_is_not_lazy(cpu)) -- __cpumask_set_cpu(cpu, cond_cpumask); -- } -- on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true); -- } -+ else -+ on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func, -+ (void *)info, 1, cpumask); - } - - void flush_tlb_multi(const struct cpumask *cpumask, -diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c -index 9ea57389c554b..dccaab2113f93 100644 ---- a/arch/x86/net/bpf_jit_comp.c -+++ b/arch/x86/net/bpf_jit_comp.c -@@ -15,7 +15,6 @@ - #include - #include - #include --#include - - static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) - { -@@ -225,6 +224,14 @@ static void jit_fill_hole(void *area, unsigned int size) - - struct jit_context { - int cleanup_addr; /* Epilogue code offset */ -+ -+ /* -+ * Program specific offsets of labels in the code; these rely on the -+ * JIT doing at least 2 passes, recording the position on the first -+ * pass, only to generate the correct offset on the second pass. 
-+ */ -+ int tail_call_direct_label; -+ int tail_call_indirect_label; - }; - - /* Maximum number of bytes emitted while JITing one eBPF insn */ -@@ -380,20 +387,38 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, - return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true); - } - --static int get_pop_bytes(bool *callee_regs_used) -+#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) -+ -+static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) - { -- int bytes = 0; -+ u8 *prog = *pprog; - -- if (callee_regs_used[3]) -- bytes += 2; -- if (callee_regs_used[2]) -- bytes += 2; -- if (callee_regs_used[1]) -- bytes += 2; -- if (callee_regs_used[0]) -- bytes += 1; -+#ifdef CONFIG_RETPOLINE -+ if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { -+ EMIT_LFENCE(); -+ EMIT2(0xFF, 0xE0 + reg); -+ } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { -+ emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); -+ } else -+#endif -+ EMIT2(0xFF, 0xE0 + reg); -+ -+ *pprog = prog; -+} -+ -+static void emit_return(u8 **pprog, u8 *ip) -+{ -+ u8 *prog = *pprog; -+ -+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { -+ emit_jump(&prog, &__x86_return_thunk, ip); -+ } else { -+ EMIT1(0xC3); /* ret */ -+ if (IS_ENABLED(CONFIG_SLS)) -+ EMIT1(0xCC); /* int3 */ -+ } - -- return bytes; -+ *pprog = prog; - } - - /* -@@ -411,29 +436,12 @@ static int get_pop_bytes(bool *callee_regs_used) - * out: - */ - static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, -- u32 stack_depth) -+ u32 stack_depth, u8 *ip, -+ struct jit_context *ctx) - { - int tcc_off = -4 - round_up(stack_depth, 8); -- u8 *prog = *pprog; -- int pop_bytes = 0; -- int off1 = 42; -- int off2 = 31; -- int off3 = 9; -- -- /* count the additional bytes used for popping callee regs from stack -- * that need to be taken into account for each of the offsets that -- * are used for bailing out of the tail call -- */ -- pop_bytes = get_pop_bytes(callee_regs_used); -- off1 += pop_bytes; -- off2 += pop_bytes; -- off3 += pop_bytes; -- -- if (stack_depth) { -- off1 += 7; -- off2 += 7; -- off3 += 7; -- } -+ u8 *prog = *pprog, *start = *pprog; -+ int offset; - - /* - * rdi - pointer to ctx -@@ -448,8 +456,9 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, - EMIT2(0x89, 0xD2); /* mov edx, edx */ - EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ - offsetof(struct bpf_array, map.max_entries)); --#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */ -- EMIT2(X86_JBE, OFFSET1); /* jbe out */ -+ -+ offset = ctx->tail_call_indirect_label - (prog + 2 - start); -+ EMIT2(X86_JBE, offset); /* jbe out */ - - /* - * if (tail_call_cnt > MAX_TAIL_CALL_CNT) -@@ -457,8 +466,9 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, - */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ --#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE) -- EMIT2(X86_JA, OFFSET2); /* ja out */ -+ -+ offset = ctx->tail_call_indirect_label - (prog + 2 - start); -+ EMIT2(X86_JA, offset); /* ja out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ - -@@ -471,12 +481,11 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, - * goto out; - */ - EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ --#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE) -- EMIT2(X86_JE, OFFSET3); /* je out */ - -- *pprog 
= prog; -- pop_callee_regs(pprog, callee_regs_used); -- prog = *pprog; -+ offset = ctx->tail_call_indirect_label - (prog + 2 - start); -+ EMIT2(X86_JE, offset); /* je out */ -+ -+ pop_callee_regs(&prog, callee_regs_used); - - EMIT1(0x58); /* pop rax */ - if (stack_depth) -@@ -493,41 +502,21 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used, - * rdi == ctx (1st arg) - * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET - */ -- RETPOLINE_RCX_BPF_JIT(); -+ emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); - - /* out: */ -+ ctx->tail_call_indirect_label = prog - start; - *pprog = prog; - } - - static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, -- u8 **pprog, int addr, u8 *image, -- bool *callee_regs_used, u32 stack_depth) -+ u8 **pprog, u8 *ip, -+ bool *callee_regs_used, u32 stack_depth, -+ struct jit_context *ctx) - { - int tcc_off = -4 - round_up(stack_depth, 8); -- u8 *prog = *pprog; -- int pop_bytes = 0; -- int off1 = 20; -- int poke_off; -- -- /* count the additional bytes used for popping callee regs to stack -- * that need to be taken into account for jump offset that is used for -- * bailing out from of the tail call when limit is reached -- */ -- pop_bytes = get_pop_bytes(callee_regs_used); -- off1 += pop_bytes; -- -- /* -- * total bytes for: -- * - nop5/ jmpq $off -- * - pop callee regs -- * - sub rsp, $val if depth > 0 -- * - pop rax -- */ -- poke_off = X86_PATCH_SIZE + pop_bytes + 1; -- if (stack_depth) { -- poke_off += 7; -- off1 += 7; -- } -+ u8 *prog = *pprog, *start = *pprog; -+ int offset; - - /* - * if (tail_call_cnt > MAX_TAIL_CALL_CNT) -@@ -535,28 +524,30 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, - */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -- EMIT2(X86_JA, off1); /* ja out */ -+ -+ offset = ctx->tail_call_direct_label - (prog + 2 - start); -+ EMIT2(X86_JA, offset); /* ja out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ - -- poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE); -+ poke->tailcall_bypass = ip + (prog - start); - poke->adj_off = X86_TAIL_CALL_OFFSET; -- poke->tailcall_target = image + (addr - X86_PATCH_SIZE); -+ poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; - poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; - - emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, - poke->tailcall_bypass); - -- *pprog = prog; -- pop_callee_regs(pprog, callee_regs_used); -- prog = *pprog; -+ pop_callee_regs(&prog, callee_regs_used); - EMIT1(0x58); /* pop rax */ - if (stack_depth) - EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); - - memcpy(prog, x86_nops[5], X86_PATCH_SIZE); - prog += X86_PATCH_SIZE; -+ - /* out: */ -+ ctx->tail_call_direct_label = prog - start; - - *pprog = prog; - } -@@ -721,6 +712,20 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) - *pprog = prog; - } - -+/* -+ * Similar version of maybe_emit_mod() for a single register -+ */ -+static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) -+{ -+ u8 *prog = *pprog; -+ -+ if (is64) -+ EMIT1(add_1mod(0x48, reg)); -+ else if (is_ereg(reg)) -+ EMIT1(add_1mod(0x40, reg)); -+ *pprog = prog; -+} -+ - /* LDX: dst_reg = *(u8*)(src_reg + off) */ - static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) - { -@@ 
-827,9 +832,7 @@ static int emit_atomic(u8 **pprog, u8 atomic_op, - return 0; - } - --static bool ex_handler_bpf(const struct exception_table_entry *x, -- struct pt_regs *regs, int trapnr, -- unsigned long error_code, unsigned long fault_addr) -+bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) - { - u32 reg = x->fixup >> 8; - -@@ -951,10 +954,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - /* neg dst */ - case BPF_ALU | BPF_NEG: - case BPF_ALU64 | BPF_NEG: -- if (BPF_CLASS(insn->code) == BPF_ALU64) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_ALU64); - EMIT2(0xF7, add_1reg(0xD8, dst_reg)); - break; - -@@ -968,10 +969,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - case BPF_ALU64 | BPF_AND | BPF_K: - case BPF_ALU64 | BPF_OR | BPF_K: - case BPF_ALU64 | BPF_XOR | BPF_K: -- if (BPF_CLASS(insn->code) == BPF_ALU64) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_ALU64); - - /* - * b3 holds 'normal' opcode, b2 short form only valid -@@ -1112,10 +1111,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - case BPF_ALU64 | BPF_LSH | BPF_K: - case BPF_ALU64 | BPF_RSH | BPF_K: - case BPF_ALU64 | BPF_ARSH | BPF_K: -- if (BPF_CLASS(insn->code) == BPF_ALU64) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_ALU64); - - b3 = simple_alu_opcodes[BPF_OP(insn->code)]; - if (imm32 == 1) -@@ -1146,10 +1143,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - } - - /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ -- if (BPF_CLASS(insn->code) == BPF_ALU64) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_ALU64); - - b3 = simple_alu_opcodes[BPF_OP(insn->code)]; - EMIT2(0xD3, add_1reg(b3, dst_reg)); -@@ -1222,8 +1217,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - /* speculation barrier */ - case BPF_ST | BPF_NOSPEC: - if (boot_cpu_has(X86_FEATURE_XMM2)) -- /* Emit 'lfence' */ -- EMIT3(0x0F, 0xAE, 0xE8); -+ EMIT_LFENCE(); - break; - - /* ST: *(u8*)(dst_reg + off) = imm */ -@@ -1274,19 +1268,54 @@ st: if (is_imm8(insn->off)) - case BPF_LDX | BPF_MEM | BPF_DW: - case BPF_LDX | BPF_PROBE_MEM | BPF_DW: - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { -- /* test src_reg, src_reg */ -- maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */ -- EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg)); -- /* jne start_of_ldx */ -- EMIT2(X86_JNE, 0); -+ /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM -+ * add abs(insn->off) to the limit to make sure that negative -+ * offset won't be an issue. -+ * insn->off is s16, so it won't affect valid pointers. -+ */ -+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off); -+ u8 *end_of_jmp1, *end_of_jmp2; -+ -+ /* Conservatively check that src_reg + insn->off is a kernel address: -+ * 1. src_reg + insn->off >= limit -+ * 2. src_reg + insn->off doesn't become small positive. -+ * Cannot do src_reg + insn->off >= limit in one branch, -+ * since it needs two spare registers, but JIT has only one. 
-+ */ -+ -+ /* movabsq r11, limit */ -+ EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); -+ EMIT((u32)limit, 4); -+ EMIT(limit >> 32, 4); -+ /* cmp src_reg, r11 */ -+ maybe_emit_mod(&prog, src_reg, AUX_REG, true); -+ EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); -+ /* if unsigned '<' goto end_of_jmp2 */ -+ EMIT2(X86_JB, 0); -+ end_of_jmp1 = prog; -+ -+ /* mov r11, src_reg */ -+ emit_mov_reg(&prog, true, AUX_REG, src_reg); -+ /* add r11, insn->off */ -+ maybe_emit_1mod(&prog, AUX_REG, true); -+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); -+ /* jmp if not carry to start_of_ldx -+ * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr -+ * that has to be rejected. -+ */ -+ EMIT2(0x73 /* JNC */, 0); -+ end_of_jmp2 = prog; -+ - /* xor dst_reg, dst_reg */ - emit_mov_imm32(&prog, false, dst_reg, 0); - /* jmp byte_after_ldx */ - EMIT2(0xEB, 0); - -- /* populate jmp_offset for JNE above */ -- temp[4] = prog - temp - 5 /* sizeof(test + jne) */; -+ /* populate jmp_offset for JB above to jump to xor dst_reg */ -+ end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1; -+ /* populate jmp_offset for JNC above to jump to start_of_ldx */ - start_of_ldx = prog; -+ end_of_jmp2[-1] = start_of_ldx - end_of_jmp2; - } - emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); - if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { -@@ -1313,12 +1342,7 @@ st: if (is_imm8(insn->off)) - } - ex->insn = delta; - -- delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler; -- if (!is_simm32(delta)) { -- pr_err("extable->handler doesn't fit into 32-bit\n"); -- return -EFAULT; -- } -- ex->handler = delta; -+ ex->data = EX_TYPE_BPF; - - if (dst_reg > BPF_REG_9) { - pr_err("verifier error\n"); -@@ -1332,7 +1356,7 @@ st: if (is_imm8(insn->off)) - * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" - * of 4 bytes will be ignored and rbx will be zero inited. 
- */ -- ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8); -+ ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); - } - break; - -@@ -1399,8 +1423,9 @@ st: if (is_imm8(insn->off)) - case BPF_JMP | BPF_CALL: - func = (u8 *) __bpf_call_base + imm32; - if (tail_call_reachable) { -+ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ - EMIT3_off32(0x48, 0x8B, 0x85, -- -(bpf_prog->aux->stack_depth + 8)); -+ -round_up(bpf_prog->aux->stack_depth, 8) - 8); - if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) - return -EINVAL; - } else { -@@ -1412,13 +1437,16 @@ st: if (is_imm8(insn->off)) - case BPF_JMP | BPF_TAIL_CALL: - if (imm32) - emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1], -- &prog, addrs[i], image, -+ &prog, image + addrs[i - 1], - callee_regs_used, -- bpf_prog->aux->stack_depth); -+ bpf_prog->aux->stack_depth, -+ ctx); - else - emit_bpf_tail_call_indirect(&prog, - callee_regs_used, -- bpf_prog->aux->stack_depth); -+ bpf_prog->aux->stack_depth, -+ image + addrs[i - 1], -+ ctx); - break; - - /* cond jump */ -@@ -1459,10 +1487,8 @@ st: if (is_imm8(insn->off)) - case BPF_JMP | BPF_JSET | BPF_K: - case BPF_JMP32 | BPF_JSET | BPF_K: - /* test dst_reg, imm32 */ -- if (BPF_CLASS(insn->code) == BPF_JMP) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_JMP); - EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); - goto emit_cond_jmp; - -@@ -1495,10 +1521,8 @@ st: if (is_imm8(insn->off)) - } - - /* cmp dst_reg, imm8/32 */ -- if (BPF_CLASS(insn->code) == BPF_JMP) -- EMIT1(add_1mod(0x48, dst_reg)); -- else if (is_ereg(dst_reg)) -- EMIT1(add_1mod(0x40, dst_reg)); -+ maybe_emit_1mod(&prog, dst_reg, -+ BPF_CLASS(insn->code) == BPF_JMP); - - if (is_imm8(imm32)) - EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); -@@ -1665,7 +1689,7 @@ emit_jmp: - ctx->cleanup_addr = proglen; - pop_callee_regs(&prog, callee_regs_used); - EMIT1(0xC9); /* leave */ -- EMIT1(0xC3); /* ret */ -+ emit_return(&prog, image + addrs[i - 1] + (prog - temp)); - break; - - default: -@@ -2111,7 +2135,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i - if (flags & BPF_TRAMP_F_SKIP_FRAME) - /* skip our return address and return to parent */ - EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ -- EMIT1(0xC3); /* ret */ -+ emit_return(&prog, prog); - /* Make sure the trampoline generation logic doesn't overflow */ - if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { - ret = -EFAULT; -@@ -2124,24 +2148,6 @@ cleanup: - return ret; - } - --static int emit_fallback_jump(u8 **pprog) --{ -- u8 *prog = *pprog; -- int err = 0; -- --#ifdef CONFIG_RETPOLINE -- /* Note that this assumes the the compiler uses external -- * thunks for indirect calls. Both clang and GCC use the same -- * naming convention for external thunks. 
-- */ -- err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog); --#else -- EMIT2(0xFF, 0xE2); /* jmp rdx */ --#endif -- *pprog = prog; -- return err; --} -- - static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) - { - u8 *jg_reloc, *prog = *pprog; -@@ -2163,9 +2169,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs) - if (err) - return err; - -- err = emit_fallback_jump(&prog); /* jmp thunk/indirect */ -- if (err) -- return err; -+ emit_indirect_jump(&prog, 2 /* rdx */, prog); - - *pprog = prog; - return 0; -diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c -index 3bfda5f502cb8..da9b7cfa46329 100644 ---- a/arch/x86/net/bpf_jit_comp32.c -+++ b/arch/x86/net/bpf_jit_comp32.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - - /* -@@ -1267,6 +1268,21 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) - *pprog = prog; - } - -+static int emit_jmp_edx(u8 **pprog, u8 *ip) -+{ -+ u8 *prog = *pprog; -+ int cnt = 0; -+ -+#ifdef CONFIG_RETPOLINE -+ EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5)); -+#else -+ EMIT2(0xFF, 0xE2); -+#endif -+ *pprog = prog; -+ -+ return cnt; -+} -+ - /* - * Generate the following code: - * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... -@@ -1280,7 +1296,7 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth) - * goto *(prog->bpf_func + prologue_size); - * out: - */ --static void emit_bpf_tail_call(u8 **pprog) -+static void emit_bpf_tail_call(u8 **pprog, u8 *ip) - { - u8 *prog = *pprog; - int cnt = 0; -@@ -1362,7 +1378,7 @@ static void emit_bpf_tail_call(u8 **pprog) - * eax == ctx (1st arg) - * edx == prog->bpf_func + prologue_size - */ -- RETPOLINE_EDX_BPF_JIT(); -+ cnt += emit_jmp_edx(&prog, ip + cnt); - - if (jmp_label1 == -1) - jmp_label1 = cnt; -@@ -2122,7 +2138,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, - break; - } - case BPF_JMP | BPF_TAIL_CALL: -- emit_bpf_tail_call(&prog); -+ emit_bpf_tail_call(&prog, image + addrs[i - 1]); - break; - - /* cond jump */ -diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c -index 2edd86649468f..bf5161dcf89e7 100644 ---- a/arch/x86/pci/fixup.c -+++ b/arch/x86/pci/fixup.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -353,8 +354,8 @@ static void pci_fixup_video(struct pci_dev *pdev) - } - } - } --DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, -- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); -+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, -+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); - - - static const struct dmi_system_id msi_k8t_dmi_table[] = { -@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev) - DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma); - - #endif -+ -+#ifdef CONFIG_AMD_NB -+ -+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008 -+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L -+ -+static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev) -+{ -+ u32 data; -+ -+ if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) { -+ data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK; -+ if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data)) -+ pci_err(dev, "Failed to write data 0x%x\n", data); -+ } else { -+ pci_err(dev, "Failed to read data\n"); -+ } -+} -+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0); -+#endif -diff --git a/arch/x86/pci/irq.c 
b/arch/x86/pci/irq.c -index 97b63e35e1528..21c4bc41741fe 100644 ---- a/arch/x86/pci/irq.c -+++ b/arch/x86/pci/irq.c -@@ -253,6 +253,15 @@ static void write_pc_conf_nybble(u8 base, u8 index, u8 val) - pc_conf_set(reg, x); - } - -+/* -+ * FinALi pirq rules are as follows: -+ * -+ * - bit 0 selects between INTx Routing Table Mapping Registers, -+ * -+ * - bit 3 selects the nibble within the INTx Routing Table Mapping Register, -+ * -+ * - bits 7:4 map to bits 3:0 of the PCI INTx Sensitivity Register. -+ */ - static int pirq_finali_get(struct pci_dev *router, struct pci_dev *dev, - int pirq) - { -@@ -260,11 +269,13 @@ static int pirq_finali_get(struct pci_dev *router, struct pci_dev *dev, - 0, 9, 3, 10, 4, 5, 7, 6, 0, 11, 0, 12, 0, 14, 0, 15 - }; - unsigned long flags; -+ u8 index; - u8 x; - -+ index = (pirq & 1) << 1 | (pirq & 8) >> 3; - raw_spin_lock_irqsave(&pc_conf_lock, flags); - pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY); -- x = irqmap[read_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1)]; -+ x = irqmap[read_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, index)]; - pc_conf_set(PC_CONF_FINALI_LOCK, 0); - raw_spin_unlock_irqrestore(&pc_conf_lock, flags); - return x; -@@ -278,13 +289,15 @@ static int pirq_finali_set(struct pci_dev *router, struct pci_dev *dev, - }; - u8 val = irqmap[irq]; - unsigned long flags; -+ u8 index; - - if (!val) - return 0; - -+ index = (pirq & 1) << 1 | (pirq & 8) >> 3; - raw_spin_lock_irqsave(&pc_conf_lock, flags); - pc_conf_set(PC_CONF_FINALI_LOCK, PC_CONF_FINALI_LOCK_KEY); -- write_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, pirq - 1, val); -+ write_pc_conf_nybble(PC_CONF_FINALI_PCI_INTX_RT1, index, val); - pc_conf_set(PC_CONF_FINALI_LOCK, 0); - raw_spin_unlock_irqrestore(&pc_conf_lock, flags); - return 1; -@@ -293,7 +306,7 @@ static int pirq_finali_set(struct pci_dev *router, struct pci_dev *dev, - static int pirq_finali_lvl(struct pci_dev *router, struct pci_dev *dev, - int pirq, int irq) - { -- u8 mask = ~(1u << (pirq - 1)); -+ u8 mask = ~((pirq & 0xf0u) >> 4); - unsigned long flags; - u8 trig; - -diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c -index 5debe4ac6f819..f153e9ab8c966 100644 ---- a/arch/x86/pci/xen.c -+++ b/arch/x86/pci/xen.c -@@ -472,7 +472,6 @@ static __init void xen_setup_pci_msi(void) - xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; - } - xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; -- pci_msi_ignore_mask = 1; - } else if (xen_hvm_domain()) { - xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; - xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; -@@ -486,6 +485,11 @@ static __init void xen_setup_pci_msi(void) - * in allocating the native domain and never use it. - */ - x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; -+ /* -+ * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely -+ * controlled by the hypervisor. 
-+ */ -+ pci_msi_ignore_mask = 1; - } - - #else /* CONFIG_PCI_MSI */ -diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S -index 09ec84f6ef517..f3cfdb1c9a359 100644 ---- a/arch/x86/platform/efi/efi_stub_32.S -+++ b/arch/x86/platform/efi/efi_stub_32.S -@@ -56,5 +56,5 @@ SYM_FUNC_START(efi_call_svam) - - movl 16(%esp), %ebx - leave -- ret -+ RET - SYM_FUNC_END(efi_call_svam) -diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S -index 90380a17ab238..2206b8bc47b8a 100644 ---- a/arch/x86/platform/efi/efi_stub_64.S -+++ b/arch/x86/platform/efi/efi_stub_64.S -@@ -23,5 +23,5 @@ SYM_FUNC_START(__efi_call) - mov %rsi, %rcx - CALL_NOSPEC rdi - leave -- ret -+ RET - SYM_FUNC_END(__efi_call) -diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S -index fd3dd1708eba5..a7ffe30e86143 100644 ---- a/arch/x86/platform/efi/efi_thunk_64.S -+++ b/arch/x86/platform/efi/efi_thunk_64.S -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - .text - .code64 -@@ -63,7 +64,9 @@ SYM_CODE_START(__efi64_thunk) - 1: movq 24(%rsp), %rsp - pop %rbx - pop %rbp -- retq -+ ANNOTATE_UNRET_SAFE -+ ret -+ int3 - - .code32 - 2: pushl $__KERNEL_CS -diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c -index b15ebfe40a73e..b0b848d6933af 100644 ---- a/arch/x86/platform/efi/quirks.c -+++ b/arch/x86/platform/efi/quirks.c -@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) - return; - } - -- new = early_memremap(data.phys_map, data.size); -+ new = early_memremap_prot(data.phys_map, data.size, -+ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL))); - if (!new) { - pr_err("Failed to map new boot services memmap\n"); - return; -diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c -index f03a6883dcc6d..89f25af4b3c33 100644 ---- a/arch/x86/platform/olpc/olpc-xo1-sci.c -+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c -@@ -80,7 +80,7 @@ static void send_ebook_state(void) - return; - } - -- if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state) -+ if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state) - return; /* Nothing new to report. 
*/ - - input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state); -diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S -index 75f4faff84682..3a5abffe5660d 100644 ---- a/arch/x86/platform/olpc/xo1-wakeup.S -+++ b/arch/x86/platform/olpc/xo1-wakeup.S -@@ -77,7 +77,7 @@ save_registers: - pushfl - popl saved_context_eflags - -- ret -+ RET - - restore_registers: - movl saved_context_ebp, %ebp -@@ -88,7 +88,7 @@ restore_registers: - pushl saved_context_eflags - popfl - -- ret -+ RET - - SYM_CODE_START(do_olpc_suspend_lowlevel) - call save_processor_state -@@ -109,7 +109,7 @@ ret_point: - - call restore_registers - call restore_processor_state -- ret -+ RET - SYM_CODE_END(do_olpc_suspend_lowlevel) - - .data -diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c -index 6665f88020983..f5133d620d4ef 100644 ---- a/arch/x86/power/cpu.c -+++ b/arch/x86/power/cpu.c -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_X86_32 - __visible unsigned long saved_context_ebx; -@@ -40,7 +41,8 @@ static void msr_save_context(struct saved_context *ctxt) - struct saved_msr *end = msr + ctxt->saved_msrs.num; - - while (msr < end) { -- msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); -+ if (msr->valid) -+ rdmsrl(msr->info.msr_no, msr->info.reg.q); - msr++; - } - } -@@ -261,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) - x86_platform.restore_sched_clock_state(); - mtrr_bp_restore(); - perf_restore_debug_store(); -- msr_restore_context(ctxt); - - c = &cpu_data(smp_processor_id()); - if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) - init_ia32_feat_ctl(c); -+ -+ microcode_bsp_resume(); -+ -+ /* -+ * This needs to happen after the microcode has been updated upon resume -+ * because some of the MSRs are "emulated" in microcode. 
-+ */ -+ msr_restore_context(ctxt); - } - - /* Needed by apm.c */ -@@ -424,8 +433,10 @@ static int msr_build_context(const u32 *msr_id, const int num) - } - - for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { -+ u64 dummy; -+ - msr_array[i].info.msr_no = msr_id[j]; -- msr_array[i].valid = false; -+ msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); - msr_array[i].info.reg.q = 0; - } - saved_msrs->num = total_num; -@@ -500,10 +511,32 @@ static int pm_cpu_check(const struct x86_cpu_id *c) - return ret; - } - -+static void pm_save_spec_msr(void) -+{ -+ struct msr_enumeration { -+ u32 msr_no; -+ u32 feature; -+ } msr_enum[] = { -+ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, -+ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, -+ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, -+ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, -+ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, -+ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, -+ }; -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { -+ if (boot_cpu_has(msr_enum[i].feature)) -+ msr_build_context(&msr_enum[i].msr_no, 1); -+ } -+} -+ - static int pm_check_save_msr(void) - { - dmi_check_system(msr_save_dmi_table); - pm_cpu_check(msr_save_cpu_table); -+ pm_save_spec_msr(); - - return 0; - } -diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S -index 8786653ad3c06..5606a15cf9a17 100644 ---- a/arch/x86/power/hibernate_asm_32.S -+++ b/arch/x86/power/hibernate_asm_32.S -@@ -32,7 +32,7 @@ SYM_FUNC_START(swsusp_arch_suspend) - FRAME_BEGIN - call swsusp_save - FRAME_END -- ret -+ RET - SYM_FUNC_END(swsusp_arch_suspend) - - SYM_CODE_START(restore_image) -@@ -108,5 +108,5 @@ SYM_FUNC_START(restore_registers) - /* tell the hibernation core that we've just restored the memory */ - movl %eax, in_suspend - -- ret -+ RET - SYM_FUNC_END(restore_registers) -diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S -index d9bed596d849c..0a0539e1cc814 100644 ---- a/arch/x86/power/hibernate_asm_64.S -+++ b/arch/x86/power/hibernate_asm_64.S -@@ -66,7 +66,7 @@ SYM_FUNC_START(restore_registers) - /* tell the hibernation core that we've just restored the memory */ - movq %rax, in_suspend(%rip) - -- ret -+ RET - SYM_FUNC_END(restore_registers) - - SYM_FUNC_START(swsusp_arch_suspend) -@@ -96,7 +96,7 @@ SYM_FUNC_START(swsusp_arch_suspend) - FRAME_BEGIN - call swsusp_save - FRAME_END -- ret -+ RET - SYM_FUNC_END(swsusp_arch_suspend) - - SYM_FUNC_START(restore_image) -diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile -index 95ea17a9d20cb..dc0b91c1db04b 100644 ---- a/arch/x86/purgatory/Makefile -+++ b/arch/x86/purgatory/Makefile -@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE - - CFLAGS_sha256.o := -D__DISABLE_EXPORTS - -+# When profile-guided optimization is enabled, llvm emits two different -+# overlapping text sections, which is not supported by kexec. Remove profile -+# optimization flags. -+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS)) -+ - # When linking purgatory.ro with -r unresolved symbols are not checked, - # also link a purgatory.chk binary without -r to check for unresolved symbols. 
- PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib -@@ -64,8 +69,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS) - CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) - CFLAGS_string.o += $(PURGATORY_CFLAGS) - --AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2 --AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2 -+asflags-remove-y += -g -Wa,-gdwarf-2 - - $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE - $(call if_changed,ld) -diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c -index 31b5856010cba..1d20ed4b28729 100644 ---- a/arch/x86/realmode/init.c -+++ b/arch/x86/realmode/init.c -@@ -17,6 +17,32 @@ u32 *trampoline_cr4_features; - /* Hold the pgd entry used on booting additional CPUs */ - pgd_t trampoline_pgd_entry; - -+void load_trampoline_pgtable(void) -+{ -+#ifdef CONFIG_X86_32 -+ load_cr3(initial_page_table); -+#else -+ /* -+ * This function is called before exiting to real-mode and that will -+ * fail with CR4.PCIDE still set. -+ */ -+ if (boot_cpu_has(X86_FEATURE_PCID)) -+ cr4_clear_bits(X86_CR4_PCIDE); -+ -+ write_cr3(real_mode_header->trampoline_pgd); -+#endif -+ -+ /* -+ * The CR3 write above will not flush global TLB entries. -+ * Stale, global entries from previous page tables may still be -+ * present. Flush those stale entries. -+ * -+ * This ensures that memory accessed while running with -+ * trampoline_pgd is *actually* mapped into trampoline_pgd. -+ */ -+ __flush_tlb_all(); -+} -+ - void __init reserve_real_mode(void) - { - phys_addr_t mem; -@@ -72,6 +98,7 @@ static void __init setup_real_mode(void) - #ifdef CONFIG_X86_64 - u64 *trampoline_pgd; - u64 efer; -+ int i; - #endif - - base = (unsigned char *)real_mode_header; -@@ -128,8 +155,17 @@ static void __init setup_real_mode(void) - trampoline_header->flags = 0; - - trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); -+ -+ /* Map the real mode stub as virtual == physical */ - trampoline_pgd[0] = trampoline_pgd_entry.pgd; -- trampoline_pgd[511] = init_top_pgt[511].pgd; -+ -+ /* -+ * Include the entirety of the kernel mapping into the trampoline -+ * PGD. This way, all mappings present in the normal kernel page -+ * tables are usable while running on trampoline_pgd. 
-+ */ -+ for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++) -+ trampoline_pgd[i] = init_top_pgt[i].pgd; - #endif - - sme_sev_setup_real_mode(trampoline_header); -diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile -index 5ccb18290d717..a8591ec8ae682 100644 ---- a/arch/x86/um/Makefile -+++ b/arch/x86/um/Makefile -@@ -28,7 +28,8 @@ else - - obj-y += syscalls_64.o vdso/ - --subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o -+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o -+subarch-$(CONFIG_PREEMPTION) += ../entry/thunk_64.o - - endif - -diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S -index 13f118dec74f8..aed782ab77213 100644 ---- a/arch/x86/um/checksum_32.S -+++ b/arch/x86/um/checksum_32.S -@@ -110,7 +110,7 @@ csum_partial: - 7: - popl %ebx - popl %esi -- ret -+ RET - - #else - -@@ -208,7 +208,7 @@ csum_partial: - 80: - popl %ebx - popl %esi -- ret -+ RET - - #endif - EXPORT_SYMBOL(csum_partial) -diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c -index 3ee234b6234dd..255a44dd415a9 100644 ---- a/arch/x86/um/ldt.c -+++ b/arch/x86/um/ldt.c -@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func, - { - long res; - void *stub_addr; -+ -+ BUILD_BUG_ON(sizeof(*desc) % sizeof(long)); -+ - res = syscall_stub_data(mm_idp, (unsigned long *)desc, -- (sizeof(*desc) + sizeof(long) - 1) & -- ~(sizeof(long) - 1), -+ sizeof(*desc) / sizeof(long), - addr, &stub_addr); - if (!res) { - unsigned long args[] = { func, -diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S -index 62eaf8c80e041..2d991ddbcca57 100644 ---- a/arch/x86/um/setjmp_32.S -+++ b/arch/x86/um/setjmp_32.S -@@ -34,7 +34,7 @@ kernel_setjmp: - movl %esi,12(%edx) - movl %edi,16(%edx) - movl %ecx,20(%edx) # Return address -- ret -+ RET - - .size kernel_setjmp,.-kernel_setjmp - -diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S -index 1b5d40d4ff46d..b46acb6a8ebd8 100644 ---- a/arch/x86/um/setjmp_64.S -+++ b/arch/x86/um/setjmp_64.S -@@ -33,7 +33,7 @@ kernel_setjmp: - movq %r14,40(%rdi) - movq %r15,48(%rdi) - movq %rsi,56(%rdi) # Return address -- ret -+ RET - - .size kernel_setjmp,.-kernel_setjmp - -diff --git a/arch/x86/um/shared/sysdep/syscalls_32.h b/arch/x86/um/shared/sysdep/syscalls_32.h -index 68fd2cf526fd7..f6e9f84397e79 100644 ---- a/arch/x86/um/shared/sysdep/syscalls_32.h -+++ b/arch/x86/um/shared/sysdep/syscalls_32.h -@@ -6,10 +6,9 @@ - #include - #include - --typedef long syscall_handler_t(struct pt_regs); -+typedef long syscall_handler_t(struct syscall_args); - - extern syscall_handler_t *sys_call_table[]; - - #define EXECUTE_SYSCALL(syscall, regs) \ -- ((long (*)(struct syscall_args)) \ -- (*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) -+ ((*sys_call_table[syscall]))(SYSCALL_ARGS(®s->regs)) -diff --git a/arch/x86/um/shared/sysdep/syscalls_64.h b/arch/x86/um/shared/sysdep/syscalls_64.h -index 8a7d5e1da98e5..1e6875b4ffd83 100644 ---- a/arch/x86/um/shared/sysdep/syscalls_64.h -+++ b/arch/x86/um/shared/sysdep/syscalls_64.h -@@ -10,13 +10,12 @@ - #include - #include - --typedef long syscall_handler_t(void); -+typedef long syscall_handler_t(long, long, long, long, long, long); - - extern syscall_handler_t *sys_call_table[]; - - #define EXECUTE_SYSCALL(syscall, regs) \ -- (((long (*)(long, long, long, long, long, long)) \ -- (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ -+ (((*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(®s->regs), \ - UPT_SYSCALL_ARG2(®s->regs), \ - UPT_SYSCALL_ARG3(®s->regs), \ - 
UPT_SYSCALL_ARG4(®s->regs), \ -diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c -index 58f51667e2e4b..8249685b40960 100644 ---- a/arch/x86/um/syscalls_64.c -+++ b/arch/x86/um/syscalls_64.c -@@ -11,6 +11,7 @@ - #include - #include /* XXX This should get the constants from libc */ - #include -+#include - - long arch_prctl(struct task_struct *task, int option, - unsigned long __user *arg2) -@@ -35,7 +36,7 @@ long arch_prctl(struct task_struct *task, int option, - switch (option) { - case ARCH_SET_FS: - case ARCH_SET_GS: -- ret = restore_registers(pid, ¤t->thread.regs.regs); -+ ret = restore_pid_registers(pid, ¤t->thread.regs.regs); - if (ret) - return ret; - break; -diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c -index ac8eee093f9cd..66162eafd8e8f 100644 ---- a/arch/x86/um/tls_32.c -+++ b/arch/x86/um/tls_32.c -@@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task) - struct thread_struct *t = &task->thread; - int idx; - -- if (!t->arch.tls_array) -- return GDT_ENTRY_TLS_MIN; -- - for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) - if (!t->arch.tls_array[idx].present) - return idx + GDT_ENTRY_TLS_MIN; -@@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info, - { - struct thread_struct *t = &task->thread; - -- if (!t->arch.tls_array) -- goto clear; -- - if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) - return -EINVAL; - -diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile -index 5943387e3f357..5ca366e15c767 100644 ---- a/arch/x86/um/vdso/Makefile -+++ b/arch/x86/um/vdso/Makefile -@@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@ - -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ - sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' - --VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack - GCOV_PROFILE := n - - # -diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c -index 2112b8d146688..ff0f3b4b6c45e 100644 ---- a/arch/x86/um/vdso/um_vdso.c -+++ b/arch/x86/um/vdso/um_vdso.c -@@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts) - { - long ret; - -- asm("syscall" : "=a" (ret) : -- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory"); -+ asm("syscall" -+ : "=a" (ret) -+ : "0" (__NR_clock_gettime), "D" (clock), "S" (ts) -+ : "rcx", "r11", "memory"); - - return ret; - } -@@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) - { - long ret; - -- asm("syscall" : "=a" (ret) : -- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); -+ asm("syscall" -+ : "=a" (ret) -+ : "0" (__NR_gettimeofday), "D" (tv), "S" (tz) -+ : "rcx", "r11", "memory"); - - return ret; - } -diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile -index 4953260e281c3..40b5779fce21c 100644 ---- a/arch/x86/xen/Makefile -+++ b/arch/x86/xen/Makefile -@@ -45,7 +45,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o - - obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o - --obj-$(CONFIG_XEN_PV_DOM0) += vga.o -+obj-$(CONFIG_XEN_DOM0) += vga.o - - obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o - -diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c -index a7b7d674f5005..998db0257e2ad 100644 ---- a/arch/x86/xen/enlighten_pv.c -+++ b/arch/x86/xen/enlighten_pv.c -@@ -759,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc) - { - static DEFINE_SPINLOCK(lock); - static struct trap_info traps[257]; -+ static const struct trap_info zero = { }; - unsigned out; - - 
trace_xen_cpu_load_idt(desc); -@@ -768,7 +769,7 @@ static void xen_load_idt(const struct desc_ptr *desc) - memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); - - out = xen_convert_trap_info(desc, traps, false); -- memset(&traps[out], 0, sizeof(traps[0])); -+ traps[out] = zero; - - xen_mc_flush(); - if (HYPERVISOR_set_trap_table(traps)) -@@ -1352,7 +1353,8 @@ asmlinkage __visible void __init xen_start_kernel(void) - - x86_platform.set_legacy_features = - xen_dom0_set_legacy_features; -- xen_init_vga(info, xen_start_info->console.dom0.info_size); -+ xen_init_vga(info, xen_start_info->console.dom0.info_size, -+ &boot_params.screen_info); - xen_start_info->console.domU.mfn = 0; - xen_start_info->console.domU.evtchn = 0; - -@@ -1364,10 +1366,6 @@ asmlinkage __visible void __init xen_start_kernel(void) - - xen_acpi_sleep_register(); - -- /* Avoid searching for BIOS MP tables */ -- x86_init.mpparse.find_smp_config = x86_init_noop; -- x86_init.mpparse.get_smp_config = x86_init_uint_noop; -- - xen_boot_params_init_edd(); - - #ifdef CONFIG_ACPI -diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c -index bcae606bbc5cf..ada3868c02c23 100644 ---- a/arch/x86/xen/enlighten_pvh.c -+++ b/arch/x86/xen/enlighten_pvh.c -@@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params) - x86_init.oem.banner = xen_banner; - - xen_efi_init(boot_params); -+ -+ if (xen_initial_domain()) { -+ struct xen_platform_op op = { -+ .cmd = XENPF_get_dom0_console, -+ }; -+ int ret = HYPERVISOR_platform_op(&op); -+ -+ if (ret > 0) -+ xen_init_vga(&op.u.dom0_console, -+ min(ret * sizeof(char), -+ sizeof(op.u.dom0_console)), -+ &boot_params->screen_info); -+ } - } - - void __init mem_map_via_hcall(struct boot_params *boot_params_p) -diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c -index e13b0b49fcdfc..d7249f4c90f1b 100644 ---- a/arch/x86/xen/pmu.c -+++ b/arch/x86/xen/pmu.c -@@ -512,10 +512,7 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) - return ret; - } - --bool is_xen_pmu(int cpu) --{ -- return (get_xenpmu_data() != NULL); --} -+bool is_xen_pmu; - - void xen_pmu_init(int cpu) - { -@@ -526,7 +523,7 @@ void xen_pmu_init(int cpu) - - BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); - -- if (xen_hvm_domain()) -+ if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) - return; - - xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); -@@ -547,7 +544,8 @@ void xen_pmu_init(int cpu) - per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; - per_cpu(xenpmu_shared, cpu).flags = 0; - -- if (cpu == 0) { -+ if (!is_xen_pmu) { -+ is_xen_pmu = true; - perf_register_guest_info_callbacks(&xen_guest_cbs); - xen_pmu_arch_init(); - } -diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h -index 0e83a160589bc..65c58894fc79f 100644 ---- a/arch/x86/xen/pmu.h -+++ b/arch/x86/xen/pmu.h -@@ -4,6 +4,8 @@ - - #include - -+extern bool is_xen_pmu; -+ - irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); - #ifdef CONFIG_XEN_HAVE_VPMU - void xen_pmu_init(int cpu); -@@ -12,7 +14,6 @@ void xen_pmu_finish(int cpu); - static inline void xen_pmu_init(int cpu) {} - static inline void xen_pmu_finish(int cpu) {} - #endif --bool is_xen_pmu(int cpu); - bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); - bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); - int pmu_apic_update(uint32_t reg); -diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c -index 8bfc103301077..1f80dd3a2dd4a 100644 ---- a/arch/x86/xen/setup.c -+++ b/arch/x86/xen/setup.c -@@ -922,7 
+922,7 @@ void xen_enable_sysenter(void) - if (!boot_cpu_has(sysenter_feature)) - return; - -- ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target); -+ ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat); - if(ret != 0) - setup_clear_cpu_cap(sysenter_feature); - } -@@ -931,7 +931,7 @@ void xen_enable_syscall(void) - { - int ret; - -- ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target); -+ ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64); - if (ret != 0) { - printk(KERN_ERR "Failed to set syscall callback: %d\n", ret); - /* Pretty fatal; 64-bit userspace has no other -@@ -940,7 +940,7 @@ void xen_enable_syscall(void) - - if (boot_cpu_has(X86_FEATURE_SYSCALL32)) { - ret = register_callback(CALLBACKTYPE_syscall32, -- xen_syscall32_target); -+ xen_entry_SYSCALL_compat); - if (ret != 0) - setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); - } -diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c -index c1b2f764b29a2..cdec892b28e2e 100644 ---- a/arch/x86/xen/smp.c -+++ b/arch/x86/xen/smp.c -@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) - - void xen_smp_intr_free(unsigned int cpu) - { -+ kfree(per_cpu(xen_resched_irq, cpu).name); -+ per_cpu(xen_resched_irq, cpu).name = NULL; - if (per_cpu(xen_resched_irq, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); - per_cpu(xen_resched_irq, cpu).irq = -1; -- kfree(per_cpu(xen_resched_irq, cpu).name); -- per_cpu(xen_resched_irq, cpu).name = NULL; - } -+ kfree(per_cpu(xen_callfunc_irq, cpu).name); -+ per_cpu(xen_callfunc_irq, cpu).name = NULL; - if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); - per_cpu(xen_callfunc_irq, cpu).irq = -1; -- kfree(per_cpu(xen_callfunc_irq, cpu).name); -- per_cpu(xen_callfunc_irq, cpu).name = NULL; - } -+ kfree(per_cpu(xen_debug_irq, cpu).name); -+ per_cpu(xen_debug_irq, cpu).name = NULL; - if (per_cpu(xen_debug_irq, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); - per_cpu(xen_debug_irq, cpu).irq = -1; -- kfree(per_cpu(xen_debug_irq, cpu).name); -- per_cpu(xen_debug_irq, cpu).name = NULL; - } -+ kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); -+ per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; - if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, - NULL); - per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; -- kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); -- per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; - } - } - -@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu) - char *resched_name, *callfunc_name, *debug_name; - - resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); -+ per_cpu(xen_resched_irq, cpu).name = resched_name; - rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, - cpu, - xen_reschedule_interrupt, -@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_resched_irq, cpu).irq = rc; -- per_cpu(xen_resched_irq, cpu).name = resched_name; - - callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); -+ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; - rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, - cpu, - xen_call_function_interrupt, -@@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_callfunc_irq, cpu).irq = rc; -- per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; - - if (!xen_fifo_events) { - 
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); -+ per_cpu(xen_debug_irq, cpu).name = debug_name; - rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, - xen_debug_interrupt, - IRQF_PERCPU | IRQF_NOBALANCING, -@@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_debug_irq, cpu).irq = rc; -- per_cpu(xen_debug_irq, cpu).name = debug_name; - } - - callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); -+ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; - rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, - cpu, - xen_call_function_single_interrupt, -@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; -- per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; - - return 0; - -diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c -index 6ff3c887e0b99..b70afdff419ca 100644 ---- a/arch/x86/xen/smp_hvm.c -+++ b/arch/x86/xen/smp_hvm.c -@@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void) - */ - xen_vcpu_setup(0); - -+ /* -+ * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. -+ * Refer to comments in xen_hvm_init_time_ops(). -+ */ -+ xen_hvm_init_time_ops(); -+ - /* - * The alternative logic (which patches the unlock/lock) runs before - * the smp bootup up code is activated. Hence we need to set this up -diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c -index 7ed56c6075b0c..a1f974309b1cf 100644 ---- a/arch/x86/xen/smp_pv.c -+++ b/arch/x86/xen/smp_pv.c -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -63,6 +64,7 @@ static void cpu_bringup(void) - - cr4_init(); - cpu_init(); -+ fpu__init_cpu(); - touch_softlockup_watchdog(); - - /* PVH runs in ring 0 and allows us to do native syscalls. Yay! 
*/ -@@ -97,18 +99,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void) - - void xen_smp_intr_free_pv(unsigned int cpu) - { -+ kfree(per_cpu(xen_irq_work, cpu).name); -+ per_cpu(xen_irq_work, cpu).name = NULL; - if (per_cpu(xen_irq_work, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); - per_cpu(xen_irq_work, cpu).irq = -1; -- kfree(per_cpu(xen_irq_work, cpu).name); -- per_cpu(xen_irq_work, cpu).name = NULL; - } - -+ kfree(per_cpu(xen_pmu_irq, cpu).name); -+ per_cpu(xen_pmu_irq, cpu).name = NULL; - if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { - unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); - per_cpu(xen_pmu_irq, cpu).irq = -1; -- kfree(per_cpu(xen_pmu_irq, cpu).name); -- per_cpu(xen_pmu_irq, cpu).name = NULL; - } - } - -@@ -118,6 +120,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) - char *callfunc_name, *pmu_name; - - callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); -+ per_cpu(xen_irq_work, cpu).name = callfunc_name; - rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, - cpu, - xen_irq_work_interrupt, -@@ -127,10 +130,10 @@ int xen_smp_intr_init_pv(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_irq_work, cpu).irq = rc; -- per_cpu(xen_irq_work, cpu).name = callfunc_name; - -- if (is_xen_pmu(cpu)) { -+ if (is_xen_pmu) { - pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); -+ per_cpu(xen_pmu_irq, cpu).name = pmu_name; - rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, - xen_pmu_irq_handler, - IRQF_PERCPU|IRQF_NOBALANCING, -@@ -138,7 +141,6 @@ int xen_smp_intr_init_pv(unsigned int cpu) - if (rc < 0) - goto fail; - per_cpu(xen_pmu_irq, cpu).irq = rc; -- per_cpu(xen_pmu_irq, cpu).name = pmu_name; - } - - return 0; -@@ -148,28 +150,12 @@ int xen_smp_intr_init_pv(unsigned int cpu) - return rc; - } - --static void __init xen_fill_possible_map(void) --{ -- int i, rc; -- -- if (xen_initial_domain()) -- return; -- -- for (i = 0; i < nr_cpu_ids; i++) { -- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); -- if (rc >= 0) { -- num_processors++; -- set_cpu_possible(i, true); -- } -- } --} -- --static void __init xen_filter_cpu_maps(void) -+static void __init _get_smp_config(unsigned int early) - { - int i, rc; - unsigned int subtract = 0; - -- if (!xen_initial_domain()) -+ if (early) - return; - - num_processors = 0; -@@ -210,7 +196,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void) - * sure the old memory can be recycled. 
*/ - make_lowmem_page_readwrite(xen_initial_gdt); - -- xen_filter_cpu_maps(); - xen_setup_vcpu_info_placement(); - - /* -@@ -486,5 +471,8 @@ static const struct smp_ops xen_smp_ops __initconst = { - void __init xen_smp_init(void) - { - smp_ops = xen_smp_ops; -- xen_fill_possible_map(); -+ -+ /* Avoid searching for BIOS MP tables */ -+ x86_init.mpparse.find_smp_config = x86_init_noop; -+ x86_init.mpparse.get_smp_config = _get_smp_config; - } -diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c -index 043c73dfd2c98..5c6fc16e4b925 100644 ---- a/arch/x86/xen/spinlock.c -+++ b/arch/x86/xen/spinlock.c -@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu) - cpu, per_cpu(lock_kicker_irq, cpu)); - - name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); -+ per_cpu(irq_name, cpu) = name; - irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, - cpu, - dummy_handler, -@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu) - if (irq >= 0) { - disable_irq(irq); /* make sure it's never delivered */ - per_cpu(lock_kicker_irq, cpu) = irq; -- per_cpu(irq_name, cpu) = name; - } - - printk("cpu %d spinlock event irq %d\n", cpu, irq); -@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu) - if (!xen_pvspin) - return; - -+ kfree(per_cpu(irq_name, cpu)); -+ per_cpu(irq_name, cpu) = NULL; - /* - * When booting the kernel with 'mitigations=auto,nosmt', the secondary - * CPUs are not activated, and lock_kicker_irq is not initialized. -@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu) - - unbind_from_irqhandler(irq, NULL); - per_cpu(lock_kicker_irq, cpu) = -1; -- kfree(per_cpu(irq_name, cpu)); -- per_cpu(irq_name, cpu) = NULL; - } - - PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); -diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c -index d9c945ee11008..9ef0a5cca96ee 100644 ---- a/arch/x86/xen/time.c -+++ b/arch/x86/xen/time.c -@@ -558,6 +558,11 @@ static void xen_hvm_setup_cpu_clockevents(void) - - void __init xen_hvm_init_time_ops(void) - { -+ static bool hvm_time_initialized; -+ -+ if (hvm_time_initialized) -+ return; -+ - /* - * vector callback is needed otherwise we cannot receive interrupts - * on cpu > 0 and at this point we don't know how many cpus are -@@ -567,7 +572,22 @@ void __init xen_hvm_init_time_ops(void) - return; - - if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { -- pr_info("Xen doesn't support pvclock on HVM, disable pv timer"); -+ pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); -+ return; -+ } -+ -+ /* -+ * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. -+ * The __this_cpu_read(xen_vcpu) is still NULL when Xen HVM guest -+ * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec), To access -+ * __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic. -+ * -+ * The xen_hvm_init_time_ops() should be called again later after -+ * __this_cpu_read(xen_vcpu) is available. 
-+ */ -+ if (!__this_cpu_read(xen_vcpu)) { -+ pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", -+ xen_vcpu_nr(0)); - return; - } - -@@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void) - x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; - - x86_platform.set_wallclock = xen_set_wallclock; -+ -+ hvm_time_initialized = true; - } - #endif - -diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c -index e336f223f7f47..93697109592c3 100644 ---- a/arch/x86/xen/vga.c -+++ b/arch/x86/xen/vga.c -@@ -9,10 +9,9 @@ - - #include "xen-ops.h" - --void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) -+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size, -+ struct screen_info *screen_info) - { -- struct screen_info *screen_info = &boot_params.screen_info; -- - /* This is drawn from a dump from vgacon:startup in - * standard Linux. */ - screen_info->orig_video_mode = 3; -diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S -index 1e626444712be..1b757a1ee1bb6 100644 ---- a/arch/x86/xen/xen-asm.S -+++ b/arch/x86/xen/xen-asm.S -@@ -20,6 +20,7 @@ - - #include - #include -+#include <../entry/calling.h> - - /* - * Enable events. This clears the event mask and tests the pending -@@ -44,7 +45,7 @@ SYM_FUNC_START(xen_irq_enable_direct) - call check_events - 1: - FRAME_END -- ret -+ RET - SYM_FUNC_END(xen_irq_enable_direct) - - -@@ -54,7 +55,7 @@ SYM_FUNC_END(xen_irq_enable_direct) - */ - SYM_FUNC_START(xen_irq_disable_direct) - movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask -- ret -+ RET - SYM_FUNC_END(xen_irq_disable_direct) - - /* -@@ -70,7 +71,7 @@ SYM_FUNC_START(xen_save_fl_direct) - testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask - setz %ah - addb %ah, %ah -- ret -+ RET - SYM_FUNC_END(xen_save_fl_direct) - - /* -@@ -99,7 +100,7 @@ SYM_FUNC_START(check_events) - pop %rcx - pop %rax - FRAME_END -- ret -+ RET - SYM_FUNC_END(check_events) - - SYM_FUNC_START(xen_read_cr2) -@@ -107,19 +108,19 @@ SYM_FUNC_START(xen_read_cr2) - _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX - _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX - FRAME_END -- ret -+ RET - SYM_FUNC_END(xen_read_cr2); - - SYM_FUNC_START(xen_read_cr2_direct) - FRAME_BEGIN - _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX - FRAME_END -- ret -+ RET - SYM_FUNC_END(xen_read_cr2_direct); - - .macro xen_pv_trap name - SYM_CODE_START(xen_\name) -- UNWIND_HINT_EMPTY -+ UNWIND_HINT_ENTRY - pop %rcx - pop %r11 - jmp \name -@@ -191,6 +192,25 @@ SYM_CODE_START(xen_iret) - jmp hypercall_iret - SYM_CODE_END(xen_iret) - -+/* -+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is -+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode() -+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and -+ * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI -+ * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET -+ * frame at the same address is useless. -+ */ -+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode) -+ UNWIND_HINT_REGS -+ POP_REGS -+ -+ /* stackleak_erase() can work safely on the kernel stack. 
*/ -+ STACKLEAK_ERASE_NOCLOBBER -+ -+ addq $8, %rsp /* skip regs->orig_ax */ -+ jmp xen_iret -+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) -+ - /* - * Xen handles syscall callbacks much like ordinary exceptions, which - * means we have: -@@ -207,8 +227,8 @@ SYM_CODE_END(xen_iret) - */ - - /* Normal 64-bit system call target */ --SYM_CODE_START(xen_syscall_target) -- UNWIND_HINT_EMPTY -+SYM_CODE_START(xen_entry_SYSCALL_64) -+ UNWIND_HINT_ENTRY - popq %rcx - popq %r11 - -@@ -221,13 +241,13 @@ SYM_CODE_START(xen_syscall_target) - movq $__USER_CS, 1*8(%rsp) - - jmp entry_SYSCALL_64_after_hwframe --SYM_CODE_END(xen_syscall_target) -+SYM_CODE_END(xen_entry_SYSCALL_64) - - #ifdef CONFIG_IA32_EMULATION - - /* 32-bit compat syscall target */ --SYM_CODE_START(xen_syscall32_target) -- UNWIND_HINT_EMPTY -+SYM_CODE_START(xen_entry_SYSCALL_compat) -+ UNWIND_HINT_ENTRY - popq %rcx - popq %r11 - -@@ -240,11 +260,11 @@ SYM_CODE_START(xen_syscall32_target) - movq $__USER32_CS, 1*8(%rsp) - - jmp entry_SYSCALL_compat_after_hwframe --SYM_CODE_END(xen_syscall32_target) -+SYM_CODE_END(xen_entry_SYSCALL_compat) - - /* 32-bit compat sysenter target */ --SYM_CODE_START(xen_sysenter_target) -- UNWIND_HINT_EMPTY -+SYM_CODE_START(xen_entry_SYSENTER_compat) -+ UNWIND_HINT_ENTRY - /* - * NB: Xen is polite and clears TF from EFLAGS for us. This means - * that we don't need to guard against single step exceptions here. -@@ -261,18 +281,18 @@ SYM_CODE_START(xen_sysenter_target) - movq $__USER32_CS, 1*8(%rsp) - - jmp entry_SYSENTER_compat_after_hwframe --SYM_CODE_END(xen_sysenter_target) -+SYM_CODE_END(xen_entry_SYSENTER_compat) - - #else /* !CONFIG_IA32_EMULATION */ - --SYM_CODE_START(xen_syscall32_target) --SYM_CODE_START(xen_sysenter_target) -- UNWIND_HINT_EMPTY -+SYM_CODE_START(xen_entry_SYSCALL_compat) -+SYM_CODE_START(xen_entry_SYSENTER_compat) -+ UNWIND_HINT_ENTRY - lea 16(%rsp), %rsp /* strip %rcx, %r11 */ - mov $-ENOSYS, %rax - pushq $0 - jmp hypercall_iret --SYM_CODE_END(xen_sysenter_target) --SYM_CODE_END(xen_syscall32_target) -+SYM_CODE_END(xen_entry_SYSENTER_compat) -+SYM_CODE_END(xen_entry_SYSCALL_compat) - - #endif /* CONFIG_IA32_EMULATION */ -diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S -index cb6538ae2fe07..2a3ef5fcba34b 100644 ---- a/arch/x86/xen/xen-head.S -+++ b/arch/x86/xen/xen-head.S -@@ -69,8 +69,9 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) - SYM_CODE_START(hypercall_page) - .rept (PAGE_SIZE / 32) - UNWIND_HINT_FUNC -- .skip 31, 0x90 -+ ANNOTATE_UNRET_SAFE - ret -+ .skip 31, 0xcc - .endr - - #define HYPERCALL(n) \ -diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h -index 8bc8b72a205d4..71f31032c635f 100644 ---- a/arch/x86/xen/xen-ops.h -+++ b/arch/x86/xen/xen-ops.h -@@ -10,10 +10,10 @@ - /* These are code, but not functions. 
Defined in entry.S */ - extern const char xen_failsafe_callback[]; - --void xen_sysenter_target(void); -+void xen_entry_SYSENTER_compat(void); - #ifdef CONFIG_X86_64 --void xen_syscall_target(void); --void xen_syscall32_target(void); -+void xen_entry_SYSCALL_64(void); -+void xen_entry_SYSCALL_compat(void); - #endif - - extern void *xen_initial_gdt; -@@ -110,11 +110,12 @@ static inline void xen_uninit_lock_cpu(int cpu) - - struct dom0_vga_console_info; - --#ifdef CONFIG_XEN_PV_DOM0 --void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); -+#ifdef CONFIG_XEN_DOM0 -+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size, -+ struct screen_info *); - #else - static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, -- size_t size) -+ size_t size, struct screen_info *si) - { - } - #endif -diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi -index 9bf8bad1dd18a..c33932568aa73 100644 ---- a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi -+++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi -@@ -8,19 +8,19 @@ - reg = <0x00000000 0x08000000>; - bank-width = <2>; - device-width = <2>; -- partition@0x0 { -+ partition@0 { - label = "data"; - reg = <0x00000000 0x06000000>; - }; -- partition@0x6000000 { -+ partition@6000000 { - label = "boot loader area"; - reg = <0x06000000 0x00800000>; - }; -- partition@0x6800000 { -+ partition@6800000 { - label = "kernel image"; - reg = <0x06800000 0x017e0000>; - }; -- partition@0x7fe0000 { -+ partition@7fe0000 { - label = "boot environment"; - reg = <0x07fe0000 0x00020000>; - }; -diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi -index 40c2f81f7cb66..7bde2ab2d6fb5 100644 ---- a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi -+++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi -@@ -8,19 +8,19 @@ - reg = <0x08000000 0x01000000>; - bank-width = <2>; - device-width = <2>; -- partition@0x0 { -+ partition@0 { - label = "boot loader area"; - reg = <0x00000000 0x00400000>; - }; -- partition@0x400000 { -+ partition@400000 { - label = "kernel image"; - reg = <0x00400000 0x00600000>; - }; -- partition@0xa00000 { -+ partition@a00000 { - label = "data"; - reg = <0x00a00000 0x005e0000>; - }; -- partition@0xfe0000 { -+ partition@fe0000 { - label = "boot environment"; - reg = <0x00fe0000 0x00020000>; - }; -diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi -index fb8d3a9f33c23..0655b868749a4 100644 ---- a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi -+++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi -@@ -8,11 +8,11 @@ - reg = <0x08000000 0x00400000>; - bank-width = <2>; - device-width = <2>; -- partition@0x0 { -+ partition@0 { - label = "boot loader area"; - reg = <0x00000000 0x003f0000>; - }; -- partition@0x3f0000 { -+ partition@3f0000 { - label = "boot environment"; - reg = <0x003f0000 0x00010000>; - }; -diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h -deleted file mode 100644 -index 69b29d1982494..0000000000000 ---- a/arch/xtensa/include/asm/bugs.h -+++ /dev/null -@@ -1,18 +0,0 @@ --/* -- * include/asm-xtensa/bugs.h -- * -- * This is included by init/main.c to check for architecture-dependent bugs. -- * -- * Xtensa processors don't have any bugs. :) -- * -- * This file is subject to the terms and conditions of the GNU General -- * Public License. See the file "COPYING" in the main directory of -- * this archive for more details. 
-- */ -- --#ifndef _XTENSA_BUGS_H --#define _XTENSA_BUGS_H -- --static void check_bugs(void) { } -- --#endif /* _XTENSA_BUGS_H */ -diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h -index 5590b0f688376..a4e40166ff4bb 100644 ---- a/arch/xtensa/include/asm/core.h -+++ b/arch/xtensa/include/asm/core.h -@@ -26,4 +26,13 @@ - #define XCHAL_SPANNING_WAY 0 - #endif - -+#ifndef XCHAL_HW_MIN_VERSION -+#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR) -+#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \ -+ XCHAL_HW_MIN_VERSION_MINOR) -+#else -+#define XCHAL_HW_MIN_VERSION 0 -+#endif -+#endif -+ - #endif -diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h -index bd5aeb7955675..a63eca1266577 100644 ---- a/arch/xtensa/include/asm/pgtable.h -+++ b/arch/xtensa/include/asm/pgtable.h -@@ -411,6 +411,10 @@ extern void update_mmu_cache(struct vm_area_struct * vma, - - typedef pte_t *pte_addr_t; - -+void update_mmu_tlb(struct vm_area_struct *vma, -+ unsigned long address, pte_t *ptep); -+#define __HAVE_ARCH_UPDATE_MMU_TLB -+ - #endif /* !defined (__ASSEMBLY__) */ - - #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h -index 7f63aca6a0d34..9dd4efe1bf0bd 100644 ---- a/arch/xtensa/include/asm/processor.h -+++ b/arch/xtensa/include/asm/processor.h -@@ -226,8 +226,8 @@ extern unsigned long get_wchan(struct task_struct *p); - - #define xtensa_set_sr(x, sr) \ - ({ \ -- unsigned int v = (unsigned int)(x); \ -- __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \ -+ __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \ -+ "a"((unsigned int)(x))); \ - }) - - #define xtensa_get_sr(sr) \ -diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h -index 233ec75e60c69..3f2462f2d0270 100644 ---- a/arch/xtensa/include/asm/timex.h -+++ b/arch/xtensa/include/asm/timex.h -@@ -29,10 +29,6 @@ - - extern unsigned long ccount_freq; - --typedef unsigned long long cycles_t; -- --#define get_cycles() (0) -- - void local_timer_setup(unsigned cpu); - - /* -@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare) - xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER); - } - -+#include -+ - #endif /* _XTENSA_TIMEX_H */ -diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S -index 45cc0ae0af6f9..c7b9f12896f20 100644 ---- a/arch/xtensa/kernel/coprocessor.S -+++ b/arch/xtensa/kernel/coprocessor.S -@@ -29,7 +29,7 @@ - .if XTENSA_HAVE_COPROCESSOR(x); \ - .align 4; \ - .Lsave_cp_regs_cp##x: \ -- xchal_cp##x##_store a2 a4 a5 a6 a7; \ -+ xchal_cp##x##_store a2 a3 a4 a5 a6; \ - jx a0; \ - .endif - -@@ -46,7 +46,7 @@ - .if XTENSA_HAVE_COPROCESSOR(x); \ - .align 4; \ - .Lload_cp_regs_cp##x: \ -- xchal_cp##x##_load a2 a4 a5 a6 a7; \ -+ xchal_cp##x##_load a2 a3 a4 a5 a6; \ - jx a0; \ - .endif - -diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c -index 61cf6497a646b..ad1841cecdfb7 100644 ---- a/arch/xtensa/kernel/jump_label.c -+++ b/arch/xtensa/kernel/jump_label.c -@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data) - { - struct patch *patch = data; - -- if (atomic_inc_return(&patch->cpu_count) == 1) { -+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { - local_patch_text(patch->addr, patch->data, patch->sz); - atomic_inc(&patch->cpu_count); - } else { -@@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t 
sz) - .data = data, - }; - stop_machine_cpuslocked(patch_text_stop_machine, -- &patch, NULL); -+ &patch, cpu_online_mask); - } else { - unsigned long flags; - -diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c -index a0d05c8598d0f..183618090d05b 100644 ---- a/arch/xtensa/kernel/perf_event.c -+++ b/arch/xtensa/kernel/perf_event.c -@@ -13,17 +13,26 @@ - #include - #include - -+#include - #include - #include - -+#define XTENSA_HWVERSION_RG_2015_0 260000 -+ -+#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0 -+#define XTENSA_PMU_ERI_BASE 0x00101000 -+#else -+#define XTENSA_PMU_ERI_BASE 0x00001000 -+#endif -+ - /* Global control/status for all perf counters */ --#define XTENSA_PMU_PMG 0x1000 -+#define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE - /* Perf counter values */ --#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4) -+#define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4) - /* Perf counter control registers */ --#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4) -+#define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4) - /* Perf counter status registers */ --#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4) -+#define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4) - - #define XTENSA_PMU_PMG_PMEN 0x1 - -diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c -index bb3f4797d212b..db6cdea471d83 100644 ---- a/arch/xtensa/kernel/ptrace.c -+++ b/arch/xtensa/kernel/ptrace.c -@@ -226,12 +226,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) - - void user_enable_single_step(struct task_struct *child) - { -- child->ptrace |= PT_SINGLESTEP; -+ set_tsk_thread_flag(child, TIF_SINGLESTEP); - } - - void user_disable_single_step(struct task_struct *child) - { -- child->ptrace &= ~PT_SINGLESTEP; -+ clear_tsk_thread_flag(child, TIF_SINGLESTEP); - } - - /* -diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c -index c4d77dbfb61af..f2b00f43cf236 100644 ---- a/arch/xtensa/kernel/signal.c -+++ b/arch/xtensa/kernel/signal.c -@@ -465,7 +465,7 @@ static void do_signal(struct pt_regs *regs) - /* Set up the stack frame */ - ret = setup_frame(&ksig, sigmask_to_save(), regs); - signal_setup_done(ret, &ksig, 0); -- if (current->ptrace & PT_SINGLESTEP) -+ if (test_thread_flag(TIF_SINGLESTEP)) - task_pt_regs(current)->icountlevel = 1; - - return; -@@ -491,7 +491,7 @@ static void do_signal(struct pt_regs *regs) - /* If there's no signal to deliver, we just restore the saved mask. 
*/ - restore_saved_sigmask(); - -- if (current->ptrace & PT_SINGLESTEP) -+ if (test_thread_flag(TIF_SINGLESTEP)) - task_pt_regs(current)->icountlevel = 1; - return; - } -diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c -index e8ceb15286081..16b8a6273772c 100644 ---- a/arch/xtensa/kernel/time.c -+++ b/arch/xtensa/kernel/time.c -@@ -154,6 +154,7 @@ static void __init calibrate_ccount(void) - cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); - if (cpu) { - clk = of_clk_get(cpu, 0); -+ of_node_put(cpu); - if (!IS_ERR(clk)) { - ccount_freq = clk_get_rate(clk); - return; -diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c -index 874b6efc6fb31..5624a64ff7e93 100644 ---- a/arch/xtensa/kernel/traps.c -+++ b/arch/xtensa/kernel/traps.c -@@ -510,7 +510,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; - - void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) - { -- size_t len; -+ size_t len, off = 0; - - if (!sp) - sp = stack_pointer(task); -@@ -519,9 +519,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) - kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); - - printk("%sStack:\n", loglvl); -- print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, -- STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, -- sp, len, false); -+ while (off < len) { -+ u8 line[STACK_DUMP_LINE_SIZE]; -+ size_t line_len = len - off > STACK_DUMP_LINE_SIZE ? -+ STACK_DUMP_LINE_SIZE : len - off; -+ -+ __memcpy(line, (u8 *)sp + off, line_len); -+ print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE, -+ STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, -+ line, line_len, false); -+ off += STACK_DUMP_LINE_SIZE; -+ } - show_trace(task, sp, loglvl); - } - -@@ -552,5 +560,5 @@ void die(const char * str, struct pt_regs * regs, long err) - if (panic_on_oops) - panic("Fatal exception"); - -- do_exit(err); -+ make_task_dead(err); - } -diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c -index f436cf2efd8b7..27a477dae2322 100644 ---- a/arch/xtensa/mm/tlb.c -+++ b/arch/xtensa/mm/tlb.c -@@ -162,6 +162,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) - } - } - -+void update_mmu_tlb(struct vm_area_struct *vma, -+ unsigned long address, pte_t *ptep) -+{ -+ local_flush_tlb_page(vma, address); -+} -+ - #ifdef CONFIG_DEBUG_TLB_SANITY - - static unsigned get_pte_for_vaddr(unsigned vaddr) -diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c -index 81d7c7e8f7e96..10b79d3c74e07 100644 ---- a/arch/xtensa/platforms/iss/console.c -+++ b/arch/xtensa/platforms/iss/console.c -@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *); - static struct tty_driver *serial_driver; - static struct tty_port serial_port; - static DEFINE_TIMER(serial_timer, rs_poll); --static DEFINE_SPINLOCK(timer_lock); - - static int rs_open(struct tty_struct *tty, struct file * filp) - { -- spin_lock_bh(&timer_lock); - if (tty->count == 1) - mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); -- spin_unlock_bh(&timer_lock); - - return 0; - } - - static void rs_close(struct tty_struct *tty, struct file * filp) - { -- spin_lock_bh(&timer_lock); - if (tty->count == 1) - del_timer_sync(&serial_timer); -- spin_unlock_bh(&timer_lock); - } - - -@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused) - int rd = 1; - unsigned char c; - -- spin_lock(&timer_lock); -- - while (simc_poll(0)) { - rd = simc_read(0, &c, 1); - if (rd <= 0) -@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused) - 
tty_flip_buffer_push(port); - if (rd) - mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); -- spin_unlock(&timer_lock); - } - - -diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c -index 4986226a5ab26..1270de83435eb 100644 ---- a/arch/xtensa/platforms/iss/network.c -+++ b/arch/xtensa/platforms/iss/network.c -@@ -231,7 +231,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) - - init += sizeof(TRANSPORT_TUNTAP_NAME) - 1; - if (*init == ',') { -- rem = split_if_spec(init + 1, &mac_str, &dev_name); -+ rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL); - if (rem != NULL) { - pr_err("%s: extra garbage on specification : '%s'\n", - dev->name, rem); -@@ -502,16 +502,24 @@ static const struct net_device_ops iss_netdev_ops = { - .ndo_set_rx_mode = iss_net_set_multicast_list, - }; - --static int iss_net_configure(int index, char *init) -+static void iss_net_pdev_release(struct device *dev) -+{ -+ struct platform_device *pdev = to_platform_device(dev); -+ struct iss_net_private *lp = -+ container_of(pdev, struct iss_net_private, pdev); -+ -+ free_netdev(lp->dev); -+} -+ -+static void iss_net_configure(int index, char *init) - { - struct net_device *dev; - struct iss_net_private *lp; -- int err; - - dev = alloc_etherdev(sizeof(*lp)); - if (dev == NULL) { - pr_err("eth_configure: failed to allocate device\n"); -- return 1; -+ return; - } - - /* Initialize private element. */ -@@ -540,7 +548,7 @@ static int iss_net_configure(int index, char *init) - if (!tuntap_probe(lp, index, init)) { - pr_err("%s: invalid arguments. Skipping device!\n", - dev->name); -- goto errout; -+ goto err_free_netdev; - } - - pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr); -@@ -548,7 +556,8 @@ static int iss_net_configure(int index, char *init) - /* sysfs register */ - - if (!driver_registered) { -- platform_driver_register(&iss_net_driver); -+ if (platform_driver_register(&iss_net_driver)) -+ goto err_free_netdev; - driver_registered = 1; - } - -@@ -558,7 +567,9 @@ static int iss_net_configure(int index, char *init) - - lp->pdev.id = index; - lp->pdev.name = DRIVER_NAME; -- platform_device_register(&lp->pdev); -+ lp->pdev.dev.release = iss_net_pdev_release; -+ if (platform_device_register(&lp->pdev)) -+ goto err_free_netdev; - SET_NETDEV_DEV(dev, &lp->pdev.dev); - - dev->netdev_ops = &iss_netdev_ops; -@@ -567,23 +578,20 @@ static int iss_net_configure(int index, char *init) - dev->irq = -1; - - rtnl_lock(); -- err = register_netdevice(dev); -- rtnl_unlock(); -- -- if (err) { -+ if (register_netdevice(dev)) { -+ rtnl_unlock(); - pr_err("%s: error registering net device!\n", dev->name); -- /* XXX: should we call ->remove() here? */ -- free_netdev(dev); -- return 1; -+ platform_device_unregister(&lp->pdev); -+ return; - } -+ rtnl_unlock(); - - timer_setup(&lp->tl, iss_net_user_timer_expire, 0); - -- return 0; -+ return; - --errout: -- /* FIXME: unregister; free, etc.. 
*/ -- return -EIO; -+err_free_netdev: -+ free_netdev(dev); - } - - /* ------------------------------------------------------------------------- */ -diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c -index 3cdfa00738e07..edb27649851f9 100644 ---- a/arch/xtensa/platforms/iss/simdisk.c -+++ b/arch/xtensa/platforms/iss/simdisk.c -@@ -212,12 +212,18 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, - struct simdisk *dev = PDE_DATA(file_inode(file)); - const char *s = dev->filename; - if (s) { -- ssize_t n = simple_read_from_buffer(buf, size, ppos, -- s, strlen(s)); -- if (n < 0) -- return n; -- buf += n; -- size -= n; -+ ssize_t len = strlen(s); -+ char *temp = kmalloc(len + 2, GFP_KERNEL); -+ -+ if (!temp) -+ return -ENOMEM; -+ -+ len = scnprintf(temp, len + 2, "%s\n", s); -+ len = simple_read_from_buffer(buf, size, ppos, -+ temp, len); -+ -+ kfree(temp); -+ return len; - } - return simple_read_from_buffer(buf, size, ppos, "\n", 1); - } -diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c -index 538e6748e85a7..c79c1d09ea863 100644 ---- a/arch/xtensa/platforms/xtfpga/setup.c -+++ b/arch/xtensa/platforms/xtfpga/setup.c -@@ -133,6 +133,7 @@ static int __init machine_setup(void) - - if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) - update_local_mac(eth); -+ of_node_put(eth); - return 0; - } - arch_initcall(machine_setup); -diff --git a/block/Makefile b/block/Makefile -index 41aa1ba69c900..74df168729ecb 100644 ---- a/block/Makefile -+++ b/block/Makefile -@@ -3,7 +3,7 @@ - # Makefile for the kernel block layer - # - --obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ -+obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ - blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ - blk-exec.o blk-merge.o blk-timeout.o \ - blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ -diff --git a/block/bdev.c b/block/bdev.c -index 485a258b0ab37..b8599a4088843 100644 ---- a/block/bdev.c -+++ b/block/bdev.c -@@ -184,14 +184,13 @@ int sb_min_blocksize(struct super_block *sb, int size) - - EXPORT_SYMBOL(sb_min_blocksize); - --int __sync_blockdev(struct block_device *bdev, int wait) -+int sync_blockdev_nowait(struct block_device *bdev) - { - if (!bdev) - return 0; -- if (!wait) -- return filemap_flush(bdev->bd_inode->i_mapping); -- return filemap_write_and_wait(bdev->bd_inode->i_mapping); -+ return filemap_flush(bdev->bd_inode->i_mapping); - } -+EXPORT_SYMBOL_GPL(sync_blockdev_nowait); - - /* - * Write out and wait upon all the dirty data associated with a block -@@ -199,7 +198,9 @@ int __sync_blockdev(struct block_device *bdev, int wait) - */ - int sync_blockdev(struct block_device *bdev) - { -- return __sync_blockdev(bdev, 1); -+ if (!bdev) -+ return 0; -+ return filemap_write_and_wait(bdev->bd_inode->i_mapping); - } - EXPORT_SYMBOL(sync_blockdev); - -@@ -834,7 +835,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) - * used in blkdev_get/put(). 
- */ - if ((mode & FMODE_WRITE) && !bdev->bd_write_holder && -- (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { -+ (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { - bdev->bd_write_holder = true; - unblock_events = false; - } -@@ -1016,7 +1017,7 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty) - } - EXPORT_SYMBOL(__invalidate_device); - --void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) -+void sync_bdevs(bool wait) - { - struct inode *inode, *old_inode = NULL; - -@@ -1047,8 +1048,19 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) - bdev = I_BDEV(inode); - - mutex_lock(&bdev->bd_disk->open_mutex); -- if (bdev->bd_openers) -- func(bdev, arg); -+ if (!bdev->bd_openers) { -+ ; /* skip */ -+ } else if (wait) { -+ /* -+ * We keep the error status of individual mapping so -+ * that applications can catch the writeback error using -+ * fsync(2). See filemap_fdatawait_keep_errors() for -+ * details. -+ */ -+ filemap_fdatawait_keep_errors(inode->i_mapping); -+ } else { -+ filemap_fdatawrite(inode->i_mapping); -+ } - mutex_unlock(&bdev->bd_disk->open_mutex); - - spin_lock(&blockdev_superblock->s_inode_list_lock); -diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c -index 85b8e1c3a762d..53e275e377a73 100644 ---- a/block/bfq-cgroup.c -+++ b/block/bfq-cgroup.c -@@ -555,6 +555,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) - */ - bfqg->bfqd = bfqd; - bfqg->active_entities = 0; -+ bfqg->online = true; - bfqg->rq_pos_tree = RB_ROOT; - } - -@@ -583,28 +584,11 @@ static void bfq_group_set_parent(struct bfq_group *bfqg, - entity->sched_data = &parent->sched_data; - } - --static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd, -- struct blkcg *blkcg) -+static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg) - { -- struct blkcg_gq *blkg; -- -- blkg = blkg_lookup(blkcg, bfqd->queue); -- if (likely(blkg)) -- return blkg_to_bfqg(blkg); -- return NULL; --} -- --struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -- struct blkcg *blkcg) --{ -- struct bfq_group *bfqg, *parent; -+ struct bfq_group *parent; - struct bfq_entity *entity; - -- bfqg = bfq_lookup_bfqg(bfqd, blkcg); -- -- if (unlikely(!bfqg)) -- return NULL; -- - /* - * Update chain of bfq_groups as we might be handling a leaf group - * which, along with some of its relatives, has not been hooked yet -@@ -621,8 +605,28 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, - bfq_group_set_parent(curr_bfqg, parent); - } - } -+} - -- return bfqg; -+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) -+{ -+ struct blkcg_gq *blkg = bio->bi_blkg; -+ struct bfq_group *bfqg; -+ -+ while (blkg) { -+ if (!blkg->online) { -+ blkg = blkg->parent; -+ continue; -+ } -+ bfqg = blkg_to_bfqg(blkg); -+ if (bfqg->online) { -+ bio_associate_blkg_from_css(bio, &blkg->blkcg->css); -+ return bfqg; -+ } -+ blkg = blkg->parent; -+ } -+ bio_associate_blkg_from_css(bio, -+ &bfqg_to_blkg(bfqd->root_group)->blkcg->css); -+ return bfqd->root_group; - } - - /** -@@ -644,6 +648,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - { - struct bfq_entity *entity = &bfqq->entity; - -+ /* -+ * oom_bfqq is not allowed to move, oom_bfqq will hold ref to root_group -+ * until elevator exit. -+ */ -+ if (bfqq == &bfqd->oom_bfqq) -+ return; - /* - * Get extra reference to prevent bfqq from being freed in - * next possible expire or deactivate. 
-@@ -698,38 +708,58 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * Move bic to blkcg, assuming that bfqd->lock is held; which makes - * sure that the reference to cgroup is valid across the call (see - * comments in bfq_bic_update_cgroup on this issue) -- * -- * NOTE: an alternative approach might have been to store the current -- * cgroup in bfqq and getting a reference to it, reducing the lookup -- * time here, at the price of slightly more complex code. - */ --static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, -- struct bfq_io_cq *bic, -- struct blkcg *blkcg) -+static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, -+ struct bfq_group *bfqg) - { -- struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); -- struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); -- struct bfq_group *bfqg; -+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false); -+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true); - struct bfq_entity *entity; - -- bfqg = bfq_find_set_group(bfqd, blkcg); -- -- if (unlikely(!bfqg)) -- bfqg = bfqd->root_group; -- - if (async_bfqq) { - entity = &async_bfqq->entity; - - if (entity->sched_data != &bfqg->sched_data) { -- bic_set_bfqq(bic, NULL, 0); -+ bic_set_bfqq(bic, NULL, false); - bfq_release_process_ref(bfqd, async_bfqq); - } - } - - if (sync_bfqq) { -- entity = &sync_bfqq->entity; -- if (entity->sched_data != &bfqg->sched_data) -- bfq_bfqq_move(bfqd, sync_bfqq, bfqg); -+ if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) { -+ /* We are the only user of this bfqq, just move it */ -+ if (sync_bfqq->entity.sched_data != &bfqg->sched_data) -+ bfq_bfqq_move(bfqd, sync_bfqq, bfqg); -+ } else { -+ struct bfq_queue *bfqq; -+ -+ /* -+ * The queue was merged to a different queue. Check -+ * that the merge chain still belongs to the same -+ * cgroup. -+ */ -+ for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq) -+ if (bfqq->entity.sched_data != -+ &bfqg->sched_data) -+ break; -+ if (bfqq) { -+ /* -+ * Some queue changed cgroup so the merge is -+ * not valid anymore. We cannot easily just -+ * cancel the merge (by clearing new_bfqq) as -+ * there may be other processes using this -+ * queue and holding refs to all queues below -+ * sync_bfqq->new_bfqq. Similarly if the merge -+ * already happened, we need to detach from -+ * bfqq now so that we cannot merge bio to a -+ * request from the old cgroup. -+ */ -+ bfq_put_cooperator(sync_bfqq); -+ bic_set_bfqq(bic, NULL, true); -+ bfq_release_process_ref(bfqd, sync_bfqq); -+ } -+ } - } - - return bfqg; -@@ -738,20 +768,24 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, - void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) - { - struct bfq_data *bfqd = bic_to_bfqd(bic); -- struct bfq_group *bfqg = NULL; -+ struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio); - uint64_t serial_nr; - -- rcu_read_lock(); -- serial_nr = __bio_blkcg(bio)->css.serial_nr; -+ serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr; - - /* - * Check whether blkcg has changed. The condition may trigger - * spuriously on a newly created cic but there's no harm. - */ - if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) -- goto out; -+ return; - -- bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio)); -+ /* -+ * New cgroup for this process. Make sure it is linked to bfq internal -+ * cgroup hierarchy. -+ */ -+ bfq_link_bfqg(bfqd, bfqg); -+ __bfq_bic_change_cgroup(bfqd, bic, bfqg); - /* - * Update blkg_path for bfq_log_* functions. 
We cache this - * path, and update it here, for the following -@@ -804,8 +838,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) - */ - blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); - bic->blkcg_serial_nr = serial_nr; --out: -- rcu_read_unlock(); - } - - /** -@@ -933,6 +965,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - - put_async_queues: - bfq_put_async_queues(bfqd, bfqg); -+ bfqg->online = false; - - spin_unlock_irqrestore(&bfqd->lock, flags); - /* -@@ -1422,7 +1455,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd) - bfq_end_wr_async_queues(bfqd, bfqd->root_group); - } - --struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg) -+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio) - { - return bfqd->root_group; - } -diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c -index 480e1a1348596..f54554906451e 100644 ---- a/block/bfq-iosched.c -+++ b/block/bfq-iosched.c -@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq); - - void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) - { -+ struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; -+ -+ /* Clear bic pointer if bfqq is detached from this bic */ -+ if (old_bfqq && old_bfqq->bic == bic) -+ old_bfqq->bic = NULL; -+ - /* - * If bfqq != NULL, then a non-stable queue merge between - * bic->bfqq and bfqq is happening here. This causes troubles -@@ -461,6 +467,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, - */ - void bfq_schedule_dispatch(struct bfq_data *bfqd) - { -+ lockdep_assert_held(&bfqd->lock); -+ - if (bfqd->queued != 0) { - bfq_log(bfqd, "schedule dispatch"); - blk_mq_run_hw_queues(bfqd->queue, true); -@@ -2022,9 +2030,7 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (!bfqd->last_completed_rq_bfqq || - bfqd->last_completed_rq_bfqq == bfqq || - bfq_bfqq_has_short_ttime(bfqq) || -- bfqq->dispatched > 0 || -- now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC || -- bfqd->last_completed_rq_bfqq == bfqq->waker_bfqq) -+ now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC) - return; - - if (bfqd->last_completed_rq_bfqq != -@@ -2084,7 +2090,7 @@ static void bfq_add_request(struct request *rq) - bfqq->queued[rq_is_sync(rq)]++; - bfqd->queued++; - -- if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) { -+ if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) { - bfq_check_waker(bfqd, bfqq, now_ns); - - /* -@@ -2337,10 +2343,17 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, - - spin_lock_irq(&bfqd->lock); - -- if (bic) -+ if (bic) { -+ /* -+ * Make sure cgroup info is uptodate for current process before -+ * considering the merge. 
-+ */ -+ bfq_bic_update_cgroup(bic, bio); -+ - bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -- else -+ } else { - bfqd->bio_bfqq = NULL; -+ } - bfqd->bio_bic = bic; - - ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); -@@ -2370,8 +2383,6 @@ static int bfq_request_merge(struct request_queue *q, struct request **req, - return ELEVATOR_NO_MERGE; - } - --static struct bfq_queue *bfq_init_rq(struct request *rq); -- - static void bfq_request_merged(struct request_queue *q, struct request *req, - enum elv_merge type) - { -@@ -2380,7 +2391,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - blk_rq_pos(req) < - blk_rq_pos(container_of(rb_prev(&req->rb_node), - struct request, rb_node))) { -- struct bfq_queue *bfqq = bfq_init_rq(req); -+ struct bfq_queue *bfqq = RQ_BFQQ(req); - struct bfq_data *bfqd; - struct request *prev, *next_rq; - -@@ -2432,8 +2443,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - static void bfq_requests_merged(struct request_queue *q, struct request *rq, - struct request *next) - { -- struct bfq_queue *bfqq = bfq_init_rq(rq), -- *next_bfqq = bfq_init_rq(next); -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), -+ *next_bfqq = RQ_BFQQ(next); - - if (!bfqq) - goto remove; -@@ -2638,6 +2649,14 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) - if (process_refs == 0 || new_process_refs == 0) - return NULL; - -+ /* -+ * Make sure merged queues belong to the same parent. Parents could -+ * have changed since the time we decided the two queues are suitable -+ * for merging. -+ */ -+ if (new_bfqq->entity.parent != bfqq->entity.parent) -+ return NULL; -+ - bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", - new_bfqq->pid); - -@@ -2662,6 +2681,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) - * are likely to increase the throughput. - */ - bfqq->new_bfqq = new_bfqq; -+ /* -+ * The above assignment schedules the following redirections: -+ * each time some I/O for bfqq arrives, the process that -+ * generated that I/O is disassociated from bfqq and -+ * associated with new_bfqq. Here we increases new_bfqq->ref -+ * in advance, adding the number of processes that are -+ * expected to be associated with new_bfqq as they happen to -+ * issue I/O. -+ */ - new_bfqq->ref += process_refs; - return new_bfqq; - } -@@ -2724,6 +2752,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - { - struct bfq_queue *in_service_bfqq, *new_bfqq; - -+ /* if a merge has already been setup, then proceed with that first */ -+ if (bfqq->new_bfqq) -+ return bfqq->new_bfqq; -+ - /* - * Check delayed stable merge for rotational or non-queueing - * devs. 
For this branch to be executed, bfqq must not be -@@ -2762,9 +2794,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - struct bfq_queue *new_bfqq = - bfq_setup_merge(bfqq, stable_merge_bfqq); - -- bic->stably_merged = true; -- if (new_bfqq && new_bfqq->bic) -- new_bfqq->bic->stably_merged = true; -+ if (new_bfqq) { -+ bic->stably_merged = true; -+ if (new_bfqq->bic) -+ new_bfqq->bic->stably_merged = -+ true; -+ } - return new_bfqq; - } else - return NULL; -@@ -2825,9 +2860,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (bfq_too_late_for_merging(bfqq)) - return NULL; - -- if (bfqq->new_bfqq) -- return bfqq->new_bfqq; -- - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) - return NULL; - -@@ -3014,7 +3046,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - /* - * Merge queues (that is, let bic redirect its requests to new_bfqq) - */ -- bic_set_bfqq(bic, new_bfqq, 1); -+ bic_set_bfqq(bic, new_bfqq, true); - bfq_mark_bfqq_coop(new_bfqq); - /* - * new_bfqq now belongs to at least two bics (it is a shared queue): -@@ -5061,7 +5093,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - struct request *rq; - struct bfq_queue *in_serv_queue; -- bool waiting_rq, idle_timer_disabled; -+ bool waiting_rq, idle_timer_disabled = false; - - spin_lock_irq(&bfqd->lock); - -@@ -5069,14 +5101,15 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); - - rq = __bfq_dispatch_request(hctx); -- -- idle_timer_disabled = -- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -+ if (in_serv_queue == bfqd->in_service_queue) { -+ idle_timer_disabled = -+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -+ } - - spin_unlock_irq(&bfqd->lock); -- -- bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, -- idle_timer_disabled); -+ bfq_update_dispatch_stats(hctx->queue, rq, -+ idle_timer_disabled ? 
in_serv_queue : NULL, -+ idle_timer_disabled); - - return rq; - } -@@ -5173,7 +5206,7 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq) - bfq_put_queue(bfqq); - } - --static void bfq_put_cooperator(struct bfq_queue *bfqq) -+void bfq_put_cooperator(struct bfq_queue *bfqq) - { - struct bfq_queue *__bfqq, *next; - -@@ -5218,9 +5251,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -- bfqq->bic = NULL; -- bfq_exit_bfqq(bfqd, bfqq); - bic_set_bfqq(bic, NULL, is_sync); -+ bfq_exit_bfqq(bfqd, bfqq); - spin_unlock_irqrestore(&bfqd->lock, flags); - } - } -@@ -5327,9 +5359,11 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) - - bfqq = bic_to_bfqq(bic, false); - if (bfqq) { -- bfq_release_process_ref(bfqd, bfqq); -- bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic, true); -+ struct bfq_queue *old_bfqq = bfqq; -+ -+ bfqq = bfq_get_queue(bfqd, bio, false, bic, true); - bic_set_bfqq(bic, bfqq, false); -+ bfq_release_process_ref(bfqd, old_bfqq); - } - - bfqq = bic_to_bfqq(bic, true); -@@ -5579,14 +5613,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - struct bfq_queue *bfqq; - struct bfq_group *bfqg; - -- rcu_read_lock(); -- -- bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); -- if (!bfqg) { -- bfqq = &bfqd->oom_bfqq; -- goto out; -- } -- -+ bfqg = bfq_bio_bfqg(bfqd, bio); - if (!is_sync) { - async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, - ioprio); -@@ -5632,8 +5659,6 @@ out: - - if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn) - bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic); -- -- rcu_read_unlock(); - return bfqq; - } - -@@ -5964,6 +5989,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q, - unsigned int cmd_flags) {} - #endif /* CONFIG_BFQ_CGROUP_DEBUG */ - -+static struct bfq_queue *bfq_init_rq(struct request *rq); -+ - static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head) - { -@@ -5979,60 +6006,16 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bfqg_stats_update_legacy_io(q, rq); - #endif - spin_lock_irq(&bfqd->lock); -+ bfqq = bfq_init_rq(rq); - if (blk_mq_sched_try_insert_merge(q, rq, &free)) { - spin_unlock_irq(&bfqd->lock); - blk_mq_free_requests(&free); - return; - } - -- spin_unlock_irq(&bfqd->lock); -- - trace_block_rq_insert(rq); - -- spin_lock_irq(&bfqd->lock); -- bfqq = bfq_init_rq(rq); -- -- /* -- * Reqs with at_head or passthrough flags set are to be put -- * directly into dispatch list. Additional case for putting rq -- * directly into the dispatch queue: the only active -- * bfq_queues are bfqq and either its waker bfq_queue or one -- * of its woken bfq_queues. The rationale behind this -- * additional condition is as follows: -- * - consider a bfq_queue, say Q1, detected as a waker of -- * another bfq_queue, say Q2 -- * - by definition of a waker, Q1 blocks the I/O of Q2, i.e., -- * some I/O of Q1 needs to be completed for new I/O of Q2 -- * to arrive. A notable example of waker is journald -- * - so, Q1 and Q2 are in any respect the queues of two -- * cooperating processes (or of two cooperating sets of -- * processes): the goal of Q1's I/O is doing what needs to -- * be done so that new Q2's I/O can finally be -- * issued. Therefore, if the service of Q1's I/O is delayed, -- * then Q2's I/O is delayed too. Conversely, if Q2's I/O is -- * delayed, the goal of Q1's I/O is hindered. 
-- * - as a consequence, if some I/O of Q1/Q2 arrives while -- * Q2/Q1 is the only queue in service, there is absolutely -- * no point in delaying the service of such an I/O. The -- * only possible result is a throughput loss -- * - so, when the above condition holds, the best option is to -- * have the new I/O dispatched as soon as possible -- * - the most effective and efficient way to attain the above -- * goal is to put the new I/O directly in the dispatch -- * list -- * - as an additional restriction, Q1 and Q2 must be the only -- * busy queues for this commit to put the I/O of Q2/Q1 in -- * the dispatch list. This is necessary, because, if also -- * other queues are waiting for service, then putting new -- * I/O directly in the dispatch list may evidently cause a -- * violation of service guarantees for the other queues -- */ -- if (!bfqq || -- (bfqq != bfqd->in_service_queue && -- bfqd->in_service_queue != NULL && -- bfq_tot_busy_queues(bfqd) == 1 + bfq_bfqq_busy(bfqq) && -- (bfqq->waker_bfqq == bfqd->in_service_queue || -- bfqd->in_service_queue->waker_bfqq == bfqq)) || at_head) { -+ if (!bfqq || at_head) { - if (at_head) - list_add(&rq->queuelist, &bfqd->dispatch); - else -@@ -6059,7 +6042,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - * merge). - */ - cmd_flags = rq->cmd_flags; -- - spin_unlock_irq(&bfqd->lock); - - bfq_update_insert_stats(q, bfqq, idle_timer_disabled, -@@ -6453,6 +6435,7 @@ static void bfq_finish_requeue_request(struct request *rq) - bfq_completed_request(bfqq, bfqd); - } - bfq_finish_requeue_request_body(bfqq); -+ RQ_BIC(rq)->requests--; - spin_unlock_irqrestore(&bfqd->lock, flags); - - /* -@@ -6494,7 +6477,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) - return bfqq; - } - -- bic_set_bfqq(bic, NULL, 1); -+ bic_set_bfqq(bic, NULL, true); - - bfq_put_cooperator(bfqq); - -@@ -6654,6 +6637,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, - true, is_sync, - NULL); -+ if (unlikely(bfqq == &bfqd->oom_bfqq)) -+ bfqq_already_existing = true; -+ } else -+ bfqq_already_existing = true; -+ -+ if (!bfqq_already_existing) { - bfqq->waker_bfqq = old_bfqq->waker_bfqq; - bfqq->tentative_waker_bfqq = NULL; - -@@ -6667,13 +6656,13 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) - if (bfqq->waker_bfqq) - hlist_add_head(&bfqq->woken_list_node, - &bfqq->waker_bfqq->woken_list); -- } else -- bfqq_already_existing = true; -+ } - } - } - - bfqq->allocated++; - bfqq->ref++; -+ bic->requests++; - bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", - rq, bfqq, bfqq->ref); - -@@ -6770,8 +6759,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_bfqq_expire(bfqd, bfqq, true, reason); - - schedule_dispatch: -- spin_unlock_irqrestore(&bfqd->lock, flags); - bfq_schedule_dispatch(bfqd); -+ spin_unlock_irqrestore(&bfqd->lock, flags); - } - - /* -@@ -6920,6 +6909,8 @@ static void bfq_exit_queue(struct elevator_queue *e) - spin_unlock_irq(&bfqd->lock); - #endif - -+ wbt_enable_default(bfqd->queue); -+ - kfree(bfqd); - } - -diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h -index a73488eec8a47..2bd696aaf02cc 100644 ---- a/block/bfq-iosched.h -+++ b/block/bfq-iosched.h -@@ -466,6 +466,7 @@ struct bfq_io_cq { - struct bfq_queue *stable_merge_bfqq; - - bool stably_merged; /* non splittable if true */ -+ unsigned int requests; /* Number of requests this process has in flight */ - }; - - /** -@@ -925,6 +926,8 @@ struct bfq_group 
{ - - /* reference counter (see comments in bfq_bic_update_cgroup) */ - int ref; -+ /* Is bfq_group still online? */ -+ bool online; - - struct bfq_entity entity; - struct bfq_sched_data sched_data; -@@ -976,6 +979,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, - void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bool compensate, enum bfqq_expiration reason); - void bfq_put_queue(struct bfq_queue *bfqq); -+void bfq_put_cooperator(struct bfq_queue *bfqq); - void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); - void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq); - void bfq_schedule_dispatch(struct bfq_data *bfqd); -@@ -1003,8 +1007,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg); - void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio); - void bfq_end_wr_async(struct bfq_data *bfqd); --struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -- struct blkcg *blkcg); -+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio); - struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); -diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c -index b74cc0da118ec..709b901de3ca9 100644 ---- a/block/bfq-wf2q.c -+++ b/block/bfq-wf2q.c -@@ -519,7 +519,7 @@ unsigned short bfq_ioprio_to_weight(int ioprio) - static unsigned short bfq_weight_to_ioprio(int weight) - { - return max_t(int, 0, -- IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF - weight); -+ IOPRIO_NR_LEVELS - weight / BFQ_WEIGHT_CONVERSION_COEFF); - } - - static void bfq_get_entity(struct bfq_entity *entity) -diff --git a/block/bio-integrity.c b/block/bio-integrity.c -index 6b47cddbbca17..4f34ac27c47dd 100644 ---- a/block/bio-integrity.c -+++ b/block/bio-integrity.c -@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) - struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); - unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); - -- bip->bip_iter.bi_sector += bytes_done >> 9; -+ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9); - bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); - } - -@@ -417,6 +417,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, - - bip->bip_vcnt = bip_src->bip_vcnt; - bip->bip_iter = bip_src->bip_iter; -+ bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY; - - return 0; - } -diff --git a/block/bio.c b/block/bio.c -index a6fb6a0b42955..ba9120d4fe499 100644 ---- a/block/bio.c -+++ b/block/bio.c -@@ -567,7 +567,8 @@ void bio_truncate(struct bio *bio, unsigned new_size) - offset = new_size - done; - else - offset = 0; -- zero_user(bv.bv_page, offset, bv.bv_len - offset); -+ zero_user(bv.bv_page, bv.bv_offset + offset, -+ bv.bv_len - offset); - truncated = true; - } - done += bv.bv_len; -@@ -664,6 +665,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs) - bio_alloc_cache_prune(cache, -1U); - } - free_percpu(bs->cache); -+ bs->cache = NULL; - } - - /** -@@ -908,7 +910,7 @@ EXPORT_SYMBOL(bio_add_pc_page); - int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) - { -- struct request_queue *q = bio->bi_bdev->bd_disk->queue; -+ struct request_queue *q = bdev_get_queue(bio->bi_bdev); - bool same_page = false; - - if 
(WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) -@@ -1052,7 +1054,7 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) - - static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) - { -- struct request_queue *q = bio->bi_bdev->bd_disk->queue; -+ struct request_queue *q = bdev_get_queue(bio->bi_bdev); - struct iov_iter i = *iter; - - iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); -@@ -1069,6 +1071,37 @@ static void bio_put_pages(struct page **pages, size_t size, size_t off) - put_page(pages[i]); - } - -+static int bio_iov_add_page(struct bio *bio, struct page *page, -+ unsigned int len, unsigned int offset) -+{ -+ bool same_page = false; -+ -+ if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { -+ if (WARN_ON_ONCE(bio_full(bio, len))) -+ return -EINVAL; -+ __bio_add_page(bio, page, len, offset); -+ return 0; -+ } -+ -+ if (same_page) -+ put_page(page); -+ return 0; -+} -+ -+static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, -+ unsigned int len, unsigned int offset) -+{ -+ struct request_queue *q = bdev_get_queue(bio->bi_bdev); -+ bool same_page = false; -+ -+ if (bio_add_hw_page(q, bio, page, len, offset, -+ queue_max_zone_append_sectors(q), &same_page) != len) -+ return -EINVAL; -+ if (same_page) -+ put_page(page); -+ return 0; -+} -+ - #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) - - /** -@@ -1087,61 +1120,11 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) - unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; - struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; - struct page **pages = (struct page **)bv; -- bool same_page = false; -- ssize_t size, left; -- unsigned len, i; -- size_t offset; -- -- /* -- * Move page array up in the allocated memory for the bio vecs as far as -- * possible so that we can start filling biovecs from the beginning -- * without overwriting the temporary page array. -- */ -- BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); -- pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); -- -- size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); -- if (unlikely(size <= 0)) -- return size ? 
size : -EFAULT; -- -- for (left = size, i = 0; left > 0; left -= len, i++) { -- struct page *page = pages[i]; -- -- len = min_t(size_t, PAGE_SIZE - offset, left); -- -- if (__bio_try_merge_page(bio, page, len, offset, &same_page)) { -- if (same_page) -- put_page(page); -- } else { -- if (WARN_ON_ONCE(bio_full(bio, len))) { -- bio_put_pages(pages + i, left, offset); -- return -EINVAL; -- } -- __bio_add_page(bio, page, len, offset); -- } -- offset = 0; -- } -- -- iov_iter_advance(iter, size); -- return 0; --} -- --static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) --{ -- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; -- unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; -- struct request_queue *q = bio->bi_bdev->bd_disk->queue; -- unsigned int max_append_sectors = queue_max_zone_append_sectors(q); -- struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; -- struct page **pages = (struct page **)bv; - ssize_t size, left; - unsigned len, i; - size_t offset; - int ret = 0; - -- if (WARN_ON_ONCE(!max_append_sectors)) -- return 0; -- - /* - * Move page array up in the allocated memory for the bio vecs as far as - * possible so that we can start filling biovecs from the beginning -@@ -1156,17 +1139,18 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) - - for (left = size, i = 0; left > 0; left -= len, i++) { - struct page *page = pages[i]; -- bool same_page = false; - - len = min_t(size_t, PAGE_SIZE - offset, left); -- if (bio_add_hw_page(q, bio, page, len, offset, -- max_append_sectors, &same_page) != len) { -+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) -+ ret = bio_iov_add_zone_append_page(bio, page, len, -+ offset); -+ else -+ ret = bio_iov_add_page(bio, page, len, offset); -+ -+ if (ret) { - bio_put_pages(pages + i, left, offset); -- ret = -EINVAL; - break; - } -- if (same_page) -- put_page(page); - offset = 0; - } - -@@ -1208,10 +1192,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) - } - - do { -- if (bio_op(bio) == REQ_OP_ZONE_APPEND) -- ret = __bio_iov_append_get_pages(bio, iter); -- else -- ret = __bio_iov_iter_get_pages(bio, iter); -+ ret = __bio_iov_iter_get_pages(bio, iter); - } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); - - /* don't account direct I/O as memory stall */ -@@ -1288,10 +1269,12 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, - struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); - struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); - unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); -- void *src_buf; -+ void *src_buf = bvec_kmap_local(&src_bv); -+ void *dst_buf = bvec_kmap_local(&dst_bv); -+ -+ memcpy(dst_buf, src_buf, bytes); - -- src_buf = bvec_kmap_local(&src_bv); -- memcpy_to_bvec(&dst_bv, src_buf); -+ kunmap_local(dst_buf); - kunmap_local(src_buf); - - bio_advance_iter_single(src, src_iter, bytes); -@@ -1466,11 +1449,10 @@ again: - if (!bio_integrity_endio(bio)) - return; - -- if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) -- rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio); -+ rq_qos_done_bio(bio); - - if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { -- trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio); -+ trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); - bio_clear_flag(bio, BIO_TRACE_COMPLETION); - } - -@@ -1551,7 +1533,7 @@ EXPORT_SYMBOL(bio_split); - void bio_trim(struct bio *bio, sector_t offset, sector_t size) - { - if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > 
BIO_MAX_SECTORS || -- offset + size > bio->bi_iter.bi_size)) -+ offset + size > bio_sectors(bio))) - return; - - size <<= 9; -diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c -index 9a1c5839dd469..3ee4c1217b636 100644 ---- a/block/blk-cgroup.c -+++ b/block/blk-cgroup.c -@@ -633,6 +633,14 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - - q = bdev->bd_disk->queue; - -+ /* -+ * blkcg_deactivate_policy() requires queue to be frozen, we can grab -+ * q_usage_counter to prevent concurrent with blkcg_deactivate_policy(). -+ */ -+ ret = blk_queue_enter(q, 0); -+ if (ret) -+ goto fail; -+ - rcu_read_lock(); - spin_lock_irq(&q->queue_lock); - -@@ -667,13 +675,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - new_blkg = blkg_alloc(pos, q, GFP_KERNEL); - if (unlikely(!new_blkg)) { - ret = -ENOMEM; -- goto fail; -+ goto fail_exit_queue; - } - - if (radix_tree_preload(GFP_KERNEL)) { - blkg_free(new_blkg); - ret = -ENOMEM; -- goto fail; -+ goto fail_exit_queue; - } - - rcu_read_lock(); -@@ -702,6 +710,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - goto success; - } - success: -+ blk_queue_exit(q); - ctx->bdev = bdev; - ctx->blkg = blkg; - ctx->body = input; -@@ -712,6 +721,8 @@ fail_preloaded: - fail_unlock: - spin_unlock_irq(&q->queue_lock); - rcu_read_unlock(); -+fail_exit_queue: -+ blk_queue_exit(q); - fail: - blkdev_put_no_open(bdev); - /* -@@ -844,11 +855,11 @@ static void blkcg_fill_root_iostats(void) - blk_queue_root_blkg(bdev->bd_disk->queue); - struct blkg_iostat tmp; - int cpu; -+ unsigned long flags; - - memset(&tmp, 0, sizeof(tmp)); - for_each_possible_cpu(cpu) { - struct disk_stats *cpu_dkstats; -- unsigned long flags; - - cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu); - tmp.ios[BLKG_IOSTAT_READ] += -@@ -864,11 +875,11 @@ static void blkcg_fill_root_iostats(void) - cpu_dkstats->sectors[STAT_WRITE] << 9; - tmp.bytes[BLKG_IOSTAT_DISCARD] += - cpu_dkstats->sectors[STAT_DISCARD] << 9; -- -- flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); -- blkg_iostat_set(&blkg->iostat.cur, &tmp); -- u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); - } -+ -+ flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); -+ blkg_iostat_set(&blkg->iostat.cur, &tmp); -+ u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); - } - } - -@@ -1349,6 +1360,10 @@ retry: - list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) - pol->pd_init_fn(blkg->pd[pol->plid]); - -+ if (pol->pd_online_fn) -+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) -+ pol->pd_online_fn(blkg->pd[pol->plid]); -+ - __set_bit(pol->plid, q->blkcg_pols); - ret = 0; - -@@ -1875,12 +1890,8 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg); - */ - void bio_clone_blkg_association(struct bio *dst, struct bio *src) - { -- if (src->bi_blkg) { -- if (dst->bi_blkg) -- blkg_put(dst->bi_blkg); -- blkg_get(src->bi_blkg); -- dst->bi_blkg = src->bi_blkg; -- } -+ if (src->bi_blkg) -+ bio_associate_blkg_from_css(dst, &bio_blkcg(src)->css); - } - EXPORT_SYMBOL_GPL(bio_clone_blkg_association); - -diff --git a/block/blk-core.c b/block/blk-core.c -index 4d8f5fe915887..0c4a4e42ad870 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -49,6 +49,7 @@ - #include "blk-mq.h" - #include "blk-mq-sched.h" - #include "blk-pm.h" -+#include "blk-rq-qos.h" - - struct dentry *blk_debugfs_root; - -@@ -350,13 +351,6 @@ void blk_queue_start_drain(struct request_queue *q) - wake_up_all(&q->mq_freeze_wq); - } - --void blk_set_queue_dying(struct request_queue 
*q) --{ -- blk_queue_flag_set(QUEUE_FLAG_DYING, q); -- blk_queue_start_drain(q); --} --EXPORT_SYMBOL_GPL(blk_set_queue_dying); -- - /** - * blk_cleanup_queue - shutdown a request queue - * @q: request queue to shutdown -@@ -374,7 +368,8 @@ void blk_cleanup_queue(struct request_queue *q) - WARN_ON_ONCE(blk_queue_registered(q)); - - /* mark @q DYING, no new request or merges will be allowed afterwards */ -- blk_set_queue_dying(q); -+ blk_queue_flag_set(QUEUE_FLAG_DYING, q); -+ blk_queue_start_drain(q); - - blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); - blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); -@@ -386,11 +381,16 @@ void blk_cleanup_queue(struct request_queue *q) - */ - blk_freeze_queue(q); - -+ /* cleanup rq qos structures for queue without disk */ -+ rq_qos_exit(q); -+ - blk_queue_flag_set(QUEUE_FLAG_DEAD, q); - - blk_sync_queue(q); -- if (queue_is_mq(q)) -+ if (queue_is_mq(q)) { -+ blk_mq_cancel_work_sync(q); - blk_mq_exit_queue(q); -+ } - - /* - * In theory, request pool of sched_tags belongs to request queue. -@@ -447,7 +447,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) - - while (!blk_try_enter_queue(q, pm)) { - if (flags & BLK_MQ_REQ_NOWAIT) -- return -EBUSY; -+ return -EAGAIN; - - /* - * read pair of barrier in blk_freeze_queue_start(), we need to -@@ -478,7 +478,7 @@ static inline int bio_queue_enter(struct bio *bio) - if (test_bit(GD_DEAD, &disk->state)) - goto dead; - bio_wouldblock_error(bio); -- return -EBUSY; -+ return -EAGAIN; - } - - /* -@@ -698,14 +698,10 @@ static inline bool should_fail_request(struct block_device *part, - static inline bool bio_check_ro(struct bio *bio) - { - if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) { -- char b[BDEVNAME_SIZE]; -- - if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) - return false; -- -- WARN_ONCE(1, -- "Trying to write to read-only block-device %s (partno %d)\n", -- bio_devname(bio, b), bio->bi_bdev->bd_partno); -+ pr_warn("Trying to write to read-only block-device %pg\n", -+ bio->bi_bdev); - /* Older lvm-tools actually trigger this */ - return false; - } -@@ -887,10 +883,8 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio) - if (unlikely(!current->io_context)) - create_task_io_context(current, GFP_ATOMIC, q->node); - -- if (blk_throtl_bio(bio)) { -- blkcg_bio_issue_init(bio); -+ if (blk_throtl_bio(bio)) - return false; -- } - - blk_cgroup_bio_start(bio); - blkcg_bio_issue_init(bio); -@@ -1293,21 +1287,33 @@ void blk_account_io_start(struct request *rq) - } - - static unsigned long __part_start_io_acct(struct block_device *part, -- unsigned int sectors, unsigned int op) -+ unsigned int sectors, unsigned int op, -+ unsigned long start_time) - { - const int sgrp = op_stat_group(op); -- unsigned long now = READ_ONCE(jiffies); - - part_stat_lock(); -- update_io_ticks(part, now, false); -+ update_io_ticks(part, start_time, false); - part_stat_inc(part, ios[sgrp]); - part_stat_add(part, sectors[sgrp], sectors); - part_stat_local_inc(part, in_flight[op_is_write(op)]); - part_stat_unlock(); - -- return now; -+ return start_time; - } - -+/** -+ * bio_start_io_acct_time - start I/O accounting for bio based drivers -+ * @bio: bio to start account for -+ * @start_time: start time that should be passed back to bio_end_io_acct(). 
-+ */ -+void bio_start_io_acct_time(struct bio *bio, unsigned long start_time) -+{ -+ __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), -+ bio_op(bio), start_time); -+} -+EXPORT_SYMBOL_GPL(bio_start_io_acct_time); -+ - /** - * bio_start_io_acct - start I/O accounting for bio based drivers - * @bio: bio to start account for -@@ -1316,14 +1322,15 @@ static unsigned long __part_start_io_acct(struct block_device *part, - */ - unsigned long bio_start_io_acct(struct bio *bio) - { -- return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio)); -+ return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), -+ bio_op(bio), jiffies); - } - EXPORT_SYMBOL_GPL(bio_start_io_acct); - - unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, - unsigned int op) - { -- return __part_start_io_acct(disk->part0, sectors, op); -+ return __part_start_io_acct(disk->part0, sectors, op, jiffies); - } - EXPORT_SYMBOL(disk_start_io_acct); - -@@ -1414,6 +1421,13 @@ bool blk_update_request(struct request *req, blk_status_t error, - req->q->integrity.profile->complete_fn(req, nr_bytes); - #endif - -+ /* -+ * Upper layers may call blk_crypto_evict_key() anytime after the last -+ * bio_endio(). Therefore, the keyslot must be released before that. -+ */ -+ if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req)) -+ __blk_crypto_rq_put_keyslot(req); -+ - if (unlikely(error && !blk_rq_is_passthrough(req) && - !(req->rq_flags & RQF_QUIET))) - print_req_error(req, error, __func__); -diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h -index 0d36aae538d7b..8e08345576203 100644 ---- a/block/blk-crypto-internal.h -+++ b/block/blk-crypto-internal.h -@@ -60,6 +60,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq) - return rq->crypt_ctx; - } - -+static inline bool blk_crypto_rq_has_keyslot(struct request *rq) -+{ -+ return rq->crypt_keyslot; -+} -+ - #else /* CONFIG_BLK_INLINE_ENCRYPTION */ - - static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, -@@ -93,6 +98,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq) - return false; - } - -+static inline bool blk_crypto_rq_has_keyslot(struct request *rq) -+{ -+ return false; -+} -+ - #endif /* CONFIG_BLK_INLINE_ENCRYPTION */ - - void __bio_crypt_advance(struct bio *bio, unsigned int bytes); -@@ -127,14 +137,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr) - return true; - } - --blk_status_t __blk_crypto_init_request(struct request *rq); --static inline blk_status_t blk_crypto_init_request(struct request *rq) -+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq); -+static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq) - { - if (blk_crypto_rq_is_encrypted(rq)) -- return __blk_crypto_init_request(rq); -+ return __blk_crypto_rq_get_keyslot(rq); - return BLK_STS_OK; - } - -+void __blk_crypto_rq_put_keyslot(struct request *rq); -+static inline void blk_crypto_rq_put_keyslot(struct request *rq) -+{ -+ if (blk_crypto_rq_has_keyslot(rq)) -+ __blk_crypto_rq_put_keyslot(rq); -+} -+ - void __blk_crypto_free_request(struct request *rq); - static inline void blk_crypto_free_request(struct request *rq) - { -@@ -173,7 +190,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq) - { - - if (blk_crypto_rq_is_encrypted(rq)) -- return blk_crypto_init_request(rq); -+ return blk_crypto_rq_get_keyslot(rq); - return BLK_STS_OK; - } - -diff --git a/block/blk-crypto.c b/block/blk-crypto.c -index 
103c2e2d50d67..5029a50807d5d 100644 ---- a/block/blk-crypto.c -+++ b/block/blk-crypto.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - #include "blk-crypto-internal.h" -@@ -216,26 +217,26 @@ static bool bio_crypt_check_alignment(struct bio *bio) - return true; - } - --blk_status_t __blk_crypto_init_request(struct request *rq) -+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq) - { - return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key, - &rq->crypt_keyslot); - } - --/** -- * __blk_crypto_free_request - Uninitialize the crypto fields of a request. -- * -- * @rq: The request whose crypto fields to uninitialize. -- * -- * Completely uninitializes the crypto fields of a request. If a keyslot has -- * been programmed into some inline encryption hardware, that keyslot is -- * released. The rq->crypt_ctx is also freed. -- */ --void __blk_crypto_free_request(struct request *rq) -+void __blk_crypto_rq_put_keyslot(struct request *rq) - { - blk_ksm_put_slot(rq->crypt_keyslot); -+ rq->crypt_keyslot = NULL; -+} -+ -+void __blk_crypto_free_request(struct request *rq) -+{ -+ /* The keyslot, if one was needed, should have been released earlier. */ -+ if (WARN_ON_ONCE(rq->crypt_keyslot)) -+ __blk_crypto_rq_put_keyslot(rq); -+ - mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool); -- blk_crypto_rq_set_defaults(rq); -+ rq->crypt_ctx = NULL; - } - - /** -@@ -384,29 +385,39 @@ int blk_crypto_start_using_key(const struct blk_crypto_key *key, - } - - /** -- * blk_crypto_evict_key() - Evict a key from any inline encryption hardware -- * it may have been programmed into -- * @q: The request queue who's associated inline encryption hardware this key -- * might have been programmed into -- * @key: The key to evict -+ * blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue -+ * @q: a request_queue on which I/O using the key may have been done -+ * @key: the key to evict - * -- * Upper layers (filesystems) must call this function to ensure that a key is -- * evicted from any hardware that it might have been programmed into. The key -- * must not be in use by any in-flight IO when this function is called. -+ * For a given request_queue, this function removes the given blk_crypto_key -+ * from the keyslot management structures and evicts it from any underlying -+ * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been -+ * programmed into. - * -- * Return: 0 on success or if key is not present in the q's ksm, -err on error. -+ * Upper layers must call this before freeing the blk_crypto_key. It must be -+ * called for every request_queue the key may have been used on. The key must -+ * no longer be in use by any I/O when this function is called. -+ * -+ * Context: May sleep. - */ --int blk_crypto_evict_key(struct request_queue *q, -- const struct blk_crypto_key *key) -+void blk_crypto_evict_key(struct request_queue *q, -+ const struct blk_crypto_key *key) - { -- if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) -- return blk_ksm_evict_key(q->ksm, key); -+ int err; - -+ if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) -+ err = blk_ksm_evict_key(q->ksm, key); -+ else -+ err = blk_crypto_fallback_evict_key(key); - /* -- * If the request queue's associated inline encryption hardware didn't -- * have support for the key, then the key might have been programmed -- * into the fallback keyslot manager, so try to evict from there. 
-+ * An error can only occur here if the key failed to be evicted from a -+ * keyslot (due to a hardware or driver issue) or is allegedly still in -+ * use by I/O (due to a kernel bug). Even in these cases, the key is -+ * still unlinked from the keyslot management structures, and the caller -+ * is allowed and expected to free it right away. There's nothing -+ * callers can do to handle errors, so just log them and return void. - */ -- return blk_crypto_fallback_evict_key(key); -+ if (err) -+ pr_warn_ratelimited("error %d evicting key\n", err); - } - EXPORT_SYMBOL_GPL(blk_crypto_evict_key); -diff --git a/block/blk-flush.c b/block/blk-flush.c -index 4201728bf3a5a..94a86acbb7f67 100644 ---- a/block/blk-flush.c -+++ b/block/blk-flush.c -@@ -235,8 +235,10 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) - * avoiding use-after-free. - */ - WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE); -- if (fq->rq_status != BLK_STS_OK) -+ if (fq->rq_status != BLK_STS_OK) { - error = fq->rq_status; -+ fq->rq_status = BLK_STS_OK; -+ } - - if (!q->elevator) { - flush_rq->tag = BLK_MQ_NO_TAG; -diff --git a/block/blk-ioc.c b/block/blk-ioc.c -index 57299f860d41e..90c05971f71e0 100644 ---- a/block/blk-ioc.c -+++ b/block/blk-ioc.c -@@ -265,6 +265,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) - INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC); - INIT_HLIST_HEAD(&ioc->icq_list); - INIT_WORK(&ioc->release_work, ioc_release_fn); -+ ioc->ioprio = IOPRIO_DEFAULT; - - /* - * Try to install. ioc shouldn't be installed if someone else -diff --git a/block/blk-iocost.c b/block/blk-iocost.c -index b3880e4ba22a1..f95feabb3ca88 100644 ---- a/block/blk-iocost.c -+++ b/block/blk-iocost.c -@@ -232,7 +232,9 @@ enum { - - /* 1/64k is granular enough and can easily be handled w/ u32 */ - WEIGHT_ONE = 1 << 16, -+}; - -+enum { - /* - * As vtime is used to calculate the cost of each IO, it needs to - * be fairly high precision. For example, it should be able to -@@ -256,6 +258,11 @@ enum { - VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION, - VRATE_CLAMP_ADJ_PCT = 4, - -+ /* switch iff the conditions are met for longer than this */ -+ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, -+}; -+ -+enum { - /* if IOs end up waiting for requests, issue less */ - RQ_WAIT_BUSY_PCT = 5, - -@@ -294,9 +301,6 @@ enum { - /* don't let cmds which take a very long time pin lagging for too long */ - MAX_LAGGING_PERIODS = 10, - -- /* switch iff the conditions are met for longer than this */ -- AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, -- - /* - * Count IO size in 4k pages. The 12bit shift helps keeping - * size-proportional components of cost calculation in closer -@@ -870,9 +874,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops, - - *page = *seqio = *randio = 0; - -- if (bps) -- *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, -- DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE)); -+ if (bps) { -+ u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE); -+ -+ if (bps_pages) -+ *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages); -+ else -+ *page = 1; -+ } - - if (seqiops) { - v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops); -@@ -2311,11 +2320,28 @@ static void ioc_timer_fn(struct timer_list *timer) - hwm = current_hweight_max(iocg); - new_hwi = hweight_after_donation(iocg, old_hwi, hwm, - usage, &now); -- if (new_hwi < hwm) { -+ /* -+ * Donation calculation assumes hweight_after_donation -+ * to be positive, a condition that a donor w/ hwa < 2 -+ * can't meet. Don't bother with donation if hwa is -+ * below 2. 
It's not gonna make a meaningful difference -+ * anyway. -+ */ -+ if (new_hwi < hwm && hwa >= 2) { - iocg->hweight_donating = hwa; - iocg->hweight_after_donation = new_hwi; - list_add(&iocg->surplus_list, &surpluses); -- } else { -+ } else if (!iocg->abs_vdebt) { -+ /* -+ * @iocg doesn't have enough to donate. Reset -+ * its inuse to active. -+ * -+ * Don't reset debtors as their inuse's are -+ * owned by debt handling. This shouldn't affect -+ * donation calculuation in any meaningful way -+ * as @iocg doesn't have a meaningful amount of -+ * share anyway. -+ */ - TRACE_IOCG_PATH(inuse_shortage, iocg, &now, - iocg->inuse, iocg->active, - iocg->hweight_inuse, new_hwi); -@@ -2422,6 +2448,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, - u32 hwi, adj_step; - s64 margin; - u64 cost, new_inuse; -+ unsigned long flags; - - current_hweight(iocg, NULL, &hwi); - old_hwi = hwi; -@@ -2440,11 +2467,11 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, - iocg->inuse == iocg->active) - return cost; - -- spin_lock_irq(&ioc->lock); -+ spin_lock_irqsave(&ioc->lock, flags); - - /* we own inuse only when @iocg is in the normal active state */ - if (iocg->abs_vdebt || list_empty(&iocg->active_list)) { -- spin_unlock_irq(&ioc->lock); -+ spin_unlock_irqrestore(&ioc->lock, flags); - return cost; - } - -@@ -2465,7 +2492,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, - } while (time_after64(vtime + cost, now->vnow) && - iocg->inuse != iocg->active); - -- spin_unlock_irq(&ioc->lock); -+ spin_unlock_irqrestore(&ioc->lock, flags); - - TRACE_IOCG_PATH(inuse_adjust, iocg, now, - old_inuse, iocg->inuse, old_hwi, hwi); -@@ -2876,15 +2903,21 @@ static int blk_iocost_init(struct request_queue *q) - * called before policy activation completion, can't assume that the - * target bio has an iocg associated and need to test for NULL iocg. - */ -- rq_qos_add(q, rqos); -+ ret = rq_qos_add(q, rqos); -+ if (ret) -+ goto err_free_ioc; -+ - ret = blkcg_activate_policy(q, &blkcg_policy_iocost); -- if (ret) { -- rq_qos_del(q, rqos); -- free_percpu(ioc->pcpu_stat); -- kfree(ioc); -- return ret; -- } -+ if (ret) -+ goto err_del_qos; - return 0; -+ -+err_del_qos: -+ rq_qos_del(q, rqos); -+err_free_ioc: -+ free_percpu(ioc->pcpu_stat); -+ kfree(ioc); -+ return ret; - } - - static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp) -diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c -index c0545f9da549c..bdef8395af6e7 100644 ---- a/block/blk-iolatency.c -+++ b/block/blk-iolatency.c -@@ -86,7 +86,17 @@ struct iolatency_grp; - struct blk_iolatency { - struct rq_qos rqos; - struct timer_list timer; -- atomic_t enabled; -+ -+ /* -+ * ->enabled is the master enable switch gating the throttling logic and -+ * inflight tracking. The number of cgroups which have iolat enabled is -+ * tracked in ->enable_cnt, and ->enable is flipped on/off accordingly -+ * from ->enable_work with the request_queue frozen. For details, See -+ * blkiolatency_enable_work_fn(). 
-+ */ -+ bool enabled; -+ atomic_t enable_cnt; -+ struct work_struct enable_work; - }; - - static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos) -@@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos) - return container_of(rqos, struct blk_iolatency, rqos); - } - --static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat) --{ -- return atomic_read(&blkiolat->enabled) > 0; --} -- - struct child_latency_info { - spinlock_t lock; - -@@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio) - struct blkcg_gq *blkg = bio->bi_blkg; - bool issue_as_root = bio_issue_as_root_blkg(bio); - -- if (!blk_iolatency_enabled(blkiolat)) -+ if (!blkiolat->enabled) - return; - - while (blkg && blkg->parent) { -@@ -593,19 +598,17 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) - u64 window_start; - u64 now; - bool issue_as_root = bio_issue_as_root_blkg(bio); -- bool enabled = false; - int inflight = 0; - - blkg = bio->bi_blkg; -- if (!blkg || !bio_flagged(bio, BIO_TRACKED)) -+ if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED)) - return; - - iolat = blkg_to_lat(bio->bi_blkg); - if (!iolat) - return; - -- enabled = blk_iolatency_enabled(iolat->blkiolat); -- if (!enabled) -+ if (!iolat->blkiolat->enabled) - return; - - now = ktime_to_ns(ktime_get()); -@@ -644,6 +647,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos) - struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); - - del_timer_sync(&blkiolat->timer); -+ flush_work(&blkiolat->enable_work); - blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency); - kfree(blkiolat); - } -@@ -715,6 +719,44 @@ next: - rcu_read_unlock(); - } - -+/** -+ * blkiolatency_enable_work_fn - Enable or disable iolatency on the device -+ * @work: enable_work of the blk_iolatency of interest -+ * -+ * iolatency needs to keep track of the number of in-flight IOs per cgroup. This -+ * is relatively expensive as it involves walking up the hierarchy twice for -+ * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we -+ * want to disable the in-flight tracking. -+ * -+ * We have to make sure that the counting is balanced - we don't want to leak -+ * the in-flight counts by disabling accounting in the completion path while IOs -+ * are in flight. This is achieved by ensuring that no IO is in flight by -+ * freezing the queue while flipping ->enabled. As this requires a sleepable -+ * context, ->enabled flipping is punted to this work function. -+ */ -+static void blkiolatency_enable_work_fn(struct work_struct *work) -+{ -+ struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency, -+ enable_work); -+ bool enabled; -+ -+ /* -+ * There can only be one instance of this function running for @blkiolat -+ * and it's guaranteed to be executed at least once after the latest -+ * ->enabled_cnt modification. Acting on the latest ->enable_cnt is -+ * sufficient. -+ * -+ * Also, we know @blkiolat is safe to access as ->enable_work is flushed -+ * in blkcg_iolatency_exit(). 
-+ */ -+ enabled = atomic_read(&blkiolat->enable_cnt); -+ if (enabled != blkiolat->enabled) { -+ blk_mq_freeze_queue(blkiolat->rqos.q); -+ blkiolat->enabled = enabled; -+ blk_mq_unfreeze_queue(blkiolat->rqos.q); -+ } -+} -+ - int blk_iolatency_init(struct request_queue *q) - { - struct blk_iolatency *blkiolat; -@@ -730,27 +772,29 @@ int blk_iolatency_init(struct request_queue *q) - rqos->ops = &blkcg_iolatency_ops; - rqos->q = q; - -- rq_qos_add(q, rqos); -- -+ ret = rq_qos_add(q, rqos); -+ if (ret) -+ goto err_free; - ret = blkcg_activate_policy(q, &blkcg_policy_iolatency); -- if (ret) { -- rq_qos_del(q, rqos); -- kfree(blkiolat); -- return ret; -- } -+ if (ret) -+ goto err_qos_del; - - timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0); -+ INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn); - - return 0; -+ -+err_qos_del: -+ rq_qos_del(q, rqos); -+err_free: -+ kfree(blkiolat); -+ return ret; - } - --/* -- * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise -- * return 0. -- */ --static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) -+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) - { - struct iolatency_grp *iolat = blkg_to_lat(blkg); -+ struct blk_iolatency *blkiolat = iolat->blkiolat; - u64 oldval = iolat->min_lat_nsec; - - iolat->min_lat_nsec = val; -@@ -758,13 +802,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) - iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec, - BLKIOLATENCY_MAX_WIN_SIZE); - -- if (!oldval && val) -- return 1; -+ if (!oldval && val) { -+ if (atomic_inc_return(&blkiolat->enable_cnt) == 1) -+ schedule_work(&blkiolat->enable_work); -+ } - if (oldval && !val) { - blkcg_clear_delay(blkg); -- return -1; -+ if (atomic_dec_return(&blkiolat->enable_cnt) == 0) -+ schedule_work(&blkiolat->enable_work); - } -- return 0; - } - - static void iolatency_clear_scaling(struct blkcg_gq *blkg) -@@ -796,7 +842,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, - u64 lat_val = 0; - u64 oldval; - int ret; -- int enable = 0; - - ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); - if (ret) -@@ -831,41 +876,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, - blkg = ctx.blkg; - oldval = iolat->min_lat_nsec; - -- enable = iolatency_set_min_lat_nsec(blkg, lat_val); -- if (enable) { -- if (!blk_get_queue(blkg->q)) { -- ret = -ENODEV; -- goto out; -- } -- -- blkg_get(blkg); -- } -- -- if (oldval != iolat->min_lat_nsec) { -+ iolatency_set_min_lat_nsec(blkg, lat_val); -+ if (oldval != iolat->min_lat_nsec) - iolatency_clear_scaling(blkg); -- } -- - ret = 0; - out: - blkg_conf_finish(&ctx); -- if (ret == 0 && enable) { -- struct iolatency_grp *tmp = blkg_to_lat(blkg); -- struct blk_iolatency *blkiolat = tmp->blkiolat; -- -- blk_mq_freeze_queue(blkg->q); -- -- if (enable == 1) -- atomic_inc(&blkiolat->enabled); -- else if (enable == -1) -- atomic_dec(&blkiolat->enabled); -- else -- WARN_ON_ONCE(1); -- -- blk_mq_unfreeze_queue(blkg->q); -- -- blkg_put(blkg); -- blk_put_queue(blkg->q); -- } - return ret ?: nbytes; - } - -@@ -1006,14 +1022,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) - { - struct iolatency_grp *iolat = pd_to_lat(pd); - struct blkcg_gq *blkg = lat_to_blkg(iolat); -- struct blk_iolatency *blkiolat = iolat->blkiolat; -- int ret; - -- ret = iolatency_set_min_lat_nsec(blkg, 0); -- if (ret == 1) -- atomic_inc(&blkiolat->enabled); -- if (ret == -1) -- atomic_dec(&blkiolat->enabled); 
-+ iolatency_set_min_lat_nsec(blkg, 0); - iolatency_clear_scaling(blkg); - } - -diff --git a/block/blk-map.c b/block/blk-map.c -index 4526adde01564..c7f71d83eff18 100644 ---- a/block/blk-map.c -+++ b/block/blk-map.c -@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, - if (bytes > len) - bytes = len; - -- page = alloc_page(GFP_NOIO | gfp_mask); -+ page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask); - if (!page) - goto cleanup; - -diff --git a/block/blk-merge.c b/block/blk-merge.c -index 7a5c81c02c800..1affc5fd35f0c 100644 ---- a/block/blk-merge.c -+++ b/block/blk-merge.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include - -@@ -278,6 +279,16 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, - *segs = nsegs; - return NULL; - split: -+ /* -+ * We can't sanely support splitting for a REQ_NOWAIT bio. End it -+ * with EAGAIN if splitting is required and return an error pointer. -+ */ -+ if (bio->bi_opf & REQ_NOWAIT) { -+ bio->bi_status = BLK_STS_AGAIN; -+ bio_endio(bio); -+ return ERR_PTR(-EAGAIN); -+ } -+ - *segs = nsegs; - - /* -@@ -337,11 +348,13 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) - break; - } - split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); -+ if (IS_ERR(split)) -+ *bio = split = NULL; - break; - } - - if (split) { -- /* there isn't chance to merge the splitted bio */ -+ /* there isn't chance to merge the split bio */ - split->bi_opf |= REQ_NOMERGE; - - bio_chain(split, *bio); -@@ -561,6 +574,9 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq) - static inline int ll_new_hw_segment(struct request *req, struct bio *bio, - unsigned int nr_phys_segs) - { -+ if (!blk_cgroup_mergeable(req, bio)) -+ goto no_merge; -+ - if (blk_integrity_merge_bio(req->q, req, bio) == false) - goto no_merge; - -@@ -657,6 +673,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, - if (total_phys_segments > blk_rq_get_max_segments(req)) - return 0; - -+ if (!blk_cgroup_mergeable(req, next->bio)) -+ return 0; -+ - if (blk_integrity_merge_rq(q, req, next) == false) - return 0; - -@@ -799,6 +818,8 @@ static struct request *attempt_merge(struct request_queue *q, - if (!blk_discard_mergable(req)) - elv_merge_requests(q, req, next); - -+ blk_crypto_rq_put_keyslot(next); -+ - /* - * 'next' is going away, so update stats accordingly - */ -@@ -863,6 +884,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) - if (rq->rq_disk != bio->bi_bdev->bd_disk) - return false; - -+ /* don't merge across cgroup boundaries */ -+ if (!blk_cgroup_mergeable(rq, bio)) -+ return false; -+ - /* only merge integrity protected bio into ditto rq */ - if (blk_integrity_merge_bio(rq->q, rq, bio) == false) - return false; -diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c -index 3b38d15723de1..7023257a133df 100644 ---- a/block/blk-mq-debugfs.c -+++ b/block/blk-mq-debugfs.c -@@ -879,6 +879,9 @@ void blk_mq_debugfs_register_hctx(struct request_queue *q, - char name[20]; - int i; - -+ if (!q->debugfs_dir) -+ return; -+ - snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); - hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); - -diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c -index 0f006cabfd914..ff1021dbb0d22 100644 ---- a/block/blk-mq-sched.c -+++ b/block/blk-mq-sched.c -@@ -45,8 +45,7 @@ void blk_mq_sched_assign_ioc(struct request *rq) - } - - /* -- * Mark a hardware queue as needing a restart. 
For shared queues, maintain -- * a count of how many hardware queues are marked for restart. -+ * Mark a hardware queue as needing a restart. - */ - void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) - { -@@ -110,7 +109,7 @@ dispatch: - /* - * Only SCSI implements .get_budget and .put_budget, and SCSI restarts - * its queue by itself in its completion handler, so we don't need to -- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. -+ * restart queue if .get_budget() fails to get the budget. - * - * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to - * be run again. This is necessary to avoid starving flushes. -@@ -208,11 +207,18 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) - - static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) - { -+ unsigned long end = jiffies + HZ; - int ret; - - do { - ret = __blk_mq_do_dispatch_sched(hctx); -- } while (ret == 1); -+ if (ret != 1) -+ break; -+ if (need_resched() || time_is_before_jiffies(end)) { -+ blk_mq_delay_run_hw_queue(hctx, 0); -+ break; -+ } -+ } while (1); - - return ret; - } -@@ -231,7 +237,7 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, - /* - * Only SCSI implements .get_budget and .put_budget, and SCSI restarts - * its queue by itself in its completion handler, so we don't need to -- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE. -+ * restart queue if .get_budget() fails to get the budget. - * - * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to - * be run again. This is necessary to avoid starving flushes. -diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c -index 253c857cba47c..7074ce8d2d03f 100644 ---- a/block/blk-mq-sysfs.c -+++ b/block/blk-mq-sysfs.c -@@ -187,7 +187,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) - { - struct request_queue *q = hctx->queue; - struct blk_mq_ctx *ctx; -- int i, ret; -+ int i, j, ret; - - if (!hctx->nr_ctx) - return 0; -@@ -199,9 +199,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) - hctx_for_each_ctx(hctx, ctx, i) { - ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); - if (ret) -- break; -+ goto out; - } - -+ return 0; -+out: -+ hctx_for_each_ctx(hctx, ctx, j) { -+ if (j < i) -+ kobject_del(&ctx->kobj); -+ } -+ kobject_del(&hctx->kobj); - return ret; - } - -diff --git a/block/blk-mq.c b/block/blk-mq.c -index 652a31fc3bb38..bbbbcd2c19418 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -457,7 +457,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, - * allocator for this for the rare use case of a command tied to - * a specific queue. 
- */ -- if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED)))) -+ if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) || -+ WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED))) - return ERR_PTR(-EINVAL); - - if (hctx_idx >= q->nr_hw_queues) -@@ -476,6 +477,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, - if (!blk_mq_hw_queue_mapped(data.hctx)) - goto out_queue_exit; - cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); -+ if (cpu >= nr_cpu_ids) -+ goto out_queue_exit; - data.ctx = __blk_mq_get_ctx(q, cpu); - - if (!q->elevator) -@@ -763,7 +766,6 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) - /* this request will be re-inserted to io scheduler queue */ - blk_mq_sched_requeue_request(rq); - -- BUG_ON(!list_empty(&rq->queuelist)); - blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); - } - EXPORT_SYMBOL(blk_mq_requeue_request); -@@ -1399,7 +1401,8 @@ out: - /* If we didn't flush the entire list, we could have told the driver - * there was more coming, but that turned out to be a lie. - */ -- if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued) -+ if ((!list_empty(list) || errors || needs_resource || -+ ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued) - q->mq_ops->commit_rqs(hctx); - /* - * Any items that need requeuing? Stuff them into hctx->dispatch, -@@ -1643,8 +1646,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q) - */ - static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) - { -- struct blk_mq_hw_ctx *hctx; -- -+ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); - /* - * If the IO scheduler does not respect hardware queues when - * dispatching, we just don't bother with multiple HW queues and -@@ -1652,8 +1654,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) - * just causes lock contention inside the scheduler and pointless cache - * bouncing. - */ -- hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, -- raw_smp_processor_id()); -+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); -+ - if (!blk_mq_hctx_stopped(hctx)) - return hctx; - return NULL; -@@ -2111,6 +2113,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - list_del_init(&rq->queuelist); - ret = blk_mq_request_issue_directly(rq, list_empty(list)); - if (ret != BLK_STS_OK) { -+ errors++; - if (ret == BLK_STS_RESOURCE || - ret == BLK_STS_DEV_RESOURCE) { - blk_mq_request_bypass_insert(rq, false, -@@ -2118,7 +2121,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - break; - } - blk_mq_end_request(rq, ret); -- errors++; - } else - queued++; - } -@@ -2148,14 +2150,14 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) - } - - /* -- * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple -+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple - * queues. This is important for md arrays to benefit from merging - * requests. 
- */ - static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) - { - if (plug->multiple_queues) -- return BLK_MAX_REQUEST_COUNT * 4; -+ return BLK_MAX_REQUEST_COUNT * 2; - return BLK_MAX_REQUEST_COUNT; - } - -@@ -2192,6 +2194,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) - - blk_queue_bounce(q, &bio); - __blk_queue_split(&bio, &nr_segs); -+ if (!bio) -+ goto queue_exit; - - if (!bio_integrity_prep(bio)) - goto queue_exit; -@@ -2224,7 +2228,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) - - blk_mq_bio_to_request(rq, bio, nr_segs); - -- ret = blk_crypto_init_request(rq); -+ ret = blk_crypto_rq_get_keyslot(rq); - if (ret != BLK_STS_OK) { - bio->bi_status = ret; - bio_endio(bio); -@@ -4019,6 +4023,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq) - } - EXPORT_SYMBOL(blk_mq_rq_cpu); - -+void blk_mq_cancel_work_sync(struct request_queue *q) -+{ -+ if (queue_is_mq(q)) { -+ struct blk_mq_hw_ctx *hctx; -+ int i; -+ -+ cancel_delayed_work_sync(&q->requeue_work); -+ -+ queue_for_each_hw_ctx(q, hctx, i) -+ cancel_delayed_work_sync(&hctx->run_work); -+ } -+} -+ - static int __init blk_mq_init(void) - { - int i; -diff --git a/block/blk-mq.h b/block/blk-mq.h -index d08779f77a265..7cdca23b6263d 100644 ---- a/block/blk-mq.h -+++ b/block/blk-mq.h -@@ -129,6 +129,8 @@ extern int blk_mq_sysfs_register(struct request_queue *q); - extern void blk_mq_sysfs_unregister(struct request_queue *q); - extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); - -+void blk_mq_cancel_work_sync(struct request_queue *q); -+ - void blk_mq_release(struct request_queue *q); - - static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, -diff --git a/block/blk-pm.c b/block/blk-pm.c -index 17bd020268d42..2dad62cc15727 100644 ---- a/block/blk-pm.c -+++ b/block/blk-pm.c -@@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume); - /** - * blk_post_runtime_resume - Post runtime resume processing - * @q: the queue of the device -- * @err: return value of the device's runtime_resume function - * - * Description: -- * Update the queue's runtime status according to the return value of the -- * device's runtime_resume function. If the resume was successful, call -- * blk_set_runtime_active() to do the real work of restarting the queue. -+ * For historical reasons, this routine merely calls blk_set_runtime_active() -+ * to do the real work of restarting the queue. It does this regardless of -+ * whether the device's runtime-resume succeeded; even if it failed the -+ * driver or error handler will need to communicate with the device. - * - * This function should be called near the end of the device's - * runtime_resume callback. - */ --void blk_post_runtime_resume(struct request_queue *q, int err) -+void blk_post_runtime_resume(struct request_queue *q) - { -- if (!q->dev) -- return; -- if (!err) { -- blk_set_runtime_active(q); -- } else { -- spin_lock_irq(&q->queue_lock); -- q->rpm_status = RPM_SUSPENDED; -- spin_unlock_irq(&q->queue_lock); -- } -+ blk_set_runtime_active(q); - } - EXPORT_SYMBOL(blk_post_runtime_resume); - -@@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume); - * runtime PM status and re-enable peeking requests from the queue. It - * should be called before first request is added to the queue. - * -- * This function is also called by blk_post_runtime_resume() for successful -+ * This function is also called by blk_post_runtime_resume() for - * runtime resumes. It does everything necessary to restart the queue. 
- */ - void blk_set_runtime_active(struct request_queue *q) -diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h -index f000f83e0621c..1655f76b6a1b6 100644 ---- a/block/blk-rq-qos.h -+++ b/block/blk-rq-qos.h -@@ -86,7 +86,7 @@ static inline void rq_wait_init(struct rq_wait *rq_wait) - init_waitqueue_head(&rq_wait->wait); - } - --static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) -+static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos) - { - /* - * No IO can be in-flight when adding rqos, so freeze queue, which -@@ -98,6 +98,8 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) - blk_mq_freeze_queue(q); - - spin_lock_irq(&q->queue_lock); -+ if (rq_qos_id(q, rqos->id)) -+ goto ebusy; - rqos->next = q->rq_qos; - q->rq_qos = rqos; - spin_unlock_irq(&q->queue_lock); -@@ -106,6 +108,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos) - - if (rqos->ops->debugfs_attrs) - blk_mq_debugfs_register_rqos(rqos); -+ -+ return 0; -+ebusy: -+ spin_unlock_irq(&q->queue_lock); -+ blk_mq_unfreeze_queue(q); -+ return -EBUSY; -+ - } - - static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos) -@@ -177,21 +186,22 @@ static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) - __rq_qos_requeue(q->rq_qos, rq); - } - --static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio) -+static inline void rq_qos_done_bio(struct bio *bio) - { -- if (q->rq_qos) -- __rq_qos_done_bio(q->rq_qos, bio); -+ if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || -+ bio_flagged(bio, BIO_QOS_MERGED))) { -+ struct request_queue *q = bdev_get_queue(bio->bi_bdev); -+ if (q->rq_qos) -+ __rq_qos_done_bio(q->rq_qos, bio); -+ } - } - - static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) - { -- /* -- * BIO_TRACKED lets controllers know that a bio went through the -- * normal rq_qos path. 
-- */ -- bio_set_flag(bio, BIO_TRACKED); -- if (q->rq_qos) -+ if (q->rq_qos) { -+ bio_set_flag(bio, BIO_QOS_THROTTLED); - __rq_qos_throttle(q->rq_qos, bio); -+ } - } - - static inline void rq_qos_track(struct request_queue *q, struct request *rq, -@@ -204,8 +214,10 @@ static inline void rq_qos_track(struct request_queue *q, struct request *rq, - static inline void rq_qos_merge(struct request_queue *q, struct request *rq, - struct bio *bio) - { -- if (q->rq_qos) -+ if (q->rq_qos) { -+ bio_set_flag(bio, BIO_QOS_MERGED); - __rq_qos_merge(q->rq_qos, rq, bio); -+ } - } - - static inline void rq_qos_queue_depth_changed(struct request_queue *q) -diff --git a/block/blk-settings.c b/block/blk-settings.c -index b880c70e22e4e..73a80895e3ae1 100644 ---- a/block/blk-settings.c -+++ b/block/blk-settings.c -@@ -875,6 +875,7 @@ static bool disk_has_partitions(struct gendisk *disk) - void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model) - { - struct request_queue *q = disk->queue; -+ unsigned int old_model = q->limits.zoned; - - switch (model) { - case BLK_ZONED_HM: -@@ -912,7 +913,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model) - */ - blk_queue_zone_write_granularity(q, - queue_logical_block_size(q)); -- } else { -+ } else if (old_model != BLK_ZONED_NONE) { - blk_queue_clear_zone_settings(q); - } - } -diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c -index 614d9d47de36b..00021f0123701 100644 ---- a/block/blk-sysfs.c -+++ b/block/blk-sysfs.c -@@ -805,16 +805,6 @@ static void blk_release_queue(struct kobject *kobj) - - blk_free_queue_stats(q->stats); - -- if (queue_is_mq(q)) { -- struct blk_mq_hw_ctx *hctx; -- int i; -- -- cancel_delayed_work_sync(&q->requeue_work); -- -- queue_for_each_hw_ctx(q, hctx, i) -- cancel_delayed_work_sync(&hctx->run_work); -- } -- - blk_exit_queue(q); - - blk_queue_free_zone_bitmaps(q); -@@ -964,15 +954,17 @@ void blk_unregister_queue(struct gendisk *disk) - */ - if (queue_is_mq(q)) - blk_mq_unregister_dev(disk_to_dev(disk), q); -- -- kobject_uevent(&q->kobj, KOBJ_REMOVE); -- kobject_del(&q->kobj); - blk_trace_remove_sysfs(disk_to_dev(disk)); - - mutex_lock(&q->sysfs_lock); - if (q->elevator) - elv_unregister_queue(q); - mutex_unlock(&q->sysfs_lock); -+ -+ /* Now that we've deleted all child objects, we can delete the queue. 
*/ -+ kobject_uevent(&q->kobj, KOBJ_REMOVE); -+ kobject_del(&q->kobj); -+ - mutex_unlock(&q->sysfs_dir_lock); - - kobject_put(&disk_to_dev(disk)->kobj); -diff --git a/block/blk-throttle.c b/block/blk-throttle.c -index 7c4e7993ba970..68cf8dbb4c67a 100644 ---- a/block/blk-throttle.c -+++ b/block/blk-throttle.c -@@ -950,7 +950,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, - u64 bps_limit, unsigned long *wait) - { - bool rw = bio_data_dir(bio); -- u64 bytes_allowed, extra_bytes, tmp; -+ u64 bytes_allowed, extra_bytes; - unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; - unsigned int bio_size = throtl_bio_data_size(bio); - -@@ -967,10 +967,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, - jiffy_elapsed_rnd = tg->td->throtl_slice; - - jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); -- -- tmp = bps_limit * jiffy_elapsed_rnd; -- do_div(tmp, HZ); -- bytes_allowed = tmp; -+ bytes_allowed = mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed_rnd, -+ (u64)HZ); - - if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { - if (wait) -diff --git a/block/blk-wbt.c b/block/blk-wbt.c -index 874c1c37bf0c6..e91d334b2788c 100644 ---- a/block/blk-wbt.c -+++ b/block/blk-wbt.c -@@ -357,6 +357,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb) - unsigned int inflight = wbt_inflight(rwb); - int status; - -+ if (!rwb->rqos.q->disk) -+ return; -+ - status = latency_exceeded(rwb, cb->stat); - - trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step, -@@ -817,6 +820,7 @@ int wbt_init(struct request_queue *q) - { - struct rq_wb *rwb; - int i; -+ int ret; - - rwb = kzalloc(sizeof(*rwb), GFP_KERNEL); - if (!rwb) -@@ -837,19 +841,26 @@ int wbt_init(struct request_queue *q) - rwb->last_comp = rwb->last_issue = jiffies; - rwb->win_nsec = RWB_WINDOW_NSEC; - rwb->enable_state = WBT_STATE_ON_DEFAULT; -- rwb->wc = 1; -+ rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags); - rwb->rq_depth.default_depth = RWB_DEF_DEPTH; -+ rwb->min_lat_nsec = wbt_default_latency_nsec(q); -+ -+ wbt_queue_depth_changed(&rwb->rqos); - - /* - * Assign rwb and add the stats callback. - */ -- rq_qos_add(q, &rwb->rqos); -+ ret = rq_qos_add(q, &rwb->rqos); -+ if (ret) -+ goto err_free; -+ - blk_stat_add_callback(q, rwb->cb); - -- rwb->min_lat_nsec = wbt_default_latency_nsec(q); -+ return 0; - -- wbt_queue_depth_changed(&rwb->rqos); -- wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); -+err_free: -+ blk_stat_free_callback(rwb->cb); -+ kfree(rwb); -+ return ret; - -- return 0; - } -diff --git a/block/blk-zoned.c b/block/blk-zoned.c -index 1d0c76c18fc52..774ecc598bee2 100644 ---- a/block/blk-zoned.c -+++ b/block/blk-zoned.c -@@ -429,9 +429,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, - op = REQ_OP_ZONE_RESET; - - /* Invalidate the page cache, including dirty pages. */ -+ filemap_invalidate_lock(bdev->bd_inode->i_mapping); - ret = blkdev_truncate_zone_range(bdev, mode, &zrange); - if (ret) -- return ret; -+ goto fail; - break; - case BLKOPENZONE: - op = REQ_OP_ZONE_OPEN; -@@ -449,15 +450,9 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, - ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors, - GFP_KERNEL); - -- /* -- * Invalidate the page cache again for zone reset: writes can only be -- * direct for zoned devices so concurrent writes would not add any page -- * to the page cache after/during reset. 
The page cache may be filled -- * again due to concurrent reads though and dropping the pages for -- * these is fine. -- */ -- if (!ret && cmd == BLKRESETZONE) -- ret = blkdev_truncate_zone_range(bdev, mode, &zrange); -+fail: -+ if (cmd == BLKRESETZONE) -+ filemap_invalidate_unlock(bdev->bd_inode->i_mapping); - - return ret; - } -diff --git a/block/blk.h b/block/blk.h -index 6c3c00a8fe19d..aab72194d2266 100644 ---- a/block/blk.h -+++ b/block/blk.h -@@ -184,6 +184,12 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, - void blk_account_io_start(struct request *req); - void blk_account_io_done(struct request *req, u64 now); - -+/* -+ * Plug flush limits -+ */ -+#define BLK_MAX_REQUEST_COUNT 32 -+#define BLK_PLUG_FLUSH_SIZE (128 * 1024) -+ - /* - * Internal elevator interface - */ -diff --git a/block/disk-events.c b/block/disk-events.c -index 8d5496e7592a5..c3488409dd32f 100644 ---- a/block/disk-events.c -+++ b/block/disk-events.c -@@ -307,6 +307,7 @@ bool disk_force_media_change(struct gendisk *disk, unsigned int events) - if (!(events & DISK_EVENT_MEDIA_CHANGE)) - return false; - -+ inc_diskseq(disk); - if (__invalidate_device(disk->part0, true)) - pr_warn("VFS: busy inodes on changed media %s\n", - disk->disk_name); -diff --git a/block/elevator.c b/block/elevator.c -index ff45d8388f487..1b5e57f6115f3 100644 ---- a/block/elevator.c -+++ b/block/elevator.c -@@ -523,8 +523,6 @@ void elv_unregister_queue(struct request_queue *q) - kobject_del(&e->kobj); - - e->registered = 0; -- /* Re-enable throttling in case elevator disabled it */ -- wbt_enable_default(q); - } - } - -@@ -694,12 +692,18 @@ void elevator_init_mq(struct request_queue *q) - if (!e) - return; - -+ /* -+ * We are called before adding disk, when there isn't any FS I/O, -+ * so freezing queue plus canceling dispatch work is enough to -+ * drain any dispatch activities originated from passthrough -+ * requests, then no need to quiesce queue which may add long boot -+ * latency, especially when lots of disks are involved. -+ */ - blk_mq_freeze_queue(q); -- blk_mq_quiesce_queue(q); -+ blk_mq_cancel_work_sync(q); - - err = blk_mq_init_sched(q, e); - -- blk_mq_unquiesce_queue(q); - blk_mq_unfreeze_queue(q); - - if (err) { -diff --git a/block/fops.c b/block/fops.c -index 1e970c247e0eb..6c265a1bcf1b1 100644 ---- a/block/fops.c -+++ b/block/fops.c -@@ -243,6 +243,24 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - bio_endio(bio); - break; - } -+ if (iocb->ki_flags & IOCB_NOWAIT) { -+ /* -+ * This is nonblocking IO, and we need to allocate -+ * another bio if we have data left to map. As we -+ * cannot guarantee that one of the sub bios will not -+ * fail getting issued FOR NOWAIT and as error results -+ * are coalesced across all of them, be safe and ask for -+ * a retry of this from blocking context. 
-+ */ -+ if (unlikely(iov_iter_count(iter))) { -+ bio_release_pages(bio, false); -+ bio_clear_flag(bio, BIO_REFFED); -+ bio_put(bio); -+ blk_finish_plug(&plug); -+ return -EAGAIN; -+ } -+ bio->bi_opf |= REQ_NOWAIT; -+ } - - if (is_read) { - bio->bi_opf = REQ_OP_READ; -@@ -252,9 +270,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - bio->bi_opf = dio_bio_write_op(iocb); - task_io_account_write(bio->bi_iter.bi_size); - } -- if (iocb->ki_flags & IOCB_NOWAIT) -- bio->bi_opf |= REQ_NOWAIT; -- - dio->size += bio->bi_iter.bi_size; - pos += bio->bi_iter.bi_size; - -diff --git a/block/genhd.c b/block/genhd.c -index ab12ae6e636e8..6123f13e148e0 100644 ---- a/block/genhd.c -+++ b/block/genhd.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -323,7 +324,7 @@ int blk_alloc_ext_minor(void) - { - int idx; - -- idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL); -+ idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL); - if (idx == -ENOSPC) - return -EBUSY; - return idx; -@@ -420,6 +421,8 @@ int device_add_disk(struct device *parent, struct gendisk *disk, - DISK_MAX_PARTS); - disk->minors = DISK_MAX_PARTS; - } -+ if (disk->first_minor + disk->minors > MINORMASK + 1) -+ return -EINVAL; - } else { - if (WARN_ON(disk->minors)) - return -EINVAL; -@@ -432,10 +435,6 @@ int device_add_disk(struct device *parent, struct gendisk *disk, - disk->flags |= GENHD_FL_EXT_DEVT; - } - -- ret = disk_alloc_events(disk); -- if (ret) -- goto out_free_ext_minor; -- - /* delay uevents, until we scanned partition table */ - dev_set_uevent_suppress(ddev, 1); - -@@ -446,7 +445,12 @@ int device_add_disk(struct device *parent, struct gendisk *disk, - ddev->devt = MKDEV(disk->major, disk->first_minor); - ret = device_add(ddev); - if (ret) -- goto out_disk_release_events; -+ goto out_free_ext_minor; -+ -+ ret = disk_alloc_events(disk); -+ if (ret) -+ goto out_device_del; -+ - if (!sysfs_deprecated) { - ret = sysfs_create_link(block_depr, &ddev->kobj, - kobject_name(&ddev->kobj)); -@@ -467,11 +471,15 @@ int device_add_disk(struct device *parent, struct gendisk *disk, - - disk->part0->bd_holder_dir = - kobject_create_and_add("holders", &ddev->kobj); -- if (!disk->part0->bd_holder_dir) -+ if (!disk->part0->bd_holder_dir) { -+ ret = -ENOMEM; - goto out_del_integrity; -+ } - disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); -- if (!disk->slave_dir) -+ if (!disk->slave_dir) { -+ ret = -ENOMEM; - goto out_put_holder_dir; -+ } - - ret = bd_register_pending_holders(disk); - if (ret < 0) -@@ -487,7 +495,7 @@ int device_add_disk(struct device *parent, struct gendisk *disk, - * and don't bother scanning for partitions either. 
- */ - disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; -- disk->flags |= GENHD_FL_NO_PART_SCAN; -+ disk->flags |= GENHD_FL_NO_PART; - } else { - ret = bdi_register(disk->bdi, "%u:%u", - disk->major, disk->first_minor); -@@ -519,8 +527,10 @@ out_unregister_bdi: - bdi_unregister(disk->bdi); - out_unregister_queue: - blk_unregister_queue(disk); -+ rq_qos_exit(disk->queue); - out_put_slave_dir: - kobject_put(disk->slave_dir); -+ disk->slave_dir = NULL; - out_put_holder_dir: - kobject_put(disk->part0->bd_holder_dir); - out_del_integrity: -@@ -530,8 +540,6 @@ out_del_block_link: - sysfs_remove_link(block_depr, dev_name(ddev)); - out_device_del: - device_del(ddev); --out_disk_release_events: -- disk_release_events(disk); - out_free_ext_minor: - if (disk->major == BLOCK_EXT_MAJOR) - blk_free_ext_minor(disk->first_minor); -@@ -539,6 +547,20 @@ out_free_ext_minor: - } - EXPORT_SYMBOL(device_add_disk); - -+/** -+ * blk_mark_disk_dead - mark a disk as dead -+ * @disk: disk to mark as dead -+ * -+ * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O -+ * to this disk. -+ */ -+void blk_mark_disk_dead(struct gendisk *disk) -+{ -+ set_bit(GD_DEAD, &disk->state); -+ blk_queue_start_drain(disk->queue); -+} -+EXPORT_SYMBOL_GPL(blk_mark_disk_dead); -+ - /** - * del_gendisk - remove the gendisk - * @disk: the struct gendisk to remove -@@ -603,6 +625,7 @@ void del_gendisk(struct gendisk *disk) - - kobject_put(disk->part0->bd_holder_dir); - kobject_put(disk->slave_dir); -+ disk->slave_dir = NULL; - - part_stat_set_all(disk->part0, 0); - disk->part0->bd_stamp = 0; -@@ -1082,6 +1105,8 @@ static void disk_release(struct device *dev) - might_sleep(); - WARN_ON_ONCE(disk_live(disk)); - -+ blk_mq_cancel_work_sync(disk->queue); -+ - disk_release_events(disk); - kfree(disk->random); - xa_destroy(&disk->part_tbl); -diff --git a/block/holder.c b/block/holder.c -index 9dc084182337f..27cddce1b4461 100644 ---- a/block/holder.c -+++ b/block/holder.c -@@ -1,5 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0-only - #include -+#include - - struct bd_holder_disk { - struct list_head list; -diff --git a/block/ioctl.c b/block/ioctl.c -index eb0491e90b9a0..8f39e413f12a3 100644 ---- a/block/ioctl.c -+++ b/block/ioctl.c -@@ -20,6 +20,8 @@ static int blkpg_do_ioctl(struct block_device *bdev, - struct blkpg_partition p; - long long start, length; - -+ if (disk->flags & GENHD_FL_NO_PART) -+ return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (copy_from_user(&p, upart, sizeof(struct blkpg_partition))) -@@ -113,6 +115,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, - uint64_t range[2]; - uint64_t start, len; - struct request_queue *q = bdev_get_queue(bdev); -+ struct inode *inode = bdev->bd_inode; - int err; - - if (!(mode & FMODE_WRITE)) -@@ -135,12 +138,17 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, - if (start + len > i_size_read(bdev->bd_inode)) - return -EINVAL; - -+ filemap_invalidate_lock(inode->i_mapping); - err = truncate_bdev_range(bdev, mode, start, start + len - 1); - if (err) -- return err; -+ goto fail; -+ -+ err = blkdev_issue_discard(bdev, start >> 9, len >> 9, -+ GFP_KERNEL, flags); - -- return blkdev_issue_discard(bdev, start >> 9, len >> 9, -- GFP_KERNEL, flags); -+fail: -+ filemap_invalidate_unlock(inode->i_mapping); -+ return err; - } - - static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, -@@ -148,6 +156,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, - { - uint64_t range[2]; - 
uint64_t start, end, len; -+ struct inode *inode = bdev->bd_inode; - int err; - - if (!(mode & FMODE_WRITE)) -@@ -170,12 +179,17 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, - return -EINVAL; - - /* Invalidate the page cache, including dirty pages */ -+ filemap_invalidate_lock(inode->i_mapping); - err = truncate_bdev_range(bdev, mode, start, end); - if (err) -- return err; -+ goto fail; - -- return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, -- BLKDEV_ZERO_NOUNMAP); -+ err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, -+ BLKDEV_ZERO_NOUNMAP); -+ -+fail: -+ filemap_invalidate_unlock(inode->i_mapping); -+ return err; - } - - static int put_ushort(unsigned short __user *argp, unsigned short val) -@@ -633,7 +647,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) - (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512); - case BLKGETSIZE: - size = i_size_read(bdev->bd_inode); -- if ((size >> 9) > ~0UL) -+ if ((size >> 9) > ~(compat_ulong_t)0) - return -EFBIG; - return compat_put_ulong(argp, size >> 9); - -diff --git a/block/ioprio.c b/block/ioprio.c -index 0e4ff245f2bf2..6c830154856fc 100644 ---- a/block/ioprio.c -+++ b/block/ioprio.c -@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio) - - switch (class) { - case IOPRIO_CLASS_RT: -- if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN)) -+ /* -+ * Originally this only checked for CAP_SYS_ADMIN, -+ * which was implicitly allowed for pid 0 by security -+ * modules such as SELinux. Make sure we check -+ * CAP_SYS_ADMIN first to avoid a denial/avc for -+ * possibly missing CAP_SYS_NICE permission. -+ */ -+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE)) - return -EPERM; - fallthrough; - /* rt has prio field too */ -@@ -182,9 +189,9 @@ out: - int ioprio_best(unsigned short aprio, unsigned short bprio) - { - if (!ioprio_valid(aprio)) -- aprio = IOPRIO_DEFAULT; -+ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM); - if (!ioprio_valid(bprio)) -- bprio = IOPRIO_DEFAULT; -+ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM); - - return min(aprio, bprio); - } -@@ -213,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) - pgrp = task_pgrp(current); - else - pgrp = find_vpid(who); -+ read_lock(&tasklist_lock); - do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - tmpio = get_task_ioprio(p); - if (tmpio < 0) -@@ -222,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) - else - ret = ioprio_best(ret, tmpio); - } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); -+ read_unlock(&tasklist_lock); -+ - break; - case IOPRIO_WHO_USER: - uid = make_kuid(current_user_ns(), who); -diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c -index 2c4a55bea6ca1..2a7a36551cfae 100644 ---- a/block/keyslot-manager.c -+++ b/block/keyslot-manager.c -@@ -343,25 +343,16 @@ bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm, - return true; - } - --/** -- * blk_ksm_evict_key() - Evict a key from the lower layer device. -- * @ksm: The keyslot manager to evict from -- * @key: The key to evict -- * -- * Find the keyslot that the specified key was programmed into, and evict that -- * slot from the lower layer device. The slot must not be in use by any -- * in-flight IO when this function is called. -- * -- * Context: Process context. Takes and releases ksm->lock. -- * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY -- * if the keyslot is still in use, or another -errno value on other -- * error. 
-+/* -+ * This is an internal function that evicts a key from an inline encryption -+ * device that can be either a real device or the blk-crypto-fallback "device". -+ * It is used only by blk_crypto_evict_key(); see that function for details. - */ - int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key) - { - struct blk_ksm_keyslot *slot; -- int err = 0; -+ int err; - - if (blk_ksm_is_passthrough(ksm)) { - if (ksm->ksm_ll_ops.keyslot_evict) { -@@ -375,22 +366,30 @@ int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, - - blk_ksm_hw_enter(ksm); - slot = blk_ksm_find_keyslot(ksm, key); -- if (!slot) -- goto out_unlock; -+ if (!slot) { -+ /* -+ * Not an error, since a key not in use by I/O is not guaranteed -+ * to be in a keyslot. There can be more keys than keyslots. -+ */ -+ err = 0; -+ goto out; -+ } - - if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) { -+ /* BUG: key is still in use by I/O */ - err = -EBUSY; -- goto out_unlock; -+ goto out_remove; - } - err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, - blk_ksm_get_slot_idx(slot)); -- if (err) -- goto out_unlock; -- -+out_remove: -+ /* -+ * Callers free the key even on error, so unlink the key from the hash -+ * table and clear slot->key even on error. -+ */ - hlist_del(&slot->hash_node); - slot->key = NULL; -- err = 0; --out_unlock: -+out: - blk_ksm_hw_exit(ksm); - return err; - } -diff --git a/block/mq-deadline.c b/block/mq-deadline.c -index 7f3c3932b723e..aaef5088a3baf 100644 ---- a/block/mq-deadline.c -+++ b/block/mq-deadline.c -@@ -153,6 +153,20 @@ static u8 dd_rq_ioclass(struct request *rq) - return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); - } - -+/* -+ * get the request before `rq' in sector-sorted order -+ */ -+static inline struct request * -+deadline_earlier_request(struct request *rq) -+{ -+ struct rb_node *node = rb_prev(&rq->rb_node); -+ -+ if (node) -+ return rb_entry_rq(node); -+ -+ return NULL; -+} -+ - /* - * get the request after `rq' in sector-sorted order - */ -@@ -288,6 +302,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio, - return 0; - } - -+/* -+ * Check if rq has a sequential request preceding it. -+ */ -+static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq) -+{ -+ struct request *prev = deadline_earlier_request(rq); -+ -+ if (!prev) -+ return false; -+ -+ return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq); -+} -+ -+/* -+ * Skip all write requests that are sequential from @rq, even if we cross -+ * a zone boundary. -+ */ -+static struct request *deadline_skip_seq_writes(struct deadline_data *dd, -+ struct request *rq) -+{ -+ sector_t pos = blk_rq_pos(rq); -+ sector_t skipped_sectors = 0; -+ -+ while (rq) { -+ if (blk_rq_pos(rq) != pos + skipped_sectors) -+ break; -+ skipped_sectors += blk_rq_sectors(rq); -+ rq = deadline_latter_request(rq); -+ } -+ -+ return rq; -+} -+ - /* - * For the specified data direction, return the next request to - * dispatch using arrival ordered lists. -@@ -308,11 +355,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, - - /* - * Look for a write request that can be dispatched, that is one with -- * an unlocked target zone. -+ * an unlocked target zone. For some HDDs, breaking a sequential -+ * write stream can lead to lower throughput, so make sure to preserve -+ * sequential write streams, even if that stream crosses into the next -+ * zones and these zones are unlocked. 
- */ - spin_lock_irqsave(&dd->zone_lock, flags); - list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { -- if (blk_req_can_dispatch_to_zone(rq)) -+ if (blk_req_can_dispatch_to_zone(rq) && -+ (blk_queue_nonrot(rq->q) || -+ !deadline_is_seq_write(dd, rq))) - goto out; - } - rq = NULL; -@@ -342,13 +394,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, - - /* - * Look for a write request that can be dispatched, that is one with -- * an unlocked target zone. -+ * an unlocked target zone. For some HDDs, breaking a sequential -+ * write stream can lead to lower throughput, so make sure to preserve -+ * sequential write streams, even if that stream crosses into the next -+ * zones and these zones are unlocked. - */ - spin_lock_irqsave(&dd->zone_lock, flags); - while (rq) { - if (blk_req_can_dispatch_to_zone(rq)) - break; -- rq = deadline_latter_request(rq); -+ if (blk_queue_nonrot(rq->q)) -+ rq = deadline_latter_request(rq); -+ else -+ rq = deadline_skip_seq_writes(dd, rq); - } - spin_unlock_irqrestore(&dd->zone_lock, flags); - -@@ -733,6 +791,18 @@ static void dd_prepare_request(struct request *rq) - rq->elv.priv[0] = NULL; - } - -+static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx) -+{ -+ struct deadline_data *dd = hctx->queue->elevator->elevator_data; -+ enum dd_prio p; -+ -+ for (p = 0; p <= DD_PRIO_MAX; p++) -+ if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE])) -+ return true; -+ -+ return false; -+} -+ - /* - * Callback from inside blk_mq_free_request(). - * -@@ -755,7 +825,6 @@ static void dd_finish_request(struct request *rq) - struct deadline_data *dd = q->elevator->elevator_data; - const u8 ioprio_class = dd_rq_ioclass(rq); - const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; -- struct dd_per_prio *per_prio = &dd->per_prio[prio]; - - /* - * The block layer core may call dd_finish_request() without having -@@ -771,9 +840,10 @@ static void dd_finish_request(struct request *rq) - - spin_lock_irqsave(&dd->zone_lock, flags); - blk_req_zone_write_unlock(rq); -- if (!list_empty(&per_prio->fifo_list[DD_WRITE])) -- blk_mq_sched_mark_restart_hctx(rq->mq_hctx); - spin_unlock_irqrestore(&dd->zone_lock, flags); -+ -+ if (dd_has_write_work(rq->mq_hctx)) -+ blk_mq_sched_mark_restart_hctx(rq->mq_hctx); - } - } - -@@ -811,7 +881,7 @@ SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); - SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); - SHOW_INT(deadline_writes_starved_show, dd->writes_starved); - SHOW_INT(deadline_front_merges_show, dd->front_merges); --SHOW_INT(deadline_async_depth_show, dd->front_merges); -+SHOW_INT(deadline_async_depth_show, dd->async_depth); - SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch); - #undef SHOW_INT - #undef SHOW_JIFFIES -@@ -840,7 +910,7 @@ STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX) - STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); - STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); - STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); --STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX); -+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX); - STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); - #undef STORE_FUNCTION - #undef STORE_INT -diff --git a/block/partitions/amiga.c b/block/partitions/amiga.c -index 5c8624e26a54c..5069210954129 100644 ---- a/block/partitions/amiga.c -+++ 
b/block/partitions/amiga.c -@@ -11,10 +11,18 @@ - #define pr_fmt(fmt) fmt - - #include -+#include -+#include - #include - - #include "check.h" - -+/* magic offsets in partition DosEnvVec */ -+#define NR_HD 3 -+#define NR_SECT 5 -+#define LO_CYL 9 -+#define HI_CYL 10 -+ - static __inline__ u32 - checksum_block(__be32 *m, int size) - { -@@ -31,8 +39,12 @@ int amiga_partition(struct parsed_partitions *state) - unsigned char *data; - struct RigidDiskBlock *rdb; - struct PartitionBlock *pb; -- int start_sect, nr_sects, blk, part, res = 0; -- int blksize = 1; /* Multiplier for disk block size */ -+ u64 start_sect, nr_sects; -+ sector_t blk, end_sect; -+ u32 cylblk; /* rdb_CylBlocks = nr_heads*sect_per_track */ -+ u32 nr_hd, nr_sect, lo_cyl, hi_cyl; -+ int part, res = 0; -+ unsigned int blksize = 1; /* Multiplier for disk block size */ - int slot = 1; - - for (blk = 0; ; blk++, put_dev_sector(sect)) { -@@ -40,7 +52,7 @@ int amiga_partition(struct parsed_partitions *state) - goto rdb_done; - data = read_part_sector(state, blk, §); - if (!data) { -- pr_err("Dev %s: unable to read RDB block %d\n", -+ pr_err("Dev %s: unable to read RDB block %llu\n", - state->disk->disk_name, blk); - res = -1; - goto rdb_done; -@@ -57,12 +69,12 @@ int amiga_partition(struct parsed_partitions *state) - *(__be32 *)(data+0xdc) = 0; - if (checksum_block((__be32 *)data, - be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) { -- pr_err("Trashed word at 0xd0 in block %d ignored in checksum calculation\n", -+ pr_err("Trashed word at 0xd0 in block %llu ignored in checksum calculation\n", - blk); - break; - } - -- pr_err("Dev %s: RDB in block %d has bad checksum\n", -+ pr_err("Dev %s: RDB in block %llu has bad checksum\n", - state->disk->disk_name, blk); - } - -@@ -78,11 +90,16 @@ int amiga_partition(struct parsed_partitions *state) - } - blk = be32_to_cpu(rdb->rdb_PartitionList); - put_dev_sector(sect); -- for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) { -- blk *= blksize; /* Read in terms partition table understands */ -+ for (part = 1; (s32) blk>0 && part<=16; part++, put_dev_sector(sect)) { -+ /* Read in terms partition table understands */ -+ if (check_mul_overflow(blk, (sector_t) blksize, &blk)) { -+ pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n", -+ state->disk->disk_name, blk, part); -+ break; -+ } - data = read_part_sector(state, blk, §); - if (!data) { -- pr_err("Dev %s: unable to read partition block %d\n", -+ pr_err("Dev %s: unable to read partition block %llu\n", - state->disk->disk_name, blk); - res = -1; - goto rdb_done; -@@ -94,19 +111,70 @@ int amiga_partition(struct parsed_partitions *state) - if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 ) - continue; - -- /* Tell Kernel about it */ -+ /* RDB gives us more than enough rope to hang ourselves with, -+ * many times over (2^128 bytes if all fields max out). -+ * Some careful checks are in order, so check for potential -+ * overflows. -+ * We are multiplying four 32 bit numbers to one sector_t! 
-+ */ -+ -+ nr_hd = be32_to_cpu(pb->pb_Environment[NR_HD]); -+ nr_sect = be32_to_cpu(pb->pb_Environment[NR_SECT]); -+ -+ /* CylBlocks is total number of blocks per cylinder */ -+ if (check_mul_overflow(nr_hd, nr_sect, &cylblk)) { -+ pr_err("Dev %s: heads*sects %u overflows u32, skipping partition!\n", -+ state->disk->disk_name, cylblk); -+ continue; -+ } -+ -+ /* check for consistency with RDB defined CylBlocks */ -+ if (cylblk > be32_to_cpu(rdb->rdb_CylBlocks)) { -+ pr_warn("Dev %s: cylblk %u > rdb_CylBlocks %u!\n", -+ state->disk->disk_name, cylblk, -+ be32_to_cpu(rdb->rdb_CylBlocks)); -+ } -+ -+ /* RDB allows for variable logical block size - -+ * normalize to 512 byte blocks and check result. -+ */ -+ -+ if (check_mul_overflow(cylblk, blksize, &cylblk)) { -+ pr_err("Dev %s: partition %u bytes per cyl. overflows u32, skipping partition!\n", -+ state->disk->disk_name, part); -+ continue; -+ } -+ -+ /* Calculate partition start and end. Limit of 32 bit on cylblk -+ * guarantees no overflow occurs if LBD support is enabled. -+ */ -+ -+ lo_cyl = be32_to_cpu(pb->pb_Environment[LO_CYL]); -+ start_sect = ((u64) lo_cyl * cylblk); -+ -+ hi_cyl = be32_to_cpu(pb->pb_Environment[HI_CYL]); -+ nr_sects = (((u64) hi_cyl - lo_cyl + 1) * cylblk); - -- nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 - -- be32_to_cpu(pb->pb_Environment[9])) * -- be32_to_cpu(pb->pb_Environment[3]) * -- be32_to_cpu(pb->pb_Environment[5]) * -- blksize; - if (!nr_sects) - continue; -- start_sect = be32_to_cpu(pb->pb_Environment[9]) * -- be32_to_cpu(pb->pb_Environment[3]) * -- be32_to_cpu(pb->pb_Environment[5]) * -- blksize; -+ -+ /* Warn user if partition end overflows u32 (AmigaDOS limit) */ -+ -+ if ((start_sect + nr_sects) > UINT_MAX) { -+ pr_warn("Dev %s: partition %u (%llu-%llu) needs 64 bit device support!\n", -+ state->disk->disk_name, part, -+ start_sect, start_sect + nr_sects); -+ } -+ -+ if (check_add_overflow(start_sect, nr_sects, &end_sect)) { -+ pr_err("Dev %s: partition %u (%llu-%llu) needs LBD device support, skipping partition!\n", -+ state->disk->disk_name, part, -+ start_sect, end_sect); -+ continue; -+ } -+ -+ /* Tell Kernel about it */ -+ - put_partition(state,slot++,start_sect,nr_sects); - { - /* Be even more informative to aid mounting */ -diff --git a/block/partitions/core.c b/block/partitions/core.c -index 7bea19dd9458f..1ead8c0015616 100644 ---- a/block/partitions/core.c -+++ b/block/partitions/core.c -@@ -5,6 +5,7 @@ - * Copyright (C) 2020 Christoph Hellwig - */ - #include -+#include - #include - #include - #include -@@ -525,18 +526,15 @@ out_unlock: - - static bool disk_unlock_native_capacity(struct gendisk *disk) - { -- const struct block_device_operations *bdops = disk->fops; -- -- if (bdops->unlock_native_capacity && -- !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) { -- printk(KERN_CONT "enabling native capacity\n"); -- bdops->unlock_native_capacity(disk); -- disk->flags |= GENHD_FL_NATIVE_CAPACITY; -- return true; -- } else { -+ if (!disk->fops->unlock_native_capacity || -+ test_and_set_bit(GD_NATIVE_CAPACITY, &disk->state)) { - printk(KERN_CONT "truncated\n"); - return false; - } -+ -+ printk(KERN_CONT "enabling native capacity\n"); -+ disk->fops->unlock_native_capacity(disk); -+ return true; - } - - void blk_drop_partitions(struct gendisk *disk) -diff --git a/block/sed-opal.c b/block/sed-opal.c -index daafadbb88cae..0ac5a4f3f2261 100644 ---- a/block/sed-opal.c -+++ b/block/sed-opal.c -@@ -88,8 +88,8 @@ struct opal_dev { - u64 lowest_lba; - - size_t pos; -- u8 cmd[IO_BUFFER_LENGTH]; -- u8 
resp[IO_BUFFER_LENGTH]; -+ u8 *cmd; -+ u8 *resp; - - struct parsed_resp parsed; - size_t prev_d_len; -@@ -2134,6 +2134,8 @@ void free_opal_dev(struct opal_dev *dev) - return; - - clean_opal_dev(dev); -+ kfree(dev->resp); -+ kfree(dev->cmd); - kfree(dev); - } - EXPORT_SYMBOL(free_opal_dev); -@@ -2146,17 +2148,39 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv) - if (!dev) - return NULL; - -+ /* -+ * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes -+ * sure the allocated buffer is DMA-safe in that regard. -+ */ -+ dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); -+ if (!dev->cmd) -+ goto err_free_dev; -+ -+ dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL); -+ if (!dev->resp) -+ goto err_free_cmd; -+ - INIT_LIST_HEAD(&dev->unlk_lst); - mutex_init(&dev->dev_lock); - dev->data = data; - dev->send_recv = send_recv; - if (check_opal_support(dev) != 0) { - pr_debug("Opal is not supported on this device\n"); -- kfree(dev); -- return NULL; -+ goto err_free_resp; - } - - return dev; -+ -+err_free_resp: -+ kfree(dev->resp); -+ -+err_free_cmd: -+ kfree(dev->cmd); -+ -+err_free_dev: -+ kfree(dev); -+ -+ return NULL; - } - EXPORT_SYMBOL(init_opal_dev); - -diff --git a/certs/blacklist_hashes.c b/certs/blacklist_hashes.c -index 344892337be07..d5961aa3d3380 100644 ---- a/certs/blacklist_hashes.c -+++ b/certs/blacklist_hashes.c -@@ -1,7 +1,7 @@ - // SPDX-License-Identifier: GPL-2.0 - #include "blacklist.h" - --const char __initdata *const blacklist_hashes[] = { -+const char __initconst *const blacklist_hashes[] = { - #include CONFIG_SYSTEM_BLACKLIST_HASH_LIST - , NULL - }; -diff --git a/crypto/Kconfig b/crypto/Kconfig -index 536df4b6b825c..db260ccfba51b 100644 ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig" - # - menuconfig CRYPTO - tristate "Cryptographic API" -+ select LIB_MEMNEQ - help - This option provides the core Cryptographic API. - -@@ -233,12 +234,12 @@ config CRYPTO_DH - - config CRYPTO_ECC - tristate -+ select CRYPTO_RNG_DEFAULT - - config CRYPTO_ECDH - tristate "ECDH algorithm" - select CRYPTO_ECC - select CRYPTO_KPP -- select CRYPTO_RNG_DEFAULT - help - Generic implementation of the ECDH algorithm - -@@ -683,26 +684,8 @@ config CRYPTO_BLAKE2B - - See https://blake2.net for further information. - --config CRYPTO_BLAKE2S -- tristate "BLAKE2s digest algorithm" -- select CRYPTO_LIB_BLAKE2S_GENERIC -- select CRYPTO_HASH -- help -- Implementation of cryptographic hash function BLAKE2s -- optimized for 8-32bit platforms and can produce digests of any size -- between 1 to 32. The keyed hash is also implemented. -- -- This module provides the following algorithms: -- -- - blake2s-128 -- - blake2s-160 -- - blake2s-224 -- - blake2s-256 -- -- See https://blake2.net for further information. 
-- - config CRYPTO_BLAKE2S_X86 -- tristate "BLAKE2s digest algorithm (x86 accelerated version)" -+ bool "BLAKE2s digest algorithm (x86 accelerated version)" - depends on X86 && 64BIT - select CRYPTO_LIB_BLAKE2S_GENERIC - select CRYPTO_ARCH_HAVE_LIB_BLAKE2S -@@ -1919,7 +1902,6 @@ config CRYPTO_STATS - config CRYPTO_HASH_INFO - bool - --source "lib/crypto/Kconfig" - source "drivers/crypto/Kconfig" - source "crypto/asymmetric_keys/Kconfig" - source "certs/Kconfig" -diff --git a/crypto/Makefile b/crypto/Makefile -index c633f15a04813..429591ffeb5da 100644 ---- a/crypto/Makefile -+++ b/crypto/Makefile -@@ -4,7 +4,7 @@ - # - - obj-$(CONFIG_CRYPTO) += crypto.o --crypto-y := api.o cipher.o compress.o memneq.o -+crypto-y := api.o cipher.o compress.o - - obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o - obj-$(CONFIG_CRYPTO_FIPS) += fips.o -@@ -83,7 +83,6 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o - obj-$(CONFIG_CRYPTO_WP512) += wp512.o - CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 - obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o --obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o - obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o - obj-$(CONFIG_CRYPTO_ECB) += ecb.o - obj-$(CONFIG_CRYPTO_CBC) += cbc.o -diff --git a/crypto/akcipher.c b/crypto/akcipher.c -index f866085c8a4a3..ab975a420e1e9 100644 ---- a/crypto/akcipher.c -+++ b/crypto/akcipher.c -@@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req) - return -ENOSYS; - } - -+static int akcipher_default_set_key(struct crypto_akcipher *tfm, -+ const void *key, unsigned int keylen) -+{ -+ return -ENOSYS; -+} -+ - int crypto_register_akcipher(struct akcipher_alg *alg) - { - struct crypto_alg *base = &alg->base; -@@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg) - alg->encrypt = akcipher_default_op; - if (!alg->decrypt) - alg->decrypt = akcipher_default_op; -+ if (!alg->set_priv_key) -+ alg->set_priv_key = akcipher_default_set_key; - - akcipher_prepare_alg(alg); - return crypto_register_alg(base); -diff --git a/crypto/algapi.c b/crypto/algapi.c -index 43f999dba4dc0..c390a79c5a669 100644 ---- a/crypto/algapi.c -+++ b/crypto/algapi.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - #include "internal.h" - -@@ -68,15 +69,26 @@ static void crypto_free_instance(struct crypto_instance *inst) - inst->alg.cra_type->free(inst); - } - --static void crypto_destroy_instance(struct crypto_alg *alg) -+static void crypto_destroy_instance_workfn(struct work_struct *w) - { -- struct crypto_instance *inst = (void *)alg; -+ struct crypto_instance *inst = container_of(w, struct crypto_instance, -+ free_work); - struct crypto_template *tmpl = inst->tmpl; - - crypto_free_instance(inst); - crypto_tmpl_put(tmpl); - } - -+static void crypto_destroy_instance(struct crypto_alg *alg) -+{ -+ struct crypto_instance *inst = container_of(alg, -+ struct crypto_instance, -+ alg); -+ -+ INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn); -+ schedule_work(&inst->free_work); -+} -+ - /* - * This function adds a spawn to the list secondary_spawns which - * will be used at the end of crypto_remove_spawns to unregister -@@ -456,7 +468,9 @@ void crypto_unregister_alg(struct crypto_alg *alg) - if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name)) - return; - -- BUG_ON(refcount_read(&alg->cra_refcnt) != 1); -+ if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1)) -+ return; -+ - if (alg->cra_destroy) - alg->cra_destroy(alg); - -@@ 
-918,6 +932,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request); - void crypto_enqueue_request_head(struct crypto_queue *queue, - struct crypto_async_request *request) - { -+ if (unlikely(queue->qlen >= queue->max_qlen)) -+ queue->backlog = queue->backlog->prev; -+ - queue->qlen++; - list_add(&request->list, &queue->list); - } -@@ -1277,3 +1294,4 @@ module_exit(crypto_algapi_exit); - - MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("Cryptographic algorithms API"); -+MODULE_SOFTDEP("pre: cryptomgr"); -diff --git a/crypto/api.c b/crypto/api.c -index c4eda56cff891..5ffcd3ab4a753 100644 ---- a/crypto/api.c -+++ b/crypto/api.c -@@ -603,4 +603,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done); - - MODULE_DESCRIPTION("Cryptographic core API"); - MODULE_LICENSE("GPL"); --MODULE_SOFTDEP("pre: cryptomgr"); -diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c -index 0b4d07aa88111..df279538cead3 100644 ---- a/crypto/asymmetric_keys/pkcs7_verify.c -+++ b/crypto/asymmetric_keys/pkcs7_verify.c -@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7, - } - - if (sinfo->msgdigest_len != sig->digest_size) { -- pr_debug("Sig %u: Invalid digest size (%u)\n", -- sinfo->index, sinfo->msgdigest_len); -+ pr_warn("Sig %u: Invalid digest size (%u)\n", -+ sinfo->index, sinfo->msgdigest_len); - ret = -EBADMSG; - goto error; - } - - if (memcmp(sig->digest, sinfo->msgdigest, - sinfo->msgdigest_len) != 0) { -- pr_debug("Sig %u: Message digest doesn't match\n", -- sinfo->index); -+ pr_warn("Sig %u: Message digest doesn't match\n", -+ sinfo->index); - ret = -EKEYREJECTED; - goto error; - } -@@ -174,12 +174,6 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, - pr_devel("Sig %u: Found cert serial match X.509[%u]\n", - sinfo->index, certix); - -- if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { -- pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", -- sinfo->index); -- continue; -- } -- - sinfo->signer = x509; - return 0; - } -@@ -487,7 +481,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, - const void *data, size_t datalen) - { - if (pkcs7->data) { -- pr_debug("Data already supplied\n"); -+ pr_warn("Data already supplied\n"); - return -EINVAL; - } - pkcs7->data = data; -diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c -index 4fefb219bfdc8..50c933f86b218 100644 ---- a/crypto/asymmetric_keys/public_key.c -+++ b/crypto/asymmetric_keys/public_key.c -@@ -60,39 +60,83 @@ static void public_key_destroy(void *payload0, void *payload3) - } - - /* -- * Determine the crypto algorithm name. -+ * Given a public_key, and an encoding and hash_algo to be used for signing -+ * and/or verification with that key, determine the name of the corresponding -+ * akcipher algorithm. Also check that encoding and hash_algo are allowed. - */ --static --int software_key_determine_akcipher(const char *encoding, -- const char *hash_algo, -- const struct public_key *pkey, -- char alg_name[CRYPTO_MAX_ALG_NAME]) -+static int -+software_key_determine_akcipher(const struct public_key *pkey, -+ const char *encoding, const char *hash_algo, -+ char alg_name[CRYPTO_MAX_ALG_NAME]) - { - int n; - -- if (strcmp(encoding, "pkcs1") == 0) { -- /* The data wangled by the RSA algorithm is typically padded -- * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447 -- * sec 8.2]. -+ if (!encoding) -+ return -EINVAL; -+ -+ if (strcmp(pkey->pkey_algo, "rsa") == 0) { -+ /* -+ * RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2]. 
-+ */ -+ if (strcmp(encoding, "pkcs1") == 0) { -+ if (!hash_algo) -+ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, -+ "pkcs1pad(%s)", -+ pkey->pkey_algo); -+ else -+ n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, -+ "pkcs1pad(%s,%s)", -+ pkey->pkey_algo, hash_algo); -+ return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; -+ } -+ if (strcmp(encoding, "raw") != 0) -+ return -EINVAL; -+ /* -+ * Raw RSA cannot differentiate between different hash -+ * algorithms. -+ */ -+ if (hash_algo) -+ return -EINVAL; -+ } else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { -+ if (strcmp(encoding, "x962") != 0) -+ return -EINVAL; -+ /* -+ * ECDSA signatures are taken over a raw hash, so they don't -+ * differentiate between different hash algorithms. That means -+ * that the verifier should hard-code a specific hash algorithm. -+ * Unfortunately, in practice ECDSA is used with multiple SHAs, -+ * so we have to allow all of them and not just one. - */ - if (!hash_algo) -- n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, -- "pkcs1pad(%s)", -- pkey->pkey_algo); -- else -- n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, -- "pkcs1pad(%s,%s)", -- pkey->pkey_algo, hash_algo); -- return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; -- } -- -- if (strcmp(encoding, "raw") == 0 || -- strcmp(encoding, "x962") == 0) { -- strcpy(alg_name, pkey->pkey_algo); -- return 0; -+ return -EINVAL; -+ if (strcmp(hash_algo, "sha1") != 0 && -+ strcmp(hash_algo, "sha224") != 0 && -+ strcmp(hash_algo, "sha256") != 0 && -+ strcmp(hash_algo, "sha384") != 0 && -+ strcmp(hash_algo, "sha512") != 0) -+ return -EINVAL; -+ } else if (strcmp(pkey->pkey_algo, "sm2") == 0) { -+ if (strcmp(encoding, "raw") != 0) -+ return -EINVAL; -+ if (!hash_algo) -+ return -EINVAL; -+ if (strcmp(hash_algo, "sm3") != 0) -+ return -EINVAL; -+ } else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) { -+ if (strcmp(encoding, "raw") != 0) -+ return -EINVAL; -+ if (!hash_algo) -+ return -EINVAL; -+ if (strcmp(hash_algo, "streebog256") != 0 && -+ strcmp(hash_algo, "streebog512") != 0) -+ return -EINVAL; -+ } else { -+ /* Unknown public key algorithm */ -+ return -ENOPKG; - } -- -- return -ENOPKG; -+ if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0) -+ return -EINVAL; -+ return 0; - } - - static u8 *pkey_pack_u32(u8 *dst, u32 val) -@@ -113,9 +157,8 @@ static int software_key_query(const struct kernel_pkey_params *params, - u8 *key, *ptr; - int ret, len; - -- ret = software_key_determine_akcipher(params->encoding, -- params->hash_algo, -- pkey, alg_name); -+ ret = software_key_determine_akcipher(pkey, params->encoding, -+ params->hash_algo, alg_name); - if (ret < 0) - return ret; - -@@ -143,8 +186,28 @@ static int software_key_query(const struct kernel_pkey_params *params, - - len = crypto_akcipher_maxsize(tfm); - info->key_size = len * 8; -- info->max_data_size = len; -- info->max_sig_size = len; -+ -+ if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) { -+ /* -+ * ECDSA key sizes are much smaller than RSA, and thus could -+ * operate on (hashed) inputs that are larger than key size. -+ * For example SHA384-hashed input used with secp256r1 -+ * based keys. Set max_data_size to be at least as large as -+ * the largest supported hash size (SHA512) -+ */ -+ info->max_data_size = 64; -+ -+ /* -+ * Verify takes ECDSA-Sig (described in RFC 5480) as input, -+ * which is actually 2 'key_size'-bit integers encoded in -+ * ASN.1. Account for the ASN.1 encoding overhead here. 
-+ */ -+ info->max_sig_size = 2 * (len + 3) + 2; -+ } else { -+ info->max_data_size = len; -+ info->max_sig_size = len; -+ } -+ - info->max_enc_size = len; - info->max_dec_size = len; - info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT | -@@ -179,9 +242,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params, - - pr_devel("==>%s()\n", __func__); - -- ret = software_key_determine_akcipher(params->encoding, -- params->hash_algo, -- pkey, alg_name); -+ ret = software_key_determine_akcipher(pkey, params->encoding, -+ params->hash_algo, alg_name); - if (ret < 0) - return ret; - -@@ -262,6 +324,10 @@ static int cert_sig_digest_update(const struct public_key_signature *sig, - - BUG_ON(!sig->data); - -+ /* SM2 signatures always use the SM3 hash algorithm */ -+ if (!sig->hash_algo || strcmp(sig->hash_algo, "sm3") != 0) -+ return -EINVAL; -+ - ret = sm2_compute_z_digest(tfm_pkey, SM2_DEFAULT_USERID, - SM2_DEFAULT_USERID_LEN, dgst); - if (ret) -@@ -314,9 +380,10 @@ int public_key_verify_signature(const struct public_key *pkey, - struct crypto_wait cwait; - struct crypto_akcipher *tfm; - struct akcipher_request *req; -- struct scatterlist src_sg[2]; -+ struct scatterlist src_sg; - char alg_name[CRYPTO_MAX_ALG_NAME]; -- char *key, *ptr; -+ char *buf, *ptr; -+ size_t buf_len; - int ret; - - pr_devel("==>%s()\n", __func__); -@@ -325,9 +392,23 @@ int public_key_verify_signature(const struct public_key *pkey, - BUG_ON(!sig); - BUG_ON(!sig->s); - -- ret = software_key_determine_akcipher(sig->encoding, -- sig->hash_algo, -- pkey, alg_name); -+ /* -+ * If the signature specifies a public key algorithm, it *must* match -+ * the key's actual public key algorithm. -+ * -+ * Small exception: ECDSA signatures don't specify the curve, but ECDSA -+ * keys do. So the strings can mismatch slightly in that case: -+ * "ecdsa-nist-*" for the key, but "ecdsa" for the signature. 
-+ */ -+ if (sig->pkey_algo) { -+ if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 && -+ (strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 || -+ strcmp(sig->pkey_algo, "ecdsa") != 0)) -+ return -EKEYREJECTED; -+ } -+ -+ ret = software_key_determine_akcipher(pkey, sig->encoding, -+ sig->hash_algo, alg_name); - if (ret < 0) - return ret; - -@@ -340,35 +421,37 @@ int public_key_verify_signature(const struct public_key *pkey, - if (!req) - goto error_free_tfm; - -- key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, -- GFP_KERNEL); -- if (!key) -+ buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, -+ sig->s_size + sig->digest_size); -+ -+ buf = kmalloc(buf_len, GFP_KERNEL); -+ if (!buf) - goto error_free_req; - -- memcpy(key, pkey->key, pkey->keylen); -- ptr = key + pkey->keylen; -+ memcpy(buf, pkey->key, pkey->keylen); -+ ptr = buf + pkey->keylen; - ptr = pkey_pack_u32(ptr, pkey->algo); - ptr = pkey_pack_u32(ptr, pkey->paramlen); - memcpy(ptr, pkey->params, pkey->paramlen); - - if (pkey->key_is_private) -- ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); -+ ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); - else -- ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); -+ ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); - if (ret) -- goto error_free_key; -+ goto error_free_buf; - -- if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 && -- sig->data_size) { -+ if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { - ret = cert_sig_digest_update(sig, tfm); - if (ret) -- goto error_free_key; -+ goto error_free_buf; - } - -- sg_init_table(src_sg, 2); -- sg_set_buf(&src_sg[0], sig->s, sig->s_size); -- sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); -- akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, -+ memcpy(buf, sig->s, sig->s_size); -+ memcpy(buf + sig->s_size, sig->digest, sig->digest_size); -+ -+ sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size); -+ akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size, - sig->digest_size); - crypto_init_wait(&cwait); - akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | -@@ -376,8 +459,8 @@ int public_key_verify_signature(const struct public_key *pkey, - crypto_req_done, &cwait); - ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); - --error_free_key: -- kfree(key); -+error_free_buf: -+ kfree(buf); - error_free_req: - akcipher_request_free(req); - error_free_tfm: -diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c -index 7553ab18db898..22beaf2213a22 100644 ---- a/crypto/asymmetric_keys/verify_pefile.c -+++ b/crypto/asymmetric_keys/verify_pefile.c -@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, - break; - - default: -- pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic); -+ pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic); - return -ELIBBAD; - } - -@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, - ctx->certs_size = ddir->certs.size; - - if (!ddir->certs.virtual_address || !ddir->certs.size) { -- pr_debug("Unsigned PE binary\n"); -+ pr_warn("Unsigned PE binary\n"); - return -ENODATA; - } - -@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, - unsigned len; - - if (ctx->sig_len < sizeof(wrapper)) { -- pr_debug("Signature wrapper too short\n"); -+ pr_warn("Signature wrapper too short\n"); - return -ELIBBAD; - } - -@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void 
*pebuf, - pr_debug("sig wrapper = { %x, %x, %x }\n", - wrapper.length, wrapper.revision, wrapper.cert_type); - -- /* Both pesign and sbsign round up the length of certificate table -- * (in optional header data directories) to 8 byte alignment. -+ /* sbsign rounds up the length of certificate table (in optional -+ * header data directories) to 8 byte alignment. However, the PE -+ * specification states that while entries are 8-byte aligned, this is -+ * not included in their length, and as a result, pesign has not -+ * rounded up since 0.110. - */ -- if (round_up(wrapper.length, 8) != ctx->sig_len) { -- pr_debug("Signature wrapper len wrong\n"); -+ if (wrapper.length > ctx->sig_len) { -+ pr_warn("Signature wrapper bigger than sig len (%x > %x)\n", -+ ctx->sig_len, wrapper.length); - return -ELIBBAD; - } - if (wrapper.revision != WIN_CERT_REVISION_2_0) { -- pr_debug("Signature is not revision 2.0\n"); -+ pr_warn("Signature is not revision 2.0\n"); - return -ENOTSUPP; - } - if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) { -- pr_debug("Signature certificate type is not PKCS\n"); -+ pr_warn("Signature certificate type is not PKCS\n"); - return -ENOTSUPP; - } - -@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, - ctx->sig_offset += sizeof(wrapper); - ctx->sig_len -= sizeof(wrapper); - if (ctx->sig_len < 4) { -- pr_debug("Signature data missing\n"); -+ pr_warn("Signature data missing\n"); - return -EKEYREJECTED; - } - -@@ -194,7 +198,7 @@ check_len: - return 0; - } - not_pkcs7: -- pr_debug("Signature data not PKCS#7\n"); -+ pr_warn("Signature data not PKCS#7\n"); - return -ELIBBAD; - } - -@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, - digest_size = crypto_shash_digestsize(tfm); - - if (digest_size != ctx->digest_len) { -- pr_debug("Digest size mismatch (%zx != %x)\n", -- digest_size, ctx->digest_len); -+ pr_warn("Digest size mismatch (%zx != %x)\n", -+ digest_size, ctx->digest_len); - ret = -EBADMSG; - goto error_no_desc; - } -@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, - * PKCS#7 certificate. 
- */ - if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) { -- pr_debug("Digest mismatch\n"); -+ pr_warn("Digest mismatch\n"); - ret = -EKEYREJECTED; - } else { - pr_debug("The digests match!\n"); -diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c -index 3d45161b271a4..99fe28663f948 100644 ---- a/crypto/asymmetric_keys/x509_public_key.c -+++ b/crypto/asymmetric_keys/x509_public_key.c -@@ -128,11 +128,10 @@ int x509_check_for_self_signed(struct x509_certificate *cert) - goto out; - } - -- ret = -EKEYREJECTED; -- if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 && -- (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 || -- strcmp(cert->sig->pkey_algo, "ecdsa") != 0)) -+ if (cert->unsupported_sig) { -+ ret = 0; - goto out; -+ } - - ret = public_key_verify_signature(cert->pub, cert->sig); - if (ret < 0) { -diff --git a/crypto/authenc.c b/crypto/authenc.c -index 670bf1a01d00e..17f674a7cdff5 100644 ---- a/crypto/authenc.c -+++ b/crypto/authenc.c -@@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, - dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); - - skcipher_request_set_tfm(skreq, ctx->enc); -- skcipher_request_set_callback(skreq, aead_request_flags(req), -+ skcipher_request_set_callback(skreq, flags, - req->base.complete, req->base.data); - skcipher_request_set_crypt(skreq, src, dst, - req->cryptlen - authsize, req->iv); -diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c -deleted file mode 100644 -index 72fe480f9bd67..0000000000000 ---- a/crypto/blake2s_generic.c -+++ /dev/null -@@ -1,75 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 OR MIT --/* -- * shash interface to the generic implementation of BLAKE2s -- * -- * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-- */ -- --#include --#include -- --#include --#include --#include -- --static int crypto_blake2s_update_generic(struct shash_desc *desc, -- const u8 *in, unsigned int inlen) --{ -- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic); --} -- --static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out) --{ -- return crypto_blake2s_final(desc, out, blake2s_compress_generic); --} -- --#define BLAKE2S_ALG(name, driver_name, digest_size) \ -- { \ -- .base.cra_name = name, \ -- .base.cra_driver_name = driver_name, \ -- .base.cra_priority = 100, \ -- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ -- .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ -- .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ -- .base.cra_module = THIS_MODULE, \ -- .digestsize = digest_size, \ -- .setkey = crypto_blake2s_setkey, \ -- .init = crypto_blake2s_init, \ -- .update = crypto_blake2s_update_generic, \ -- .final = crypto_blake2s_final_generic, \ -- .descsize = sizeof(struct blake2s_state), \ -- } -- --static struct shash_alg blake2s_algs[] = { -- BLAKE2S_ALG("blake2s-128", "blake2s-128-generic", -- BLAKE2S_128_HASH_SIZE), -- BLAKE2S_ALG("blake2s-160", "blake2s-160-generic", -- BLAKE2S_160_HASH_SIZE), -- BLAKE2S_ALG("blake2s-224", "blake2s-224-generic", -- BLAKE2S_224_HASH_SIZE), -- BLAKE2S_ALG("blake2s-256", "blake2s-256-generic", -- BLAKE2S_256_HASH_SIZE), --}; -- --static int __init blake2s_mod_init(void) --{ -- return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); --} -- --static void __exit blake2s_mod_exit(void) --{ -- crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); --} -- --subsys_initcall(blake2s_mod_init); --module_exit(blake2s_mod_exit); -- --MODULE_ALIAS_CRYPTO("blake2s-128"); --MODULE_ALIAS_CRYPTO("blake2s-128-generic"); --MODULE_ALIAS_CRYPTO("blake2s-160"); --MODULE_ALIAS_CRYPTO("blake2s-160-generic"); --MODULE_ALIAS_CRYPTO("blake2s-224"); --MODULE_ALIAS_CRYPTO("blake2s-224-generic"); --MODULE_ALIAS_CRYPTO("blake2s-256"); --MODULE_ALIAS_CRYPTO("blake2s-256-generic"); --MODULE_LICENSE("GPL v2"); -diff --git a/crypto/cryptd.c b/crypto/cryptd.c -index a1bea0f4baa88..ca3a40fc7da91 100644 ---- a/crypto/cryptd.c -+++ b/crypto/cryptd.c -@@ -39,6 +39,10 @@ struct cryptd_cpu_queue { - }; - - struct cryptd_queue { -+ /* -+ * Protected by disabling BH to allow enqueueing from softinterrupt and -+ * dequeuing from kworker (cryptd_queue_worker()). 
-+ */ - struct cryptd_cpu_queue __percpu *cpu_queue; - }; - -@@ -64,11 +68,12 @@ struct aead_instance_ctx { - - struct cryptd_skcipher_ctx { - refcount_t refcnt; -- struct crypto_sync_skcipher *child; -+ struct crypto_skcipher *child; - }; - - struct cryptd_skcipher_request_ctx { - crypto_completion_t complete; -+ struct skcipher_request req; - }; - - struct cryptd_hash_ctx { -@@ -125,28 +130,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue) - static int cryptd_enqueue_request(struct cryptd_queue *queue, - struct crypto_async_request *request) - { -- int cpu, err; -+ int err; - struct cryptd_cpu_queue *cpu_queue; - refcount_t *refcnt; - -- cpu = get_cpu(); -+ local_bh_disable(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - err = crypto_enqueue_request(&cpu_queue->queue, request); - - refcnt = crypto_tfm_ctx(request->tfm); - - if (err == -ENOSPC) -- goto out_put_cpu; -+ goto out; - -- queue_work_on(cpu, cryptd_wq, &cpu_queue->work); -+ queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); - - if (!refcount_read(refcnt)) -- goto out_put_cpu; -+ goto out; - - refcount_inc(refcnt); - --out_put_cpu: -- put_cpu(); -+out: -+ local_bh_enable(); - - return err; - } -@@ -162,15 +167,10 @@ static void cryptd_queue_worker(struct work_struct *work) - cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* - * Only handle one request at a time to avoid hogging crypto workqueue. -- * preempt_disable/enable is used to prevent being preempted by -- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent -- * cryptd_enqueue_request() being accessed from software interrupts. - */ - local_bh_disable(); -- preempt_disable(); - backlog = crypto_get_backlog(&cpu_queue->queue); - req = crypto_dequeue_request(&cpu_queue->queue); -- preempt_enable(); - local_bh_enable(); - - if (!req) -@@ -228,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, - const u8 *key, unsigned int keylen) - { - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); -- struct crypto_sync_skcipher *child = ctx->child; -+ struct crypto_skcipher *child = ctx->child; - -- crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); -- crypto_sync_skcipher_set_flags(child, -- crypto_skcipher_get_flags(parent) & -- CRYPTO_TFM_REQ_MASK); -- return crypto_sync_skcipher_setkey(child, key, keylen); -+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); -+ crypto_skcipher_set_flags(child, -+ crypto_skcipher_get_flags(parent) & -+ CRYPTO_TFM_REQ_MASK); -+ return crypto_skcipher_setkey(child, key, keylen); - } - - static void cryptd_skcipher_complete(struct skcipher_request *req, int err) -@@ -259,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base, - struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); -- struct crypto_sync_skcipher *child = ctx->child; -- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); -+ struct skcipher_request *subreq = &rctx->req; -+ struct crypto_skcipher *child = ctx->child; - - if (unlikely(err == -EINPROGRESS)) - goto out; - -- skcipher_request_set_sync_tfm(subreq, child); -+ skcipher_request_set_tfm(subreq, child); - skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, -@@ -287,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base, - 
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); -- struct crypto_sync_skcipher *child = ctx->child; -- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); -+ struct skcipher_request *subreq = &rctx->req; -+ struct crypto_skcipher *child = ctx->child; - - if (unlikely(err == -EINPROGRESS)) - goto out; - -- skcipher_request_set_sync_tfm(subreq, child); -+ skcipher_request_set_tfm(subreq, child); - skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, - NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, -@@ -344,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - -- ctx->child = (struct crypto_sync_skcipher *)cipher; -+ ctx->child = cipher; - crypto_skcipher_set_reqsize( -- tfm, sizeof(struct cryptd_skcipher_request_ctx)); -+ tfm, sizeof(struct cryptd_skcipher_request_ctx) + -+ crypto_skcipher_reqsize(cipher)); - return 0; - } - -@@ -354,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) - { - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - -- crypto_free_sync_skcipher(ctx->child); -+ crypto_free_skcipher(ctx->child); - } - - static void cryptd_skcipher_free(struct skcipher_instance *inst) -@@ -932,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) - { - struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - -- return &ctx->child->base; -+ return ctx->child; - } - EXPORT_SYMBOL_GPL(cryptd_skcipher_child); - -diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c -index cff21f4e03e32..34effd4826c03 100644 ---- a/crypto/crypto_engine.c -+++ b/crypto/crypto_engine.c -@@ -53,7 +53,8 @@ static void crypto_finalize_request(struct crypto_engine *engine, - dev_err(engine->dev, "failed to unprepare request\n"); - } - } -- req->complete(req, err); -+ lockdep_assert_in_softirq(); -+ crypto_request_complete(req, err); - - kthread_queue_work(engine->kworker, &engine->pump_requests); - } -@@ -128,9 +129,6 @@ start_request: - if (!engine->retry_support) - engine->cur_req = async_req; - -- if (backlog) -- backlog->complete(backlog, -EINPROGRESS); -- - if (engine->busy) - was_busy = true; - else -@@ -213,9 +211,12 @@ req_err_1: - } - - req_err_2: -- async_req->complete(async_req, ret); -+ crypto_request_complete(async_req, ret); - - retry: -+ if (backlog) -+ crypto_request_complete(backlog, -EINPROGRESS); -+ - /* If retry mechanism is supported, send new requests to engine */ - if (engine->retry_support) { - spin_lock_irqsave(&engine->queue_lock, flags); -diff --git a/crypto/drbg.c b/crypto/drbg.c -index ea85d4a0fe9e9..44b0a7f624021 100644 ---- a/crypto/drbg.c -+++ b/crypto/drbg.c -@@ -1036,17 +1036,38 @@ static const struct drbg_state_ops drbg_hash_ops = { - ******************************************************************/ - - static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, -- int reseed) -+ int reseed, enum drbg_seed_state new_seed_state) - { - int ret = drbg->d_ops->update(drbg, seed, reseed); - - if (ret) - return ret; - -- drbg->seeded = true; -+ drbg->seeded = new_seed_state; - /* 10.1.1.2 / 10.1.1.3 step 5 */ - drbg->reseed_ctr = 1; - -+ switch (drbg->seeded) { -+ case DRBG_SEED_STATE_UNSEEDED: -+ /* Impossible, but handle it to silence compiler warnings. 
*/ -+ fallthrough; -+ case DRBG_SEED_STATE_PARTIAL: -+ /* -+ * Require frequent reseeds until the seed source is -+ * fully initialized. -+ */ -+ drbg->reseed_threshold = 50; -+ break; -+ -+ case DRBG_SEED_STATE_FULL: -+ /* -+ * Seed source has become fully initialized, frequent -+ * reseeds no longer required. -+ */ -+ drbg->reseed_threshold = drbg_max_requests(drbg); -+ break; -+ } -+ - return ret; - } - -@@ -1066,12 +1087,10 @@ static inline int drbg_get_random_bytes(struct drbg_state *drbg, - return 0; - } - --static void drbg_async_seed(struct work_struct *work) -+static int drbg_seed_from_random(struct drbg_state *drbg) - { - struct drbg_string data; - LIST_HEAD(seedlist); -- struct drbg_state *drbg = container_of(work, struct drbg_state, -- seed_work); - unsigned int entropylen = drbg_sec_strength(drbg->core->flags); - unsigned char entropy[32]; - int ret; -@@ -1082,26 +1101,15 @@ static void drbg_async_seed(struct work_struct *work) - drbg_string_fill(&data, entropy, entropylen); - list_add_tail(&data.list, &seedlist); - -- mutex_lock(&drbg->drbg_mutex); -- - ret = drbg_get_random_bytes(drbg, entropy, entropylen); - if (ret) -- goto unlock; -- -- /* Set seeded to false so that if __drbg_seed fails the -- * next generate call will trigger a reseed. -- */ -- drbg->seeded = false; -- -- __drbg_seed(drbg, &seedlist, true); -- -- if (drbg->seeded) -- drbg->reseed_threshold = drbg_max_requests(drbg); -+ goto out; - --unlock: -- mutex_unlock(&drbg->drbg_mutex); -+ ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL); - -+out: - memzero_explicit(entropy, entropylen); -+ return ret; - } - - /* -@@ -1123,6 +1131,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, - unsigned int entropylen = drbg_sec_strength(drbg->core->flags); - struct drbg_string data1; - LIST_HEAD(seedlist); -+ enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL; - - /* 9.1 / 9.2 / 9.3.1 step 3 */ - if (pers && pers->len > (drbg_max_addtl(drbg))) { -@@ -1150,6 +1159,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, - BUG_ON((entropylen * 2) > sizeof(entropy)); - - /* Get seed from in-kernel /dev/urandom */ -+ if (!rng_is_initialized()) -+ new_seed_state = DRBG_SEED_STATE_PARTIAL; -+ - ret = drbg_get_random_bytes(drbg, entropy, entropylen); - if (ret) - goto out; -@@ -1206,7 +1218,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, - memset(drbg->C, 0, drbg_statelen(drbg)); - } - -- ret = __drbg_seed(drbg, &seedlist, reseed); -+ ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state); - - out: - memzero_explicit(entropy, entropylen * 2); -@@ -1386,19 +1398,25 @@ static int drbg_generate(struct drbg_state *drbg, - * here. The spec is a bit convoluted here, we make it simpler. - */ - if (drbg->reseed_threshold < drbg->reseed_ctr) -- drbg->seeded = false; -+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED; - -- if (drbg->pr || !drbg->seeded) { -+ if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { - pr_devel("DRBG: reseeding before generation (prediction " - "resistance: %s, state %s)\n", - drbg->pr ? "true" : "false", -- drbg->seeded ? "seeded" : "unseeded"); -+ (drbg->seeded == DRBG_SEED_STATE_FULL ? 
-+ "seeded" : "unseeded")); - /* 9.3.1 steps 7.1 through 7.3 */ - len = drbg_seed(drbg, addtl, true); - if (len) - goto err; - /* 9.3.1 step 7.4 */ - addtl = NULL; -+ } else if (rng_is_initialized() && -+ drbg->seeded == DRBG_SEED_STATE_PARTIAL) { -+ len = drbg_seed_from_random(drbg); -+ if (len) -+ goto err; - } - - if (addtl && 0 < addtl->len) -@@ -1491,51 +1509,23 @@ static int drbg_generate_long(struct drbg_state *drbg, - return 0; - } - --static void drbg_schedule_async_seed(struct random_ready_callback *rdy) --{ -- struct drbg_state *drbg = container_of(rdy, struct drbg_state, -- random_ready); -- -- schedule_work(&drbg->seed_work); --} -- - static int drbg_prepare_hrng(struct drbg_state *drbg) - { -- int err; -- - /* We do not need an HRNG in test mode. */ - if (list_empty(&drbg->test_data.list)) - return 0; - - drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); -+ if (IS_ERR(drbg->jent)) { -+ const int err = PTR_ERR(drbg->jent); - -- INIT_WORK(&drbg->seed_work, drbg_async_seed); -- -- drbg->random_ready.owner = THIS_MODULE; -- drbg->random_ready.func = drbg_schedule_async_seed; -- -- err = add_random_ready_callback(&drbg->random_ready); -- -- switch (err) { -- case 0: -- break; -- -- case -EALREADY: -- err = 0; -- fallthrough; -- -- default: -- drbg->random_ready.func = NULL; -- return err; -+ drbg->jent = NULL; -+ if (fips_enabled) -+ return err; -+ pr_info("DRBG: Continuing without Jitter RNG\n"); - } - -- /* -- * Require frequent reseeds until the seed source is fully -- * initialized. -- */ -- drbg->reseed_threshold = 50; -- -- return err; -+ return 0; - } - - /* -@@ -1578,7 +1568,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, - if (!drbg->core) { - drbg->core = &drbg_cores[coreref]; - drbg->pr = pr; -- drbg->seeded = false; -+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED; - drbg->reseed_threshold = drbg_max_requests(drbg); - - ret = drbg_alloc_state(drbg); -@@ -1589,14 +1579,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, - if (ret) - goto free_everything; - -- if (IS_ERR(drbg->jent)) { -- ret = PTR_ERR(drbg->jent); -- drbg->jent = NULL; -- if (fips_enabled || ret != -ENOENT) -- goto free_everything; -- pr_info("DRBG: Continuing without Jitter RNG\n"); -- } -- - reseed = false; - } - -@@ -1629,11 +1611,6 @@ free_everything: - */ - static int drbg_uninstantiate(struct drbg_state *drbg) - { -- if (drbg->random_ready.func) { -- del_random_ready_callback(&drbg->random_ready); -- cancel_work_sync(&drbg->seed_work); -- } -- - if (!IS_ERR_OR_NULL(drbg->jent)) - crypto_free_rng(drbg->jent); - drbg->jent = NULL; -diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c -index 6a3fd09057d0c..f7ed430206720 100644 ---- a/crypto/ecrdsa.c -+++ b/crypto/ecrdsa.c -@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req) - - /* Step 1: verify that 0 < r < q, 0 < s < q */ - if (vli_is_zero(r, ndigits) || -- vli_cmp(r, ctx->curve->n, ndigits) == 1 || -+ vli_cmp(r, ctx->curve->n, ndigits) >= 0 || - vli_is_zero(s, ndigits) || -- vli_cmp(s, ctx->curve->n, ndigits) == 1) -+ vli_cmp(s, ctx->curve->n, ndigits) >= 0) - return -EKEYREJECTED; - - /* Step 2: calculate hash (h) of the message (passed as input) */ - /* Step 3: calculate e = h \mod q */ - vli_from_le64(e, digest, ndigits); -- if (vli_cmp(e, ctx->curve->n, ndigits) == 1) -+ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0) - vli_sub(e, e, ctx->curve->n, ndigits); - if (vli_is_zero(e, ndigits)) - e[0] = 1; -@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct 
akcipher_request *req) - /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */ - ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key, - ctx->curve); -- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) -+ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0) - vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); - - /* Step 7: if R == r signature is valid */ -diff --git a/crypto/essiv.c b/crypto/essiv.c -index 8bcc5bdcb2a95..3505b071e6471 100644 ---- a/crypto/essiv.c -+++ b/crypto/essiv.c -@@ -171,7 +171,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err) - struct aead_request *req = areq->data; - struct essiv_aead_request_ctx *rctx = aead_request_ctx(req); - -+ if (err == -EINPROGRESS) -+ goto out; -+ - kfree(rctx->assoc); -+ -+out: - aead_request_complete(req, err); - } - -@@ -247,7 +252,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc) - err = enc ? crypto_aead_encrypt(subreq) : - crypto_aead_decrypt(subreq); - -- if (rctx->assoc && err != -EINPROGRESS) -+ if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY) - kfree(rctx->assoc); - return err; - } -diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c -index a11b3208760f3..f6d3a84e3c214 100644 ---- a/crypto/jitterentropy.c -+++ b/crypto/jitterentropy.c -@@ -265,7 +265,6 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) - { - __u64 delta2 = jent_delta(ec->last_delta, current_delta); - __u64 delta3 = jent_delta(ec->last_delta2, delta2); -- unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; - - ec->last_delta = current_delta; - ec->last_delta2 = delta2; -@@ -274,7 +273,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) - * Insert the result of the comparison of two back-to-back time - * deltas. - */ -- jent_apt_insert(ec, delta_masked); -+ jent_apt_insert(ec, current_delta); - - if (!current_delta || !delta2 || !delta3) { - /* RCT with a stuck bit */ -diff --git a/crypto/memneq.c b/crypto/memneq.c -deleted file mode 100644 -index afed1bd16aee0..0000000000000 ---- a/crypto/memneq.c -+++ /dev/null -@@ -1,168 +0,0 @@ --/* -- * Constant-time equality testing of memory regions. -- * -- * Authors: -- * -- * James Yonan -- * Daniel Borkmann -- * -- * This file is provided under a dual BSD/GPLv2 license. When using or -- * redistributing this file, you may do so under either license. -- * -- * GPL LICENSE SUMMARY -- * -- * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of version 2 of the GNU General Public License as -- * published by the Free Software Foundation. -- * -- * This program is distributed in the hope that it will be useful, but -- * WITHOUT ANY WARRANTY; without even the implied warranty of -- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -- * General Public License for more details. -- * -- * You should have received a copy of the GNU General Public License -- * along with this program; if not, write to the Free Software -- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -- * The full GNU General Public License is included in this distribution -- * in the file called LICENSE.GPL. -- * -- * BSD LICENSE -- * -- * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 
-- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * -- * * Redistributions of source code must retain the above copyright -- * notice, this list of conditions and the following disclaimer. -- * * Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in -- * the documentation and/or other materials provided with the -- * distribution. -- * * Neither the name of OpenVPN Technologies nor the names of its -- * contributors may be used to endorse or promote products derived -- * from this software without specific prior written permission. -- * -- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -- */ -- --#include -- --#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ -- --/* Generic path for arbitrary size */ --static inline unsigned long --__crypto_memneq_generic(const void *a, const void *b, size_t size) --{ -- unsigned long neq = 0; -- --#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) -- while (size >= sizeof(unsigned long)) { -- neq |= *(unsigned long *)a ^ *(unsigned long *)b; -- OPTIMIZER_HIDE_VAR(neq); -- a += sizeof(unsigned long); -- b += sizeof(unsigned long); -- size -= sizeof(unsigned long); -- } --#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ -- while (size > 0) { -- neq |= *(unsigned char *)a ^ *(unsigned char *)b; -- OPTIMIZER_HIDE_VAR(neq); -- a += 1; -- b += 1; -- size -= 1; -- } -- return neq; --} -- --/* Loop-free fast-path for frequently used 16-byte size */ --static inline unsigned long __crypto_memneq_16(const void *a, const void *b) --{ -- unsigned long neq = 0; -- --#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -- if (sizeof(unsigned long) == 8) { -- neq |= *(unsigned long *)(a) ^ *(unsigned long *)(b); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8); -- OPTIMIZER_HIDE_VAR(neq); -- } else if (sizeof(unsigned int) == 4) { -- neq |= *(unsigned int *)(a) ^ *(unsigned int *)(b); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned int *)(a+4) ^ *(unsigned int *)(b+4); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned int *)(a+8) ^ *(unsigned int *)(b+8); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12); -- OPTIMIZER_HIDE_VAR(neq); -- } else --#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ -- { -- neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char 
*)(a+4) ^ *(unsigned char *)(b+4); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); -- OPTIMIZER_HIDE_VAR(neq); -- neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); -- OPTIMIZER_HIDE_VAR(neq); -- } -- -- return neq; --} -- --/* Compare two areas of memory without leaking timing information, -- * and with special optimizations for common sizes. Users should -- * not call this function directly, but should instead use -- * crypto_memneq defined in crypto/algapi.h. -- */ --noinline unsigned long __crypto_memneq(const void *a, const void *b, -- size_t size) --{ -- switch (size) { -- case 16: -- return __crypto_memneq_16(a, b); -- default: -- return __crypto_memneq_generic(a, b, size); -- } --} --EXPORT_SYMBOL(__crypto_memneq); -- --#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ -diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c -index d569c7ed6c800..9d10b846ccf73 100644 ---- a/crypto/pcrypt.c -+++ b/crypto/pcrypt.c -@@ -78,12 +78,14 @@ static void pcrypt_aead_enc(struct padata_priv *padata) - { - struct pcrypt_request *preq = pcrypt_padata_request(padata); - struct aead_request *req = pcrypt_request_ctx(preq); -+ int ret; - -- padata->info = crypto_aead_encrypt(req); -+ ret = crypto_aead_encrypt(req); - -- if (padata->info == -EINPROGRESS) -+ if (ret == -EINPROGRESS) - return; - -+ padata->info = ret; - padata_do_serial(padata); - } - -@@ -123,12 +125,14 @@ static void pcrypt_aead_dec(struct padata_priv *padata) - { - struct pcrypt_request *preq = pcrypt_padata_request(padata); - struct aead_request *req = pcrypt_request_ctx(preq); -+ int ret; - -- padata->info = crypto_aead_decrypt(req); -+ ret = crypto_aead_decrypt(req); - -- if (padata->info == -EINPROGRESS) -+ if (ret == -EINPROGRESS) - return; - -+ padata->info = ret; - padata_do_serial(padata); - } - -diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c -index 8ac3e73e8ea65..e2f4ccbd71dd8 100644 ---- a/crypto/rsa-pkcs1pad.c -+++ b/crypto/rsa-pkcs1pad.c -@@ -214,16 +214,14 @@ static void pkcs1pad_encrypt_sign_complete_cb( - struct crypto_async_request *child_async_req, int err) - { - struct akcipher_request *req = child_async_req->data; -- struct crypto_async_request async_req; - - if (err == -EINPROGRESS) -- return; -+ goto out; -+ -+ err = pkcs1pad_encrypt_sign_complete(req, err); - -- async_req.data = req->base.data; -- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); -- async_req.flags = child_async_req->flags; -- req->base.complete(&async_req, -- pkcs1pad_encrypt_sign_complete(req, err)); -+out: -+ akcipher_request_complete(req, err); - } - - static int pkcs1pad_encrypt(struct akcipher_request *req) -@@ -332,15 +330,14 @@ static void pkcs1pad_decrypt_complete_cb( - struct 
crypto_async_request *child_async_req, int err) - { - struct akcipher_request *req = child_async_req->data; -- struct crypto_async_request async_req; - - if (err == -EINPROGRESS) -- return; -+ goto out; - -- async_req.data = req->base.data; -- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); -- async_req.flags = child_async_req->flags; -- req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); -+ err = pkcs1pad_decrypt_complete(req, err); -+ -+out: -+ akcipher_request_complete(req, err); - } - - static int pkcs1pad_decrypt(struct akcipher_request *req) -@@ -476,6 +473,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) - pos++; - - if (digest_info) { -+ if (digest_info->size > dst_len - pos) -+ goto done; - if (crypto_memneq(out_buf + pos, digest_info->data, - digest_info->size)) - goto done; -@@ -495,7 +494,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) - sg_nents_for_len(req->src, - req->src_len + req->dst_len), - req_ctx->out_buf + ctx->key_size, -- req->dst_len, ctx->key_size); -+ req->dst_len, req->src_len); - /* Do the actual verification step. */ - if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, - req->dst_len) != 0) -@@ -510,15 +509,14 @@ static void pkcs1pad_verify_complete_cb( - struct crypto_async_request *child_async_req, int err) - { - struct akcipher_request *req = child_async_req->data; -- struct crypto_async_request async_req; - - if (err == -EINPROGRESS) -- return; -+ goto out; - -- async_req.data = req->base.data; -- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); -- async_req.flags = child_async_req->flags; -- req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); -+ err = pkcs1pad_verify_complete(req, err); -+ -+out: -+ akcipher_request_complete(req, err); - } - - /* -@@ -538,7 +536,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) - - if (WARN_ON(req->dst) || - WARN_ON(!req->dst_len) || -- !ctx->key_size || req->src_len < ctx->key_size) -+ !ctx->key_size || req->src_len != ctx->key_size) - return -EINVAL; - - req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL); -@@ -576,6 +574,10 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) - return PTR_ERR(child_tfm); - - ctx->child = child_tfm; -+ -+ akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) + -+ crypto_akcipher_reqsize(child_tfm)); -+ - return 0; - } - -@@ -621,6 +623,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) - - rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); - -+ if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { -+ err = -EINVAL; -+ goto err_free_inst; -+ } -+ - err = -ENAMETOOLONG; - hash_name = crypto_attr_alg_name(tb[2]); - if (IS_ERR(hash_name)) { -@@ -666,7 +673,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) - inst->alg.set_pub_key = pkcs1pad_set_pub_key; - inst->alg.set_priv_key = pkcs1pad_set_priv_key; - inst->alg.max_size = pkcs1pad_get_max_size; -- inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize; - - inst->free = pkcs1pad_free; - -diff --git a/crypto/seqiv.c b/crypto/seqiv.c -index 0899d527c2845..b1bcfe537daf1 100644 ---- a/crypto/seqiv.c -+++ b/crypto/seqiv.c -@@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) - struct aead_request *subreq = aead_request_ctx(req); - struct crypto_aead *geniv; - -- if (err == -EINPROGRESS) -+ if (err == -EINPROGRESS || err == -EBUSY) - return; - - if (err) 
-diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c -index 82b0400985a51..4ada7e7493904 100644 ---- a/crypto/tcrypt.c -+++ b/crypto/tcrypt.c -@@ -1295,15 +1295,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, - goto out_free_tfm; - } - -- -- for (i = 0; i < num_mb; ++i) -- if (testmgr_alloc_buf(data[i].xbuf)) { -- while (i--) -- testmgr_free_buf(data[i].xbuf); -- goto out_free_tfm; -- } -- -- - for (i = 0; i < num_mb; ++i) { - data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); - if (!data[i].req) { -@@ -1333,7 +1324,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, - - if (bs > XBUFSIZE * PAGE_SIZE) { - pr_err("template (%u) too big for buffer (%lu)\n", -- *b_size, XBUFSIZE * PAGE_SIZE); -+ bs, XBUFSIZE * PAGE_SIZE); - goto out; - } - -@@ -1386,8 +1377,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, - memset(cur->xbuf[p], 0xff, k); - - skcipher_request_set_crypt(cur->req, cur->sg, -- cur->sg, *b_size, -- iv); -+ cur->sg, bs, iv); - } - - if (secs) { -@@ -1864,10 +1854,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) - ret += tcrypt_test("rmd160"); - break; - -- case 41: -- ret += tcrypt_test("blake2s-256"); -- break; -- - case 42: - ret += tcrypt_test("blake2b-512"); - break; -@@ -2435,10 +2421,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) - test_hash_speed("rmd160", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; -- case 316: -- test_hash_speed("blake2s-256", sec, generic_hash_speed_template); -- if (mode > 300 && mode < 400) break; -- fallthrough; - case 317: - test_hash_speed("blake2b-512", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; -@@ -2547,10 +2529,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) - test_ahash_speed("rmd160", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; -- case 416: -- test_ahash_speed("blake2s-256", sec, generic_hash_speed_template); -- if (mode > 400 && mode < 500) break; -- fallthrough; - case 417: - test_ahash_speed("blake2b-512", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; -diff --git a/crypto/testmgr.c b/crypto/testmgr.c -index 70f69f0910c9e..163a1283a866a 100644 ---- a/crypto/testmgr.c -+++ b/crypto/testmgr.c -@@ -4329,30 +4329,6 @@ static const struct alg_test_desc alg_test_descs[] = { - .suite = { - .hash = __VECS(blake2b_512_tv_template) - } -- }, { -- .alg = "blake2s-128", -- .test = alg_test_hash, -- .suite = { -- .hash = __VECS(blakes2s_128_tv_template) -- } -- }, { -- .alg = "blake2s-160", -- .test = alg_test_hash, -- .suite = { -- .hash = __VECS(blakes2s_160_tv_template) -- } -- }, { -- .alg = "blake2s-224", -- .test = alg_test_hash, -- .suite = { -- .hash = __VECS(blakes2s_224_tv_template) -- } -- }, { -- .alg = "blake2s-256", -- .test = alg_test_hash, -- .suite = { -- .hash = __VECS(blakes2s_256_tv_template) -- } - }, { - .alg = "cbc(aes)", - .test = alg_test_skcipher, -diff --git a/crypto/testmgr.h b/crypto/testmgr.h -index e6fca34b5b257..2be20a590a606 100644 ---- a/crypto/testmgr.h -+++ b/crypto/testmgr.h -@@ -32583,221 +32583,4 @@ static const struct hash_testvec blake2b_512_tv_template[] = {{ - 0xae, 0x15, 0x81, 0x15, 0xd0, 0x88, 0xa0, 0x3c, }, - }}; - --static const struct hash_testvec blakes2s_128_tv_template[] = {{ -- .digest = (u8[]){ 0x64, 0x55, 0x0d, 0x6f, 0xfe, 0x2c, 0x0a, 0x01, -- 0xa1, 0x4a, 0xba, 0x1e, 0xad, 0xe0, 
0x20, 0x0c, }, --}, { -- .plaintext = blake2_ordered_sequence, -- .psize = 64, -- .digest = (u8[]){ 0xdc, 0x66, 0xca, 0x8f, 0x03, 0x86, 0x58, 0x01, -- 0xb0, 0xff, 0xe0, 0x6e, 0xd8, 0xa1, 0xa9, 0x0e, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 1, -- .digest = (u8[]){ 0x88, 0x1e, 0x42, 0xe7, 0xbb, 0x35, 0x80, 0x82, -- 0x63, 0x7c, 0x0a, 0x0f, 0xd7, 0xec, 0x6c, 0x2f, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 7, -- .digest = (u8[]){ 0xcf, 0x9e, 0x07, 0x2a, 0xd5, 0x22, 0xf2, 0xcd, -- 0xa2, 0xd8, 0x25, 0x21, 0x80, 0x86, 0x73, 0x1c, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 15, -- .digest = (u8[]){ 0xf6, 0x33, 0x5a, 0x2c, 0x22, 0xa0, 0x64, 0xb2, -- 0xb6, 0x3f, 0xeb, 0xbc, 0xd1, 0xc3, 0xe5, 0xb2, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 247, -- .digest = (u8[]){ 0x72, 0x66, 0x49, 0x60, 0xf9, 0x4a, 0xea, 0xbe, -- 0x1f, 0xf4, 0x60, 0xce, 0xb7, 0x81, 0xcb, 0x09, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 256, -- .digest = (u8[]){ 0xd5, 0xa4, 0x0e, 0xc3, 0x16, 0xc7, 0x51, 0xa6, -- 0x3c, 0xd0, 0xd9, 0x11, 0x57, 0xfa, 0x1e, 0xbb, }, --}}; -- --static const struct hash_testvec blakes2s_160_tv_template[] = {{ -- .plaintext = blake2_ordered_sequence, -- .psize = 7, -- .digest = (u8[]){ 0xb4, 0xf2, 0x03, 0x49, 0x37, 0xed, 0xb1, 0x3e, -- 0x5b, 0x2a, 0xca, 0x64, 0x82, 0x74, 0xf6, 0x62, -- 0xe3, 0xf2, 0x84, 0xff, }, --}, { -- .plaintext = blake2_ordered_sequence, -- .psize = 256, -- .digest = (u8[]){ 0xaa, 0x56, 0x9b, 0xdc, 0x98, 0x17, 0x75, 0xf2, -- 0xb3, 0x68, 0x83, 0xb7, 0x9b, 0x8d, 0x48, 0xb1, -- 0x9b, 0x2d, 0x35, 0x05, }, --}, { -- .ksize = 1, -- .key = "B", -- .digest = (u8[]){ 0x50, 0x16, 0xe7, 0x0c, 0x01, 0xd0, 0xd3, 0xc3, -- 0xf4, 0x3e, 0xb1, 0x6e, 0x97, 0xa9, 0x4e, 0xd1, -- 0x79, 0x65, 0x32, 0x93, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 1, -- .digest = (u8[]){ 0x1c, 0x2b, 0xcd, 0x9a, 0x68, 0xca, 0x8c, 0x71, -- 0x90, 0x29, 0x6c, 0x54, 0xfa, 0x56, 0x4a, 0xef, -- 0xa2, 0x3a, 0x56, 0x9c, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 15, -- .digest = (u8[]){ 0x36, 0xc3, 0x5f, 0x9a, 0xdc, 0x7e, 0xbf, 0x19, -- 0x68, 0xaa, 0xca, 0xd8, 0x81, 0xbf, 0x09, 0x34, -- 0x83, 0x39, 0x0f, 0x30, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 64, -- .digest = (u8[]){ 0x86, 0x80, 0x78, 0xa4, 0x14, 0xec, 0x03, 0xe5, -- 0xb6, 0x9a, 0x52, 0x0e, 0x42, 0xee, 0x39, 0x9d, -- 0xac, 0xa6, 0x81, 0x63, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 247, -- .digest = (u8[]){ 0x2d, 0xd8, 0xd2, 0x53, 0x66, 0xfa, 0xa9, 0x01, -- 0x1c, 0x9c, 0xaf, 0xa3, 0xe2, 0x9d, 0x9b, 0x10, -- 0x0a, 0xf6, 0x73, 0xe8, }, --}}; -- --static const struct hash_testvec blakes2s_224_tv_template[] = {{ -- .plaintext = blake2_ordered_sequence, -- .psize = 1, -- .digest = (u8[]){ 0x61, 0xb9, 0x4e, 0xc9, 0x46, 0x22, 0xa3, 0x91, -- 0xd2, 0xae, 0x42, 0xe6, 0x45, 0x6c, 0x90, 0x12, -- 0xd5, 0x80, 0x07, 0x97, 0xb8, 0x86, 0x5a, 0xfc, -- 0x48, 0x21, 0x97, 0xbb, }, --}, { -- .plaintext = blake2_ordered_sequence, -- .psize = 247, -- .digest = (u8[]){ 0x9e, 0xda, 0xc7, 0x20, 0x2c, 0xd8, 0x48, 
0x2e, -- 0x31, 0x94, 0xab, 0x46, 0x6d, 0x94, 0xd8, 0xb4, -- 0x69, 0xcd, 0xae, 0x19, 0x6d, 0x9e, 0x41, 0xcc, -- 0x2b, 0xa4, 0xd5, 0xf6, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .digest = (u8[]){ 0x32, 0xc0, 0xac, 0xf4, 0x3b, 0xd3, 0x07, 0x9f, -- 0xbe, 0xfb, 0xfa, 0x4d, 0x6b, 0x4e, 0x56, 0xb3, -- 0xaa, 0xd3, 0x27, 0xf6, 0x14, 0xbf, 0xb9, 0x32, -- 0xa7, 0x19, 0xfc, 0xb8, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 7, -- .digest = (u8[]){ 0x73, 0xad, 0x5e, 0x6d, 0xb9, 0x02, 0x8e, 0x76, -- 0xf2, 0x66, 0x42, 0x4b, 0x4c, 0xfa, 0x1f, 0xe6, -- 0x2e, 0x56, 0x40, 0xe5, 0xa2, 0xb0, 0x3c, 0xe8, -- 0x7b, 0x45, 0xfe, 0x05, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 15, -- .digest = (u8[]){ 0x16, 0x60, 0xfb, 0x92, 0x54, 0xb3, 0x6e, 0x36, -- 0x81, 0xf4, 0x16, 0x41, 0xc3, 0x3d, 0xd3, 0x43, -- 0x84, 0xed, 0x10, 0x6f, 0x65, 0x80, 0x7a, 0x3e, -- 0x25, 0xab, 0xc5, 0x02, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 64, -- .digest = (u8[]){ 0xca, 0xaa, 0x39, 0x67, 0x9c, 0xf7, 0x6b, 0xc7, -- 0xb6, 0x82, 0xca, 0x0e, 0x65, 0x36, 0x5b, 0x7c, -- 0x24, 0x00, 0xfa, 0x5f, 0xda, 0x06, 0x91, 0x93, -- 0x6a, 0x31, 0x83, 0xb5, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 256, -- .digest = (u8[]){ 0x90, 0x02, 0x26, 0xb5, 0x06, 0x9c, 0x36, 0x86, -- 0x94, 0x91, 0x90, 0x1e, 0x7d, 0x2a, 0x71, 0xb2, -- 0x48, 0xb5, 0xe8, 0x16, 0xfd, 0x64, 0x33, 0x45, -- 0xb3, 0xd7, 0xec, 0xcc, }, --}}; -- --static const struct hash_testvec blakes2s_256_tv_template[] = {{ -- .plaintext = blake2_ordered_sequence, -- .psize = 15, -- .digest = (u8[]){ 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, -- 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, -- 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, -- 0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .digest = (u8[]){ 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, -- 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, -- 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a, -- 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 1, -- .digest = (u8[]){ 0x22, 0x27, 0xae, 0xaa, 0x6e, 0x81, 0x56, 0x03, -- 0xa7, 0xe3, 0xa1, 0x18, 0xa5, 0x9a, 0x2c, 0x18, -- 0xf4, 0x63, 0xbc, 0x16, 0x70, 0xf1, 0xe7, 0x4b, -- 0x00, 0x6d, 0x66, 0x16, 0xae, 0x9e, 0x74, 0x4e, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 7, -- .digest = (u8[]){ 0x58, 0x5d, 0xa8, 0x60, 0x1c, 0xa4, 0xd8, 0x03, -- 0x86, 0x86, 0x84, 0x64, 0xd7, 0xa0, 0x8e, 0x15, -- 0x2f, 0x05, 0xa2, 0x1b, 0xbc, 0xef, 0x7a, 0x34, -- 0xb3, 0xc5, 0xbc, 0x4b, 0xf0, 0x32, 0xeb, 0x12, }, --}, { -- .ksize = 32, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 64, -- .digest = (u8[]){ 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, -- 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, -- 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, -- 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4, }, --}, { -- .ksize = 1, -- .key = "B", -- .plaintext = blake2_ordered_sequence, -- .psize = 247, -- .digest = (u8[]){ 0x2e, 0x74, 0x1c, 0x1d, 0x03, 0xf4, 0x9d, 0x84, -- 0x6f, 0xfc, 0x86, 0x32, 0x92, 0x49, 0x7e, 0x66, -- 0xd7, 0xc3, 0x10, 0x88, 0xfe, 0x28, 0xb3, 0xe0, -- 0xbf, 0x50, 0x75, 0xad, 
0x8e, 0xa4, 0xe6, 0xb2, }, --}, { -- .ksize = 16, -- .key = blake2_ordered_sequence, -- .plaintext = blake2_ordered_sequence, -- .psize = 256, -- .digest = (u8[]){ 0xb9, 0xd2, 0x81, 0x0e, 0x3a, 0xb1, 0x62, 0x9b, -- 0xad, 0x44, 0x05, 0xf4, 0x92, 0x2e, 0x99, 0xc1, -- 0x4a, 0x47, 0xbb, 0x5b, 0x6f, 0xb2, 0x96, 0xed, -- 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, }, --}}; -- - #endif /* _CRYPTO_TESTMGR_H */ -diff --git a/crypto/xts.c b/crypto/xts.c -index 6c12f30dbdd6d..de6cbcf69bbd6 100644 ---- a/crypto/xts.c -+++ b/crypto/xts.c -@@ -203,12 +203,12 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err) - if (!err) { - struct xts_request_ctx *rctx = skcipher_request_ctx(req); - -- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; -+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; - err = xts_xor_tweak_post(req, true); - - if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = xts_cts_final(req, crypto_skcipher_encrypt); -- if (err == -EINPROGRESS) -+ if (err == -EINPROGRESS || err == -EBUSY) - return; - } - } -@@ -223,12 +223,12 @@ static void xts_decrypt_done(struct crypto_async_request *areq, int err) - if (!err) { - struct xts_request_ctx *rctx = skcipher_request_ctx(req); - -- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; -+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; - err = xts_xor_tweak_post(req, false); - - if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = xts_cts_final(req, crypto_skcipher_decrypt); -- if (err == -EINPROGRESS) -+ if (err == -EINPROGRESS || err == -EBUSY) - return; - } - } -@@ -466,3 +466,4 @@ MODULE_LICENSE("GPL"); - MODULE_DESCRIPTION("XTS block cipher mode"); - MODULE_ALIAS_CRYPTO("xts"); - MODULE_IMPORT_NS(CRYPTO_INTERNAL); -+MODULE_SOFTDEP("pre: ecb"); -diff --git a/drivers/Makefile b/drivers/Makefile -index be5d40ae14882..a110338c860c7 100644 ---- a/drivers/Makefile -+++ b/drivers/Makefile -@@ -41,8 +41,7 @@ obj-$(CONFIG_DMADEVICES) += dma/ - # SOC specific infrastructure drivers. 
- obj-y += soc/ - --obj-$(CONFIG_VIRTIO) += virtio/ --obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/ -+obj-y += virtio/ - obj-$(CONFIG_VDPA) += vdpa/ - obj-$(CONFIG_XEN) += xen/ - -diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c -index d726537fa16ce..7b2016534162c 100644 ---- a/drivers/accessibility/speakup/main.c -+++ b/drivers/accessibility/speakup/main.c -@@ -1778,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc) - { - unsigned long flags; - -- if (!speakup_console[vc->vc_num] || spk_parked) -+ if (!speakup_console[vc->vc_num] || spk_parked || !synth) - return; - if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) - /* Speakup output, discard */ -diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c -index 580ec796816bc..78ca4987e619e 100644 ---- a/drivers/accessibility/speakup/speakup_dectlk.c -+++ b/drivers/accessibility/speakup/speakup_dectlk.c -@@ -44,6 +44,7 @@ static struct var_t vars[] = { - { CAPS_START, .u.s = {"[:dv ap 160] " } }, - { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, - { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, -+ { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, - { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, - { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, - { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, -diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c -index 0d1f397cd8961..07373b3debd1e 100644 ---- a/drivers/accessibility/speakup/spk_ttyio.c -+++ b/drivers/accessibility/speakup/spk_ttyio.c -@@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty, - } - - if (!ldisc_data->buf_free) -- /* ttyio_in will tty_schedule_flip */ -+ /* ttyio_in will tty_flip_buffer_push */ - return 0; - - /* Make sure the consumer has read buf before we have seen -@@ -312,7 +312,7 @@ static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout) - mb(); - ldisc_data->buf_free = true; - /* Let TTY push more characters */ -- tty_schedule_flip(tty->port); -+ tty_flip_buffer_push(tty->port); - - return rv; - } -@@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth) - { - struct tty_struct *tty = in_synth->dev; - -+ if (tty == NULL) -+ return; -+ - tty_lock(tty); - - if (tty->ops->close) -diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c -index b0cb662233f1a..81aff651a0d49 100644 ---- a/drivers/acpi/ac.c -+++ b/drivers/acpi/ac.c -@@ -61,6 +61,7 @@ static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); - - static int ac_sleep_before_get_state_ms; - static int ac_check_pmic = 1; -+static int ac_only; - - static struct acpi_driver acpi_ac_driver = { - .name = "ac", -@@ -93,6 +94,11 @@ static int acpi_ac_get_state(struct acpi_ac *ac) - if (!ac) - return -EINVAL; - -+ if (ac_only) { -+ ac->state = 1; -+ return 0; -+ } -+ - status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, - &ac->state); - if (ACPI_FAILURE(status)) { -@@ -200,6 +206,12 @@ static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d) - return 0; - } - -+static int __init ac_only_quirk(const struct dmi_system_id *d) -+{ -+ ac_only = 1; -+ return 0; -+} -+ - /* Please keep this list alphabetically sorted */ - static const struct dmi_system_id ac_dmi_table[] __initconst = { - { -@@ -209,6 +221,13 @@ static const struct dmi_system_id ac_dmi_table[] __initconst = { - DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"), - }, - }, -+ { -+ /* Kodlix 
GK45 returning incorrect state */ -+ .callback = ac_only_quirk, -+ .matches = { -+ DMI_MATCH(DMI_PRODUCT_NAME, "GK45"), -+ }, -+ }, - { - /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */ - .callback = ac_do_not_check_pmic_quirk, -diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c -index 72f1fb77abcd0..e648158368a7d 100644 ---- a/drivers/acpi/acpi_extlog.c -+++ b/drivers/acpi/acpi_extlog.c -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, - int cpu = mce->extcpu; - struct acpi_hest_generic_status *estatus, *tmp; - struct acpi_hest_generic_data *gdata; -- const guid_t *fru_id = &guid_null; -- char *fru_text = ""; -+ const guid_t *fru_id; -+ char *fru_text; - guid_t *sec_type; - static u32 err_seq; - -@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, - - /* log event via trace */ - err_seq++; -- gdata = (struct acpi_hest_generic_data *)(tmp + 1); -- if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) -- fru_id = (guid_t *)gdata->fru_id; -- if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) -- fru_text = gdata->fru_text; -- sec_type = (guid_t *)gdata->section_type; -- if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { -- struct cper_sec_mem_err *mem = (void *)(gdata + 1); -- if (gdata->error_data_length >= sizeof(*mem)) -- trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, -- (u8)gdata->error_severity); -+ apei_estatus_for_each_section(tmp, gdata) { -+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) -+ fru_id = (guid_t *)gdata->fru_id; -+ else -+ fru_id = &guid_null; -+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) -+ fru_text = gdata->fru_text; -+ else -+ fru_text = ""; -+ sec_type = (guid_t *)gdata->section_type; -+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { -+ struct cper_sec_mem_err *mem = (void *)(gdata + 1); -+ -+ if (gdata->error_data_length >= sizeof(*mem)) -+ trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, -+ (u8)gdata->error_severity); -+ } - } - - out: -diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c -index 6922a44b3ce70..a2056c4c8cb70 100644 ---- a/drivers/acpi/acpi_fpdt.c -+++ b/drivers/acpi/acpi_fpdt.c -@@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = { - - static struct kobject *fpdt_kobj; - -+#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT -+#include -+static bool fpdt_address_valid(u64 address) -+{ -+ /* -+ * On some systems the table contains invalid addresses -+ * with unsuppored high address bits set, check for this. 
-+ */ -+ return !(address >> boot_cpu_data.x86_phys_bits); -+} -+#else -+static bool fpdt_address_valid(u64 address) -+{ -+ return true; -+} -+#endif -+ - static int fpdt_process_subtable(u64 address, u32 subtable_type) - { - struct fpdt_subtable_header *subtable_header; -@@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type) - u32 length, offset; - int result; - -+ if (!fpdt_address_valid(address)) { -+ pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address); -+ return -EINVAL; -+ } -+ - subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); - if (!subtable_header) - return -ENOMEM; -diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c -index 30b1f511c2af0..f609f9d62efdd 100644 ---- a/drivers/acpi/acpi_lpss.c -+++ b/drivers/acpi/acpi_lpss.c -@@ -403,6 +403,9 @@ static int register_device_clock(struct acpi_device *adev, - if (!lpss_clk_dev) - lpt_register_clock_device(); - -+ if (IS_ERR(lpss_clk_dev)) -+ return PTR_ERR(lpss_clk_dev); -+ - clk_data = platform_get_drvdata(lpss_clk_dev); - if (!clk_data) - return -ENODEV; -diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c -index 42ede059728ce..2b18b51f6351e 100644 ---- a/drivers/acpi/acpi_video.c -+++ b/drivers/acpi/acpi_video.c -@@ -73,6 +73,7 @@ module_param(device_id_scheme, bool, 0444); - static int only_lcd = -1; - module_param(only_lcd, int, 0444); - -+static bool may_report_brightness_keys; - static int register_count; - static DEFINE_MUTEX(register_count_mutex); - static DEFINE_MUTEX(video_list_lock); -@@ -495,6 +496,22 @@ static const struct dmi_system_id video_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"), - }, - }, -+ { -+ .callback = video_disable_backlight_sysfs_if, -+ .ident = "Toshiba Satellite Z830", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Z830"), -+ }, -+ }, -+ { -+ .callback = video_disable_backlight_sysfs_if, -+ .ident = "Toshiba Portege Z830", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE Z830"), -+ }, -+ }, - /* - * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set - * but the IDs actually follow the Device ID Scheme. 
-@@ -1222,6 +1239,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device, - acpi_video_device_bind(video, data); - acpi_video_device_find_cap(data); - -+ if (data->cap._BCM && data->cap._BCL) -+ may_report_brightness_keys = true; -+ - mutex_lock(&video->device_list_lock); - list_add_tail(&data->entry, &video->video_device_list); - mutex_unlock(&video->device_list_lock); -@@ -1689,6 +1709,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data) - break; - } - -+ if (keycode) -+ may_report_brightness_keys = true; -+ - acpi_notifier_call_chain(device, event, 0); - - if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) { -@@ -2251,6 +2274,7 @@ void acpi_video_unregister(void) - if (register_count) { - acpi_bus_unregister_driver(&acpi_video_bus); - register_count = 0; -+ may_report_brightness_keys = false; - } - mutex_unlock(®ister_count_mutex); - } -@@ -2272,13 +2296,7 @@ void acpi_video_unregister_backlight(void) - - bool acpi_video_handles_brightness_key_presses(void) - { -- bool have_video_busses; -- -- mutex_lock(&video_list_lock); -- have_video_busses = !list_empty(&video_bus_head); -- mutex_unlock(&video_list_lock); -- -- return have_video_busses && -+ return may_report_brightness_keys && - (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS); - } - EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses); -diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile -index 59700433a96e5..f919811156b1f 100644 ---- a/drivers/acpi/acpica/Makefile -+++ b/drivers/acpi/acpica/Makefile -@@ -3,7 +3,7 @@ - # Makefile for ACPICA Core interpreter - # - --ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA -+ccflags-y := -D_LINUX -DBUILDING_ACPICA - ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT - - # use acpi.o to put all files here into acpi.o modparam namespace -diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h -index d41b810e367c4..4366d36ef1198 100644 ---- a/drivers/acpi/acpica/acglobal.h -+++ b/drivers/acpi/acpica/acglobal.h -@@ -226,6 +226,8 @@ extern struct acpi_bit_register_info - acpi_gbl_bit_register_info[ACPI_NUM_BITREG]; - ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a); - ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b); -+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a_s0); -+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b_s0); - - /***************************************************************************** - * -diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h -index 810de0b4c1256..9c3ad33e926a6 100644 ---- a/drivers/acpi/acpica/achware.h -+++ b/drivers/acpi/acpica/achware.h -@@ -101,8 +101,6 @@ acpi_status - acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, - acpi_event_status *event_status); - --acpi_status acpi_hw_disable_all_gpes(void); -- - acpi_status acpi_hw_enable_all_runtime_gpes(void); - - acpi_status acpi_hw_enable_all_wakeup_gpes(void); -diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c -index 3615e1a6efd8a..b91155ea9c343 100644 ---- a/drivers/acpi/acpica/dbnames.c -+++ b/drivers/acpi/acpica/dbnames.c -@@ -652,6 +652,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg) - object_info = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info)); - -+ if (!object_info) -+ return (AE_NO_MEMORY); -+ - /* Walk the namespace from the root */ - - (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, -diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c -index 8e011e59b9b48..ee1832ba39a24 100644 ---- 
a/drivers/acpi/acpica/dsmethod.c -+++ b/drivers/acpi/acpica/dsmethod.c -@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, - info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); - if (!info) { - status = AE_NO_MEMORY; -- goto cleanup; -+ goto pop_walk_state; - } - - info->parameters = &this_walk_state->operands[0]; -@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, - - ACPI_FREE(info); - if (ACPI_FAILURE(status)) { -- goto cleanup; -+ goto pop_walk_state; - } - - next_walk_state->method_nesting_depth = -@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, - - return_ACPI_STATUS(status); - -+pop_walk_state: -+ -+ /* On error, pop the walk state to be deleted from thread */ -+ -+ acpi_ds_pop_walk_state(thread); -+ - cleanup: - - /* On error, we must terminate the method properly */ -diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c -index fbe2ba05c82a6..1c862940cc5b2 100644 ---- a/drivers/acpi/acpica/dswstate.c -+++ b/drivers/acpi/acpica/dswstate.c -@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, - ACPI_FUNCTION_TRACE(ds_init_aml_walk); - - walk_state->parser_state.aml = -- walk_state->parser_state.aml_start = aml_start; -- walk_state->parser_state.aml_end = -- walk_state->parser_state.pkg_end = aml_start + aml_length; -+ walk_state->parser_state.aml_start = -+ walk_state->parser_state.aml_end = -+ walk_state->parser_state.pkg_end = aml_start; -+ /* Avoid undefined behavior: applying zero offset to null pointer */ -+ if (aml_length != 0) { -+ walk_state->parser_state.aml_end += aml_length; -+ walk_state->parser_state.pkg_end += aml_length; -+ } - - /* The next_op of the next_walk will be the beginning of the method */ - -diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c -index 06f3c9df1e22d..8618500f23b39 100644 ---- a/drivers/acpi/acpica/exfield.c -+++ b/drivers/acpi/acpica/exfield.c -@@ -330,12 +330,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, - obj_desc->field.base_byte_offset, - source_desc->buffer.pointer, data_length); - -- if ((obj_desc->field.region_obj->region.address == -- PCC_MASTER_SUBSPACE -- && MASTER_SUBSPACE_COMMAND(obj_desc->field. -- base_byte_offset)) -- || GENERIC_SUBSPACE_COMMAND(obj_desc->field. 
-- base_byte_offset)) { -+ if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) { - - /* Perform the write */ - -diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c -index b639e930d6429..44b7c350ed5ca 100644 ---- a/drivers/acpi/acpica/exoparg1.c -+++ b/drivers/acpi/acpica/exoparg1.c -@@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) - (walk_state, return_desc, - &temp_desc); - if (ACPI_FAILURE(status)) { -- goto cleanup; -+ return_ACPI_STATUS -+ (status); - } - - return_desc = temp_desc; -diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c -index 803402aefaeb6..7ee2939c08cd4 100644 ---- a/drivers/acpi/acpica/hwesleep.c -+++ b/drivers/acpi/acpica/hwesleep.c -@@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) - - /* Flush caches, as per ACPI specification */ - -- ACPI_FLUSH_CPU_CACHE(); -+ if (sleep_state < ACPI_STATE_S4) { -+ ACPI_FLUSH_CPU_CACHE(); -+ } - - status = acpi_os_enter_sleep(sleep_state, sleep_control, 0); - if (status == AE_CTRL_TERMINATE) { -@@ -147,17 +149,13 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) - - acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) - { -- acpi_status status; - u8 sleep_type_value; - - ACPI_FUNCTION_TRACE(hw_extended_wake_prep); - -- status = acpi_get_sleep_type_data(ACPI_STATE_S0, -- &acpi_gbl_sleep_type_a, -- &acpi_gbl_sleep_type_b); -- if (ACPI_SUCCESS(status)) { -+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { - sleep_type_value = -- ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & -+ ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) & - ACPI_X_SLEEP_TYPE_MASK); - - (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE), -diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c -index 14baa13bf8482..5efa3d8e483e0 100644 ---- a/drivers/acpi/acpica/hwsleep.c -+++ b/drivers/acpi/acpica/hwsleep.c -@@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) - - /* Flush caches, as per ACPI specification */ - -- ACPI_FLUSH_CPU_CACHE(); -+ if (sleep_state < ACPI_STATE_S4) { -+ ACPI_FLUSH_CPU_CACHE(); -+ } - - status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control); - if (status == AE_CTRL_TERMINATE) { -@@ -179,7 +181,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) - - acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) - { -- acpi_status status; -+ acpi_status status = AE_OK; - struct acpi_bit_register_info *sleep_type_reg_info; - struct acpi_bit_register_info *sleep_enable_reg_info; - u32 pm1a_control; -@@ -192,10 +194,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) - * This is unclear from the ACPI Spec, but it is required - * by some machines. 
- */ -- status = acpi_get_sleep_type_data(ACPI_STATE_S0, -- &acpi_gbl_sleep_type_a, -- &acpi_gbl_sleep_type_b); -- if (ACPI_SUCCESS(status)) { -+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { - sleep_type_reg_info = - acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE); - sleep_enable_reg_info = -@@ -216,9 +215,9 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) - - /* Insert the SLP_TYP bits */ - -- pm1a_control |= (acpi_gbl_sleep_type_a << -+ pm1a_control |= (acpi_gbl_sleep_type_a_s0 << - sleep_type_reg_info->bit_position); -- pm1b_control |= (acpi_gbl_sleep_type_b << -+ pm1b_control |= (acpi_gbl_sleep_type_b_s0 << - sleep_type_reg_info->bit_position); - - /* Write the control registers and ignore any errors */ -diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c -index e15badf4077aa..c6716f90e013a 100644 ---- a/drivers/acpi/acpica/hwvalid.c -+++ b/drivers/acpi/acpica/hwvalid.c -@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width); - * - * The table is used to implement the Microsoft port access rules that - * first appeared in Windows XP. Some ports are always illegal, and some -- * ports are only illegal if the BIOS calls _OSI with a win_XP string or -- * later (meaning that the BIOS itelf is post-XP.) -+ * ports are only illegal if the BIOS calls _OSI with nothing newer than -+ * the specific _OSI strings. - * - * This provides ACPICA with the desired port protections and - * Microsoft compatibility. -@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) - - /* Port illegality may depend on the _OSI calls made by the BIOS */ - -- if (acpi_gbl_osi_data >= port_info->osi_dependency) { -+ if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL || -+ acpi_gbl_osi_data == port_info->osi_dependency) { - ACPI_DEBUG_PRINT((ACPI_DB_VALUES, - "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n", - ACPI_FORMAT_UINT64(address), -diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c -index 89b12afed564e..ba77598ee43e8 100644 ---- a/drivers/acpi/acpica/hwxfsleep.c -+++ b/drivers/acpi/acpica/hwxfsleep.c -@@ -162,8 +162,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void) - return_ACPI_STATUS(status); - } - -- ACPI_FLUSH_CPU_CACHE(); -- - status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, - (u32)acpi_gbl_FADT.s4_bios_request, 8); - if (ACPI_FAILURE(status)) { -@@ -217,6 +215,13 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state) - return_ACPI_STATUS(status); - } - -+ status = acpi_get_sleep_type_data(ACPI_STATE_S0, -+ &acpi_gbl_sleep_type_a_s0, -+ &acpi_gbl_sleep_type_b_s0); -+ if (ACPI_FAILURE(status)) { -+ acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID; -+ } -+ - /* Execute the _PTS method (Prepare To Sleep) */ - - arg_list.count = 1; -diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c -index 499067daa22c6..1b8677f2ced37 100644 ---- a/drivers/acpi/acpica/nsrepair.c -+++ b/drivers/acpi/acpica/nsrepair.c -@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, - * Try to fix if there was no return object. Warning if failed to fix. 
- */ - if (!return_object) { -- if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { -- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { -+ if (expected_btypes) { -+ if (!(expected_btypes & ACPI_RTYPE_NONE) && -+ package_index != ACPI_NOT_PACKAGE_ELEMENT) { - ACPI_WARN_PREDEFINED((AE_INFO, - info->full_pathname, - ACPI_WARN_ALWAYS, -@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info, - if (ACPI_SUCCESS(status)) { - return (AE_OK); /* Repair was successful */ - } -- } else { -+ } -+ -+ if (expected_btypes != ACPI_RTYPE_NONE) { - ACPI_WARN_PREDEFINED((AE_INFO, - info->full_pathname, - ACPI_WARN_ALWAYS, - "Missing expected return value")); -+ return (AE_AML_NO_RETURN_VALUE); - } -- -- return (AE_AML_NO_RETURN_VALUE); - } - } - -diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c -index 915c2433463d7..e7c30ce06e189 100644 ---- a/drivers/acpi/acpica/nswalk.c -+++ b/drivers/acpi/acpica/nswalk.c -@@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, - - if (start_node == ACPI_ROOT_OBJECT) { - start_node = acpi_gbl_root_node; -+ if (!start_node) { -+ return_ACPI_STATUS(AE_NO_NAMESPACE); -+ } - } - - /* Null child means "get first node" */ -diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c -index d9877153f4001..fdd503bb69c47 100644 ---- a/drivers/acpi/acpica/utcopy.c -+++ b/drivers/acpi/acpica/utcopy.c -@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, - status = acpi_ut_walk_package_tree(source_obj, dest_obj, - acpi_ut_copy_ielement_to_ielement, - walk_state); -- if (ACPI_FAILURE(status)) { -- -- /* On failure, delete the destination package object */ -- -- acpi_ut_remove_reference(dest_obj); -- } -- - return_ACPI_STATUS(status); - } - -diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c -index e5ba9795ec696..8d7736d2d2699 100644 ---- a/drivers/acpi/acpica/utdelete.c -+++ b/drivers/acpi/acpica/utdelete.c -@@ -422,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) - ACPI_WARNING((AE_INFO, - "Obj %p, Reference Count is already zero, cannot decrement\n", - object)); -+ return; - } - - ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, -diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c -index 19e50fcbf4d6f..45973aa6e06d4 100644 ---- a/drivers/acpi/apei/bert.c -+++ b/drivers/acpi/apei/bert.c -@@ -30,14 +30,25 @@ - #undef pr_fmt - #define pr_fmt(fmt) "BERT: " fmt - -+#define ACPI_BERT_PRINT_MAX_RECORDS 5 -+#define ACPI_BERT_PRINT_MAX_LEN 1024 -+ - static int bert_disable; - -+/* -+ * Print "all" the error records in the BERT table, but avoid huge spam to -+ * the console if the BIOS included oversize records, or too many records. -+ * Skipping some records here does not lose anything because the full -+ * data is available to user tools in: -+ * /sys/firmware/acpi/tables/data/BERT -+ */ - static void __init bert_print_all(struct acpi_bert_region *region, - unsigned int region_len) - { - struct acpi_hest_generic_status *estatus = - (struct acpi_hest_generic_status *)region; - int remain = region_len; -+ int printed = 0, skipped = 0; - u32 estatus_len; - - while (remain >= sizeof(struct acpi_bert_region)) { -@@ -45,21 +56,26 @@ static void __init bert_print_all(struct acpi_bert_region *region, - if (remain < estatus_len) { - pr_err(FW_BUG "Truncated status block (length: %u).\n", - estatus_len); -- return; -+ break; - } - - /* No more error records. 
*/ - if (!estatus->block_status) -- return; -+ break; - - if (cper_estatus_check(estatus)) { - pr_err(FW_BUG "Invalid error record.\n"); -- return; -+ break; - } - -- pr_info_once("Error records from previous boot:\n"); -- -- cper_estatus_print(KERN_INFO HW_ERR, estatus); -+ if (estatus_len < ACPI_BERT_PRINT_MAX_LEN && -+ printed < ACPI_BERT_PRINT_MAX_RECORDS) { -+ pr_info_once("Error records from previous boot:\n"); -+ cper_estatus_print(KERN_INFO HW_ERR, estatus); -+ printed++; -+ } else { -+ skipped++; -+ } - - /* - * Because the boot error source is "one-time polled" type, -@@ -71,13 +87,16 @@ static void __init bert_print_all(struct acpi_bert_region *region, - estatus = (void *)estatus + estatus_len; - remain -= estatus_len; - } -+ -+ if (skipped) -+ pr_info(HW_ERR "Skipped %d error records\n", skipped); - } - - static int __init setup_bert_disable(char *str) - { - bert_disable = 1; - -- return 0; -+ return 1; - } - __setup("bert_disable", setup_bert_disable); - -diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c -index 2882450c443ed..2e0ab898cce3b 100644 ---- a/drivers/acpi/apei/einj.c -+++ b/drivers/acpi/apei/einj.c -@@ -544,6 +544,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, - ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) - != REGION_INTERSECTS) && - (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) -+ != REGION_INTERSECTS) && -+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED) - != REGION_INTERSECTS))) - return -EINVAL; - -diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c -index 242f3c2d55330..698d67cee0527 100644 ---- a/drivers/acpi/apei/erst.c -+++ b/drivers/acpi/apei/erst.c -@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear); - static int __init setup_erst_disable(char *str) - { - erst_disable = 1; -- return 0; -+ return 1; - } - - __setup("erst_disable", setup_erst_disable); -diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c -index 0c8330ed1ffd5..8678e162181f4 100644 ---- a/drivers/acpi/apei/ghes.c -+++ b/drivers/acpi/apei/ghes.c -@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) - clear_fixmap(fixmap_idx); - } - --int ghes_estatus_pool_init(int num_ghes) -+int ghes_estatus_pool_init(unsigned int num_ghes) - { - unsigned long addr, len; - int rc; -@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) - ghes_estatus_cache_add(generic, estatus); - } - -- if (task_work_pending && current->mm != &init_mm) { -+ if (task_work_pending && current->mm) { - estatus_node->task_work.func = ghes_kick_task_work; - estatus_node->task_work_cpu = smp_processor_id(); - ret = task_work_add(current, &estatus_node->task_work, -@@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = { - .remove = ghes_remove, - }; - --static int __init ghes_init(void) -+void __init ghes_init(void) - { - int rc; - -+ sdei_init(); -+ - if (acpi_disabled) -- return -ENODEV; -+ return; - - switch (hest_disable) { - case HEST_NOT_FOUND: -- return -ENODEV; -+ return; - case HEST_DISABLED: - pr_info(GHES_PFX "HEST is not enabled!\n"); -- return -EINVAL; -+ return; - default: - break; - } - - if (ghes_disable) { - pr_info(GHES_PFX "GHES is not enabled!\n"); -- return -EINVAL; -+ return; - } - - ghes_nmi_init_cxt(); - - rc = platform_driver_register(&ghes_platform_driver); - if (rc) -- goto err; -+ return; - - rc = apei_osc_setup(); - if (rc == 0 && 
osc_sb_apei_support_acked) -@@ -1494,9 +1496,4 @@ static int __init ghes_init(void) - pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); - else - pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); -- -- return 0; --err: -- return rc; - } --device_initcall(ghes_init); -diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c -index 277f00b288d14..317bba602ad54 100644 ---- a/drivers/acpi/apei/hest.c -+++ b/drivers/acpi/apei/hest.c -@@ -223,7 +223,7 @@ err: - static int __init setup_hest_disable(char *str) - { - hest_disable = HEST_DISABLED; -- return 0; -+ return 1; - } - - __setup("hest_disable", setup_hest_disable); -diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c -index 3b23fb775ac45..f2f8f05662deb 100644 ---- a/drivers/acpi/arm64/iort.c -+++ b/drivers/acpi/arm64/iort.c -@@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, - res[0].start = pmcg->page0_base_address; - res[0].end = pmcg->page0_base_address + SZ_4K - 1; - res[0].flags = IORESOURCE_MEM; -- res[1].start = pmcg->page1_base_address; -- res[1].end = pmcg->page1_base_address + SZ_4K - 1; -- res[1].flags = IORESOURCE_MEM; -+ /* -+ * The initial version in DEN0049C lacked a way to describe register -+ * page 1, which makes it broken for most PMCG implementations; in -+ * that case, just let the driver fail gracefully if it expects to -+ * find a second memory resource. -+ */ -+ if (node->revision > 0) { -+ res[1].start = pmcg->page1_base_address; -+ res[1].end = pmcg->page1_base_address + SZ_4K - 1; -+ res[1].flags = IORESOURCE_MEM; -+ } - - if (pmcg->overflow_gsiv) - acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", -diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c -index dae91f906cea9..c7569151fd02a 100644 ---- a/drivers/acpi/battery.c -+++ b/drivers/acpi/battery.c -@@ -53,12 +53,17 @@ static int battery_bix_broken_package; - static int battery_notification_delay_ms; - static int battery_ac_is_broken; - static int battery_check_pmic = 1; -+static int battery_quirk_notcharging; - static unsigned int cache_time = 1000; - module_param(cache_time, uint, 0644); - MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); - - static const struct acpi_device_id battery_device_ids[] = { - {"PNP0C0A", 0}, -+ -+ /* Microsoft Surface Go 3 */ -+ {"MSHW0146", 0}, -+ - {"", 0}, - }; - -@@ -169,7 +174,7 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) - return 1; - - /* fallback to using design values for broken batteries */ -- if (battery->design_capacity == battery->capacity_now) -+ if (battery->design_capacity <= battery->capacity_now) - return 1; - - /* we don't do any sort of metric based on percentages */ -@@ -217,6 +222,8 @@ static int acpi_battery_get_property(struct power_supply *psy, - val->intval = POWER_SUPPLY_STATUS_CHARGING; - else if (acpi_battery_is_charged(battery)) - val->intval = POWER_SUPPLY_STATUS_FULL; -+ else if (battery_quirk_notcharging) -+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; - else - val->intval = POWER_SUPPLY_STATUS_UNKNOWN; - break; -@@ -442,7 +449,7 @@ static int extract_package(struct acpi_battery *battery, - - if (element->type == ACPI_TYPE_STRING || - element->type == ACPI_TYPE_BUFFER) -- strncpy(ptr, element->string.pointer, 32); -+ strscpy(ptr, element->string.pointer, 32); - else if (element->type == ACPI_TYPE_INTEGER) { - strncpy(ptr, (u8 *)&element->integer.value, - sizeof(u64)); -@@ -1111,6 +1118,12 @@ battery_do_not_check_pmic_quirk(const struct 
dmi_system_id *d) - return 0; - } - -+static int __init battery_quirk_not_charging(const struct dmi_system_id *d) -+{ -+ battery_quirk_notcharging = 1; -+ return 0; -+} -+ - static const struct dmi_system_id bat_dmi_table[] __initconst = { - { - /* NEC LZ750/LS */ -@@ -1155,6 +1168,27 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), - }, - }, -+ { -+ /* -+ * On Lenovo ThinkPads the BIOS specification defines -+ * a state when the bits for charging and discharging -+ * are both set to 0. That state is "Not Charging". -+ */ -+ .callback = battery_quirk_not_charging, -+ .ident = "Lenovo ThinkPad", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), -+ }, -+ }, -+ { -+ /* Microsoft Surface Go 3 */ -+ .callback = battery_notification_delay_quirk, -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), -+ }, -+ }, - {}, - }; - -diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c -index fa923a9292244..7774b603a7962 100644 ---- a/drivers/acpi/bus.c -+++ b/drivers/acpi/bus.c -@@ -98,8 +98,8 @@ int acpi_bus_get_status(struct acpi_device *device) - acpi_status status; - unsigned long long sta; - -- if (acpi_device_always_present(device)) { -- acpi_set_device_status(device, ACPI_STA_DEFAULT); -+ if (acpi_device_override_status(device, &sta)) { -+ acpi_set_device_status(device, sta); - return 0; - } - -@@ -332,21 +332,32 @@ static void acpi_bus_osc_negotiate_platform_control(void) - if (ACPI_FAILURE(acpi_run_osc(handle, &context))) - return; - -- kfree(context.ret.pointer); -+ capbuf_ret = context.ret.pointer; -+ if (context.ret.length <= OSC_SUPPORT_DWORD) { -+ kfree(context.ret.pointer); -+ return; -+ } - -- /* Now run _OSC again with query flag clear */ -+ /* -+ * Now run _OSC again with query flag clear and with the caps -+ * supported by both the OS and the platform. 
-+ */ - capbuf[OSC_QUERY_DWORD] = 0; -+ capbuf[OSC_SUPPORT_DWORD] = capbuf_ret[OSC_SUPPORT_DWORD]; -+ kfree(context.ret.pointer); - - if (ACPI_FAILURE(acpi_run_osc(handle, &context))) - return; - - capbuf_ret = context.ret.pointer; -- osc_sb_apei_support_acked = -- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; -- osc_pc_lpi_support_confirmed = -- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; -- osc_sb_native_usb4_support_confirmed = -- capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; -+ if (context.ret.length > OSC_SUPPORT_DWORD) { -+ osc_sb_apei_support_acked = -+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT; -+ osc_pc_lpi_support_confirmed = -+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT; -+ osc_sb_native_usb4_support_confirmed = -+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT; -+ } - - kfree(context.ret.pointer); - } -@@ -1329,6 +1340,9 @@ static int __init acpi_init(void) - - pci_mmcfg_late_init(); - acpi_iort_init(); -+ acpi_viot_early_init(); -+ acpi_hest_init(); -+ ghes_init(); - acpi_scan_init(); - acpi_ec_init(); - acpi_debugfs_init(); -diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c -index bd482108310cf..7cc9183c8dc8e 100644 ---- a/drivers/acpi/cppc_acpi.c -+++ b/drivers/acpi/cppc_acpi.c -@@ -100,6 +100,16 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); - (cpc)->cpc_entry.reg.space_id == \ - ACPI_ADR_SPACE_PLATFORM_COMM) - -+/* Check if a CPC register is in SystemMemory */ -+#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ -+ (cpc)->cpc_entry.reg.space_id == \ -+ ACPI_ADR_SPACE_SYSTEM_MEMORY) -+ -+/* Check if a CPC register is in SystemIo */ -+#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ -+ (cpc)->cpc_entry.reg.space_id == \ -+ ACPI_ADR_SPACE_SYSTEM_IO) -+ - /* Evaluates to True if reg is a NULL register descriptor */ - #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ - (reg)->address == 0 && \ -@@ -411,7 +421,7 @@ bool acpi_cpc_valid(void) - struct cpc_desc *cpc_ptr; - int cpu; - -- for_each_possible_cpu(cpu) { -+ for_each_present_cpu(cpu) { - cpc_ptr = per_cpu(cpc_desc_ptr, cpu); - if (!cpc_ptr) - return false; -@@ -587,33 +597,6 @@ static int pcc_data_alloc(int pcc_ss_id) - return 0; - } - --/* Check if CPPC revision + num_ent combination is supported */ --static bool is_cppc_supported(int revision, int num_ent) --{ -- int expected_num_ent; -- -- switch (revision) { -- case CPPC_V2_REV: -- expected_num_ent = CPPC_V2_NUM_ENT; -- break; -- case CPPC_V3_REV: -- expected_num_ent = CPPC_V3_NUM_ENT; -- break; -- default: -- pr_debug("Firmware exports unsupported CPPC revision: %d\n", -- revision); -- return false; -- } -- -- if (expected_num_ent != num_ent) { -- pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n", -- num_ent, expected_num_ent, revision); -- return false; -- } -- -- return true; --} -- - /* - * An example CPC table looks like the following. - * -@@ -703,12 +686,16 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) - cpc_obj = &out_obj->package.elements[0]; - if (cpc_obj->type == ACPI_TYPE_INTEGER) { - num_ent = cpc_obj->integer.value; -+ if (num_ent <= 1) { -+ pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", -+ num_ent, pr->id); -+ goto out_free; -+ } - } else { - pr_debug("Unexpected entry type(%d) for NumEntries\n", - cpc_obj->type); - goto out_free; - } -- cpc_ptr->num_entries = num_ent; - - /* Second entry should be revision. 
*/ - cpc_obj = &out_obj->package.elements[1]; -@@ -719,10 +706,32 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) - cpc_obj->type); - goto out_free; - } -- cpc_ptr->version = cpc_rev; - -- if (!is_cppc_supported(cpc_rev, num_ent)) -+ if (cpc_rev < CPPC_V2_REV) { -+ pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev, -+ pr->id); -+ goto out_free; -+ } -+ -+ /* -+ * Disregard _CPC if the number of entries in the return pachage is not -+ * as expected, but support future revisions being proper supersets of -+ * the v3 and only causing more entries to be returned by _CPC. -+ */ -+ if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || -+ (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || -+ (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { -+ pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n", -+ num_ent, pr->id); - goto out_free; -+ } -+ if (cpc_rev > CPPC_V3_REV) { -+ num_ent = CPPC_V3_NUM_ENT; -+ cpc_rev = CPPC_V3_REV; -+ } -+ -+ cpc_ptr->num_entries = num_ent; -+ cpc_ptr->version = cpc_rev; - - /* Iterate through remaining entries in _CPC */ - for (i = 2; i < num_ent; i++) { -@@ -1011,7 +1020,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) - static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf) - { - struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); -- struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx]; -+ struct cpc_register_resource *reg; -+ -+ if (!cpc_desc) { -+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum); -+ return -ENODEV; -+ } -+ -+ reg = &cpc_desc->cpc_regs[reg_idx]; - - if (CPC_IN_PCC(reg)) { - int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); -@@ -1366,6 +1382,9 @@ EXPORT_SYMBOL_GPL(cppc_set_perf); - * transition latency for performance change requests. The closest we have - * is the timing information from the PCCT tables which provides the info - * on the number and frequency of PCC commands the platform can handle. -+ * -+ * If desired_reg is in the SystemMemory or SystemIo ACPI address space, -+ * then assume there is no latency. 
- */ - unsigned int cppc_get_transition_latency(int cpu_num) - { -@@ -1391,7 +1410,9 @@ unsigned int cppc_get_transition_latency(int cpu_num) - return CPUFREQ_ETERNAL; - - desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; -- if (!CPC_IN_PCC(desired_reg)) -+ if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg)) -+ return 0; -+ else if (!CPC_IN_PCC(desired_reg)) - return CPUFREQ_ETERNAL; - - if (pcc_ss_id < 0) -diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c -index e629e891d1bb3..472418a0e0cab 100644 ---- a/drivers/acpi/ec.c -+++ b/drivers/acpi/ec.c -@@ -166,6 +166,7 @@ struct acpi_ec_query { - struct transaction transaction; - struct work_struct work; - struct acpi_ec_query_handler *handler; -+ struct acpi_ec *ec; - }; - - static int acpi_ec_query(struct acpi_ec *ec, u8 *data); -@@ -182,7 +183,6 @@ static struct workqueue_struct *ec_wq; - static struct workqueue_struct *ec_query_wq; - - static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ --static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ - static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */ - static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ - -@@ -452,6 +452,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec) - ec_dbg_evt("Command(%s) submitted/blocked", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - ec->nr_pending_queries++; -+ ec->events_in_progress++; - queue_work(ec_wq, &ec->work); - } - } -@@ -518,7 +519,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) - #ifdef CONFIG_PM_SLEEP - static void __acpi_ec_flush_work(void) - { -- drain_workqueue(ec_wq); /* flush ec->work */ -+ flush_workqueue(ec_wq); /* flush ec->work */ - flush_workqueue(ec_query_wq); /* flush queries */ - } - -@@ -1100,10 +1101,11 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, - void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) - { - acpi_ec_remove_query_handlers(ec, false, query_bit); -+ flush_workqueue(ec_query_wq); - } - EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); - --static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) -+static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) - { - struct acpi_ec_query *q; - struct transaction *t; -@@ -1111,11 +1113,13 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) - q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); - if (!q) - return NULL; -+ - INIT_WORK(&q->work, acpi_ec_event_processor); - t = &q->transaction; - t->command = ACPI_EC_COMMAND_QUERY; - t->rdata = pval; - t->rlen = 1; -+ q->ec = ec; - return q; - } - -@@ -1132,13 +1136,21 @@ static void acpi_ec_event_processor(struct work_struct *work) - { - struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); - struct acpi_ec_query_handler *handler = q->handler; -+ struct acpi_ec *ec = q->ec; - - ec_dbg_evt("Query(0x%02x) started", handler->query_bit); -+ - if (handler->func) - handler->func(handler->data); - else if (handler->handle) - acpi_evaluate_object(handler->handle, NULL, NULL, NULL); -+ - ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); -+ -+ spin_lock_irq(&ec->lock); -+ ec->queries_in_progress--; -+ spin_unlock_irq(&ec->lock); -+ - acpi_ec_delete_query(q); - } - -@@ -1148,7 +1160,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) - int result; - struct acpi_ec_query *q; - -- q = acpi_ec_create_query(&value); -+ q = acpi_ec_create_query(ec, &value); - if (!q) - return -ENOMEM; - -@@ -1170,19 +1182,20 
@@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) - } - - /* -- * It is reported that _Qxx are evaluated in a parallel way on -- * Windows: -+ * It is reported that _Qxx are evaluated in a parallel way on Windows: - * https://bugzilla.kernel.org/show_bug.cgi?id=94411 - * -- * Put this log entry before schedule_work() in order to make -- * it appearing before any other log entries occurred during the -- * work queue execution. -+ * Put this log entry before queue_work() to make it appear in the log -+ * before any other messages emitted during workqueue handling. - */ - ec_dbg_evt("Query(0x%02x) scheduled", value); -- if (!queue_work(ec_query_wq, &q->work)) { -- ec_dbg_evt("Query(0x%02x) overlapped", value); -- result = -EBUSY; -- } -+ -+ spin_lock_irq(&ec->lock); -+ -+ ec->queries_in_progress++; -+ queue_work(ec_query_wq, &q->work); -+ -+ spin_unlock_irq(&ec->lock); - - err_exit: - if (result) -@@ -1240,6 +1253,10 @@ static void acpi_ec_event_handler(struct work_struct *work) - ec_dbg_evt("Event stopped"); - - acpi_ec_check_event(ec); -+ -+ spin_lock_irqsave(&ec->lock, flags); -+ ec->events_in_progress--; -+ spin_unlock_irqrestore(&ec->lock, flags); - } - - static void acpi_ec_handle_interrupt(struct acpi_ec *ec) -@@ -1375,24 +1392,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) - if (ec->data_addr == 0 || ec->command_addr == 0) - return AE_OK; - -- if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) { -- /* -- * Always inherit the GPE number setting from the ECDT -- * EC. -- */ -- ec->gpe = boot_ec->gpe; -- } else { -- /* Get GPE bit assignment (EC events). */ -- /* TODO: Add support for _GPE returning a package */ -- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); -- if (ACPI_SUCCESS(status)) -- ec->gpe = tmp; -+ /* Get GPE bit assignment (EC events). */ -+ /* TODO: Add support for _GPE returning a package */ -+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); -+ if (ACPI_SUCCESS(status)) -+ ec->gpe = tmp; -+ /* -+ * Errors are non-fatal, allowing for ACPI Reduced Hardware -+ * platforms which use GpioInt instead of GPE. -+ */ - -- /* -- * Errors are non-fatal, allowing for ACPI Reduced Hardware -- * platforms which use GpioInt instead of GPE. -- */ -- } - /* Use the global lock for all EC transactions? */ - tmp = 0; - acpi_evaluate_integer(handle, "_GLK", NULL, &tmp); -@@ -1830,60 +1839,12 @@ static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) - return 0; - } - --/* -- * Some DSDTs contain wrong GPE setting. -- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD -- * https://bugzilla.kernel.org/show_bug.cgi?id=195651 -- */ --static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) --{ -- pr_debug("Detected system needing ignore DSDT GPE setting.\n"); -- EC_FLAGS_IGNORE_DSDT_GPE = 1; -- return 0; --} -- - static const struct dmi_system_id ec_dmi_table[] __initconst = { - { - ec_correct_ecdt, "MSI MS-171F", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), - DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, - { -- ec_honor_ecdt_gpe, "ASUS FX502VD", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUS FX502VE", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUS GL702VMK", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. 
X505BA", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUS X550VXK", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL}, -- { -- ec_honor_ecdt_gpe, "ASUS X580VD", { -- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -- DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, -- { - /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */ - ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), -@@ -2021,6 +1982,7 @@ void acpi_ec_set_gpe_wake_mask(u8 action) - - bool acpi_ec_dispatch_gpe(void) - { -+ bool work_in_progress; - u32 ret; - - if (!first_ec) -@@ -2041,8 +2003,19 @@ bool acpi_ec_dispatch_gpe(void) - if (ret == ACPI_INTERRUPT_HANDLED) - pm_pr_dbg("ACPI EC GPE dispatched\n"); - -- /* Flush the event and query workqueues. */ -- acpi_ec_flush_work(); -+ /* Drain EC work. */ -+ do { -+ acpi_ec_flush_work(); -+ -+ pm_pr_dbg("ACPI EC work flushed\n"); -+ -+ spin_lock_irq(&first_ec->lock); -+ -+ work_in_progress = first_ec->events_in_progress + -+ first_ec->queries_in_progress > 0; -+ -+ spin_unlock_irq(&first_ec->lock); -+ } while (work_in_progress && !pm_wakeup_pending()); - - return false; - } -@@ -2138,13 +2111,6 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { - DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), - }, - }, -- { -- .ident = "ThinkPad X1 Carbon 6th", -- .matches = { -- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -- DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"), -- }, -- }, - { - .ident = "ThinkPad X1 Yoga 3rd", - .matches = { -diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h -index d91b560e88674..54b2be94d23dc 100644 ---- a/drivers/acpi/internal.h -+++ b/drivers/acpi/internal.h -@@ -183,6 +183,8 @@ struct acpi_ec { - struct work_struct work; - unsigned long timestamp; - unsigned long nr_pending_queries; -+ unsigned int events_in_progress; -+ unsigned int queries_in_progress; - bool busy_polling; - unsigned int polling_guard; - }; -diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c -index 7dd80acf92c78..2575d6c51f898 100644 ---- a/drivers/acpi/nfit/core.c -+++ b/drivers/acpi/nfit/core.c -@@ -3676,8 +3676,8 @@ void acpi_nfit_shutdown(void *data) - - mutex_lock(&acpi_desc->init_mutex); - set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); -- cancel_delayed_work_sync(&acpi_desc->dwork); - mutex_unlock(&acpi_desc->init_mutex); -+ cancel_delayed_work_sync(&acpi_desc->dwork); - - /* - * Bounce the nvdimm bus lock to make sure any in-flight -diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c -index c3d783aca196f..b42653707fdcd 100644 ---- a/drivers/acpi/numa/hmat.c -+++ b/drivers/acpi/numa/hmat.c -@@ -563,17 +563,26 @@ static int initiator_cmp(void *priv, const struct list_head *a, - { - struct memory_initiator *ia; - struct memory_initiator *ib; -- unsigned long *p_nodes = priv; - - ia = list_entry(a, struct memory_initiator, node); - ib = 
list_entry(b, struct memory_initiator, node); - -- set_bit(ia->processor_pxm, p_nodes); -- set_bit(ib->processor_pxm, p_nodes); -- - return ia->processor_pxm - ib->processor_pxm; - } - -+static int initiators_to_nodemask(unsigned long *p_nodes) -+{ -+ struct memory_initiator *initiator; -+ -+ if (list_empty(&initiators)) -+ return -ENXIO; -+ -+ list_for_each_entry(initiator, &initiators, node) -+ set_bit(initiator->processor_pxm, p_nodes); -+ -+ return 0; -+} -+ - static void hmat_register_target_initiators(struct memory_target *target) - { - static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); -@@ -610,7 +619,10 @@ static void hmat_register_target_initiators(struct memory_target *target) - * initiators. - */ - bitmap_zero(p_nodes, MAX_NUMNODES); -- list_sort(p_nodes, &initiators, initiator_cmp); -+ list_sort(NULL, &initiators, initiator_cmp); -+ if (initiators_to_nodemask(p_nodes) < 0) -+ return; -+ - if (!access0done) { - for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { - loc = localities_types[i]; -@@ -644,8 +656,9 @@ static void hmat_register_target_initiators(struct memory_target *target) - - /* Access 1 ignores Generic Initiators */ - bitmap_zero(p_nodes, MAX_NUMNODES); -- list_sort(p_nodes, &initiators, initiator_cmp); -- best = 0; -+ if (initiators_to_nodemask(p_nodes) < 0) -+ return; -+ - for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { - loc = localities_types[i]; - if (!loc) -diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c -index 53cab975f612c..63b98eae5e75e 100644 ---- a/drivers/acpi/pci_mcfg.c -+++ b/drivers/acpi/pci_mcfg.c -@@ -41,6 +41,8 @@ struct mcfg_fixup { - static struct mcfg_fixup mcfg_quirks[] = { - /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ - -+#ifdef CONFIG_ARM64 -+ - #define AL_ECAM(table_id, rev, seg, ops) \ - { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } - -@@ -169,6 +171,7 @@ static struct mcfg_fixup mcfg_quirks[] = { - ALTRA_ECAM_QUIRK(1, 13), - ALTRA_ECAM_QUIRK(1, 14), - ALTRA_ECAM_QUIRK(1, 15), -+#endif /* ARM64 */ - }; - - static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; -diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c -index d7deedf3548e0..223aa010dd8da 100644 ---- a/drivers/acpi/pci_root.c -+++ b/drivers/acpi/pci_root.c -@@ -22,8 +22,6 @@ - #include - #include - #include --#include /* for acpi_hest_init() */ -- - #include "internal.h" - - #define ACPI_PCI_ROOT_CLASS "pci_bridge" -@@ -938,7 +936,6 @@ out_release_info: - - void __init acpi_pci_root_init(void) - { -- acpi_hest_init(); - if (acpi_pci_disabled) - return; - -diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c -index a371f273f99dd..9cde299eba880 100644 ---- a/drivers/acpi/pmic/intel_pmic.c -+++ b/drivers/acpi/pmic/intel_pmic.c -@@ -211,31 +211,36 @@ static acpi_status intel_pmic_regs_handler(u32 function, - void *handler_context, void *region_context) - { - struct intel_pmic_opregion *opregion = region_context; -- int result = 0; -+ int result = -EINVAL; -+ -+ if (function == ACPI_WRITE) { -+ switch (address) { -+ case 0: -+ return AE_OK; -+ case 1: -+ opregion->ctx.addr |= (*value64 & 0xff) << 8; -+ return AE_OK; -+ case 2: -+ opregion->ctx.addr |= *value64 & 0xff; -+ return AE_OK; -+ case 3: -+ opregion->ctx.val = *value64 & 0xff; -+ return AE_OK; -+ case 4: -+ if (*value64) { -+ result = regmap_write(opregion->regmap, opregion->ctx.addr, -+ opregion->ctx.val); -+ } else { -+ result = regmap_read(opregion->regmap, opregion->ctx.addr, -+ &opregion->ctx.val); -+ } -+ opregion->ctx.addr = 0; -+ } -+ } - -- switch 
(address) { -- case 0: -- return AE_OK; -- case 1: -- opregion->ctx.addr |= (*value64 & 0xff) << 8; -- return AE_OK; -- case 2: -- opregion->ctx.addr |= *value64 & 0xff; -+ if (function == ACPI_READ && address == 3) { -+ *value64 = opregion->ctx.val; - return AE_OK; -- case 3: -- opregion->ctx.val = *value64 & 0xff; -- return AE_OK; -- case 4: -- if (*value64) { -- result = regmap_write(opregion->regmap, opregion->ctx.addr, -- opregion->ctx.val); -- } else { -- result = regmap_read(opregion->regmap, opregion->ctx.addr, -- &opregion->ctx.val); -- if (result == 0) -- *value64 = opregion->ctx.val; -- } -- memset(&opregion->ctx, 0x00, sizeof(opregion->ctx)); - } - - if (result < 0) { -diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c -index f0ed4414edb1f..c95eedd58f5bf 100644 ---- a/drivers/acpi/power.c -+++ b/drivers/acpi/power.c -@@ -52,7 +52,6 @@ struct acpi_power_resource { - u32 order; - unsigned int ref_count; - u8 state; -- bool wakeup_enabled; - struct mutex resource_lock; - struct list_head dependents; - }; -@@ -615,20 +614,19 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p) - - list_for_each_entry(entry, list, node) { - struct acpi_power_resource *resource = entry->resource; -- int result; - u8 state; - - mutex_lock(&resource->resource_lock); - -- result = acpi_power_get_state(resource, &state); -- if (result) { -- mutex_unlock(&resource->resource_lock); -- return result; -- } -- if (state == ACPI_POWER_RESOURCE_STATE_ON) { -- resource->ref_count++; -- resource->wakeup_enabled = true; -- } -+ /* -+ * Make sure that the power resource state and its reference -+ * counter value are consistent with each other. -+ */ -+ if (!resource->ref_count && -+ !acpi_power_get_state(resource, &state) && -+ state == ACPI_POWER_RESOURCE_STATE_ON) -+ __acpi_power_off(resource); -+ - if (system_level > resource->system_level) - system_level = resource->system_level; - -@@ -711,7 +709,6 @@ int acpi_device_sleep_wake(struct acpi_device *dev, - */ - int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) - { -- struct acpi_power_resource_entry *entry; - int err = 0; - - if (!dev || !dev->wakeup.flags.valid) -@@ -722,26 +719,13 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state) - if (dev->wakeup.prepare_count++) - goto out; - -- list_for_each_entry(entry, &dev->wakeup.resources, node) { -- struct acpi_power_resource *resource = entry->resource; -- -- mutex_lock(&resource->resource_lock); -- -- if (!resource->wakeup_enabled) { -- err = acpi_power_on_unlocked(resource); -- if (!err) -- resource->wakeup_enabled = true; -- } -- -- mutex_unlock(&resource->resource_lock); -- -- if (err) { -- dev_err(&dev->dev, -- "Cannot turn wakeup power resources on\n"); -- dev->wakeup.flags.valid = 0; -- goto out; -- } -+ err = acpi_power_on_list(&dev->wakeup.resources); -+ if (err) { -+ dev_err(&dev->dev, "Cannot turn on wakeup power resources\n"); -+ dev->wakeup.flags.valid = 0; -+ goto out; - } -+ - /* - * Passing 3 as the third argument below means the device may be - * put into arbitrary power state afterward. -@@ -771,39 +755,31 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev) - - mutex_lock(&acpi_device_lock); - -- if (--dev->wakeup.prepare_count > 0) -+ /* Do nothing if wakeup power has not been enabled for this device. 
*/ -+ if (dev->wakeup.prepare_count <= 0) - goto out; - -- /* -- * Executing the code below even if prepare_count is already zero when -- * the function is called may be useful, for example for initialisation. -- */ -- if (dev->wakeup.prepare_count < 0) -- dev->wakeup.prepare_count = 0; -+ if (--dev->wakeup.prepare_count > 0) -+ goto out; - - err = acpi_device_sleep_wake(dev, 0, 0, 0); - if (err) - goto out; - -+ /* -+ * All of the power resources in the list need to be turned off even if -+ * there are errors. -+ */ - list_for_each_entry(entry, &dev->wakeup.resources, node) { -- struct acpi_power_resource *resource = entry->resource; -- -- mutex_lock(&resource->resource_lock); -- -- if (resource->wakeup_enabled) { -- err = acpi_power_off_unlocked(resource); -- if (!err) -- resource->wakeup_enabled = false; -- } -- -- mutex_unlock(&resource->resource_lock); -+ int ret; - -- if (err) { -- dev_err(&dev->dev, -- "Cannot turn wakeup power resources off\n"); -- dev->wakeup.flags.valid = 0; -- break; -- } -+ ret = acpi_power_off(entry->resource); -+ if (ret && !err) -+ err = ret; -+ } -+ if (err) { -+ dev_err(&dev->dev, "Cannot turn off wakeup power resources\n"); -+ dev->wakeup.flags.valid = 0; - } - - out: -diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c -index 89c22bc550570..09c0af8a46f0a 100644 ---- a/drivers/acpi/prmt.c -+++ b/drivers/acpi/prmt.c -@@ -219,6 +219,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function, - efi_status_t status; - struct prm_context_buffer context; - -+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) { -+ pr_err_ratelimited("PRM: EFI runtime services no longer available\n"); -+ return AE_NO_HANDLER; -+ } -+ - /* - * The returned acpi_status will always be AE_OK. Error values will be - * saved in the first byte of the PRM message buffer to be used by ASL. -@@ -308,6 +313,11 @@ void __init init_prmt(void) - - pr_info("PRM: found %u modules\n", mc); - -+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) { -+ pr_err("PRM: EFI runtime services unavailable\n"); -+ return; -+ } -+ - status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, - ACPI_ADR_SPACE_PLATFORM_RT, - &acpi_platformrt_space_handler, -diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c -index f37fba9e5ba0b..e9116db1e3527 100644 ---- a/drivers/acpi/processor_idle.c -+++ b/drivers/acpi/processor_idle.c -@@ -531,10 +531,27 @@ static void wait_for_freeze(void) - /* No delay is needed if we are in guest */ - if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) - return; -+ /* -+ * Modern (>=Nehalem) Intel systems use ACPI via intel_idle, -+ * not this code. Assume that any Intel systems using this -+ * are ancient and may need the dummy wait. This also assumes -+ * that the motivating chipset issue was Intel-only. -+ */ -+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) -+ return; - #endif -- /* Dummy wait op - must do something useless after P_LVL2 read -- because chipsets cannot guarantee that STPCLK# signal -- gets asserted in time to freeze execution properly. */ -+ /* -+ * Dummy wait op - must do something useless after P_LVL2 read -+ * because chipsets cannot guarantee that STPCLK# signal gets -+ * asserted in time to freeze execution properly -+ * -+ * This workaround has been in place since the original ACPI -+ * implementation was merged, circa 2002. -+ * -+ * If a profile is pointing to this instruction, please first -+ * consider moving your system to a more modern idle -+ * mechanism. 
-+ */ - inl(acpi_gbl_FADT.xpm_timer_block.address); - } - -@@ -604,7 +621,7 @@ static DEFINE_RAW_SPINLOCK(c3_lock); - * @cx: Target state context - * @index: index of target state - */ --static int acpi_idle_enter_bm(struct cpuidle_driver *drv, -+static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv, - struct acpi_processor *pr, - struct acpi_processor_cx *cx, - int index) -@@ -661,7 +678,7 @@ static int acpi_idle_enter_bm(struct cpuidle_driver *drv, - return index; - } - --static int acpi_idle_enter(struct cpuidle_device *dev, -+static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) - { - struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); -@@ -690,7 +707,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev, - return index; - } - --static int acpi_idle_enter_s2idle(struct cpuidle_device *dev, -+static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) - { - struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); -@@ -789,9 +806,11 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr) - state->enter = acpi_idle_enter; - - state->flags = 0; -- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { -+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 || -+ cx->type == ACPI_STATE_C3) { - state->enter_dead = acpi_idle_play_dead; -- drv->safe_state_index = count; -+ if (cx->type != ACPI_STATE_C3) -+ drv->safe_state_index = count; - } - /* - * Halt-induced C1 is not good for ->enter_s2idle, because it -@@ -1075,6 +1094,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, - return 0; - } - -+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -+{ -+ return -EOPNOTSUPP; -+} -+ - static int acpi_processor_get_lpi_info(struct acpi_processor *pr) - { - int ret, i; -@@ -1083,6 +1107,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) - struct acpi_device *d = NULL; - struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; - -+ /* make sure our architecture has support */ -+ ret = acpi_processor_ffh_lpi_probe(pr->id); -+ if (ret == -EOPNOTSUPP) -+ return ret; -+ - if (!osc_pc_lpi_support_confirmed) - return -EOPNOTSUPP; - -@@ -1134,11 +1163,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) - return 0; - } - --int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) --{ -- return -ENODEV; --} -- - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) - { - return -ENODEV; -diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c -index 8c3f82c9fff35..18fb04523f93b 100644 ---- a/drivers/acpi/processor_pdc.c -+++ b/drivers/acpi/processor_pdc.c -@@ -14,6 +14,8 @@ - #include - #include - -+#include -+ - #include "internal.h" - - static bool __init processor_physically_present(acpi_handle handle) -@@ -47,6 +49,15 @@ static bool __init processor_physically_present(acpi_handle handle) - return false; - } - -+ if (xen_initial_domain()) -+ /* -+ * When running as a Xen dom0 the number of processors Linux -+ * sees can be different from the real number of processors on -+ * the system, and we still need to execute _PDC for all of -+ * them. -+ */ -+ return xen_processor_present(acpi_id); -+ - type = (acpi_type == ACPI_TYPE_DEVICE) ? 
1 : 0; - cpuid = acpi_get_cpuid(handle, type, acpi_id); - -diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c -index 757a98f6d7a24..1696700fd2fb5 100644 ---- a/drivers/acpi/processor_perflib.c -+++ b/drivers/acpi/processor_perflib.c -@@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) - { - acpi_status status = 0; - unsigned long long ppc = 0; -+ s32 qos_value; -+ int index; - int ret; - - if (!pr) -@@ -72,17 +74,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) - } - } - -+ index = ppc; -+ -+ if (pr->performance_platform_limit == index || -+ ppc >= pr->performance->state_count) -+ return 0; -+ - pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, -- (int)ppc, ppc ? "" : "not"); -+ index, index ? "is" : "is not"); - -- pr->performance_platform_limit = (int)ppc; -+ pr->performance_platform_limit = index; - -- if (ppc >= pr->performance->state_count || -- unlikely(!freq_qos_request_active(&pr->perflib_req))) -+ if (unlikely(!freq_qos_request_active(&pr->perflib_req))) - return 0; - -- ret = freq_qos_update_request(&pr->perflib_req, -- pr->performance->states[ppc].core_frequency * 1000); -+ /* -+ * If _PPC returns 0, it means that all of the available states can be -+ * used ("no limit"). -+ */ -+ if (index == 0) -+ qos_value = FREQ_QOS_MAX_DEFAULT_VALUE; -+ else -+ qos_value = pr->performance->states[index].core_frequency * 1000; -+ -+ ret = freq_qos_update_request(&pr->perflib_req, qos_value); - if (ret < 0) { - pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n", - pr->id, ret); -@@ -165,9 +180,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) - if (!pr) - continue; - -+ /* -+ * Reset performance_platform_limit in case there is a stale -+ * value in it, so as to make it match the "no limit" QoS value -+ * below. 
-+ */ -+ pr->performance_platform_limit = 0; -+ - ret = freq_qos_add_request(&policy->constraints, -- &pr->perflib_req, -- FREQ_QOS_MAX, INT_MAX); -+ &pr->perflib_req, FREQ_QOS_MAX, -+ FREQ_QOS_MAX_DEFAULT_VALUE); - if (ret < 0) - pr_err("Failed to add freq constraint for CPU%d (%d)\n", - cpu, ret); -diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c -index a3d34e3f9f94b..921a0b5a58e58 100644 ---- a/drivers/acpi/processor_thermal.c -+++ b/drivers/acpi/processor_thermal.c -@@ -144,7 +144,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) - unsigned int cpu; - - for_each_cpu(cpu, policy->related_cpus) { -- struct acpi_processor *pr = per_cpu(processors, policy->cpu); -+ struct acpi_processor *pr = per_cpu(processors, cpu); - - if (pr) - freq_qos_remove_request(&pr->thermal_req); -diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c -index e312ebaed8db4..488915328646e 100644 ---- a/drivers/acpi/property.c -+++ b/drivers/acpi/property.c -@@ -155,10 +155,10 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, - return acpi_nondev_subnode_data_ok(handle, link, list, parent); - } - --static int acpi_add_nondev_subnodes(acpi_handle scope, -- const union acpi_object *links, -- struct list_head *list, -- struct fwnode_handle *parent) -+static bool acpi_add_nondev_subnodes(acpi_handle scope, -+ const union acpi_object *links, -+ struct list_head *list, -+ struct fwnode_handle *parent) - { - bool ret = false; - int i; -@@ -433,6 +433,16 @@ void acpi_init_properties(struct acpi_device *adev) - acpi_extract_apple_properties(adev); - } - -+static void acpi_free_device_properties(struct list_head *list) -+{ -+ struct acpi_device_properties *props, *tmp; -+ -+ list_for_each_entry_safe(props, tmp, list, list) { -+ list_del(&props->list); -+ kfree(props); -+ } -+} -+ - static void acpi_destroy_nondev_subnodes(struct list_head *list) - { - struct acpi_data_node *dn, *next; -@@ -445,22 +455,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list) - wait_for_completion(&dn->kobj_done); - list_del(&dn->sibling); - ACPI_FREE((void *)dn->data.pointer); -+ acpi_free_device_properties(&dn->data.properties); - kfree(dn); - } - } - - void acpi_free_properties(struct acpi_device *adev) - { -- struct acpi_device_properties *props, *tmp; -- - acpi_destroy_nondev_subnodes(&adev->data.subnodes); - ACPI_FREE((void *)adev->data.pointer); - adev->data.of_compatible = NULL; - adev->data.pointer = NULL; -- list_for_each_entry_safe(props, tmp, &adev->data.properties, list) { -- list_del(&props->list); -- kfree(props); -- } -+ acpi_free_device_properties(&adev->data.properties); - } - - /** -@@ -685,7 +691,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, - */ - if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { - if (index) -- return -EINVAL; -+ return -ENOENT; - - ret = acpi_bus_get_device(obj->reference.handle, &device); - if (ret) -@@ -1090,15 +1096,10 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode) - /* All data nodes have parent pointer so just return that */ - return to_acpi_data_node(fwnode)->parent; - } else if (is_acpi_device_node(fwnode)) { -- acpi_handle handle, parent_handle; -+ struct device *dev = to_acpi_device_node(fwnode)->dev.parent; - -- handle = to_acpi_device_node(fwnode)->handle; -- if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) { -- struct acpi_device *adev; -- -- if (!acpi_bus_get_device(parent_handle, &adev)) -- return acpi_fwnode_handle(adev); -- } -+ if (dev) 
-+ return acpi_fwnode_handle(to_acpi_device(dev)); - } - - return NULL; -diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c -index ee78a210c6068..b0c7ae50a8d79 100644 ---- a/drivers/acpi/resource.c -+++ b/drivers/acpi/resource.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_X86 - #define valid_IRQ(i) (((i) != 0) && ((i) != 2)) -@@ -380,9 +381,157 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity) - } - EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type); - -+static const struct dmi_system_id medion_laptop[] = { -+ { -+ .ident = "MEDION P15651", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), -+ DMI_MATCH(DMI_BOARD_NAME, "M15T"), -+ }, -+ }, -+ { -+ .ident = "MEDION S17405", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), -+ DMI_MATCH(DMI_BOARD_NAME, "M17T"), -+ }, -+ }, -+ { -+ .ident = "MEDION S17413", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), -+ DMI_MATCH(DMI_BOARD_NAME, "M1xA"), -+ }, -+ }, -+ { } -+}; -+ -+static const struct dmi_system_id asus_laptop[] = { -+ { -+ .ident = "Asus Vivobook K3402ZA", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -+ DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"), -+ }, -+ }, -+ { -+ .ident = "Asus Vivobook K3502ZA", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -+ DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"), -+ }, -+ }, -+ { } -+}; -+ -+static const struct dmi_system_id lenovo_laptop[] = { -+ { -+ .ident = "LENOVO IdeaPad Flex 5 14ALC7", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "82R9"), -+ }, -+ }, -+ { -+ .ident = "LENOVO IdeaPad Flex 5 16ALC7", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "82RA"), -+ }, -+ }, -+ { } -+}; -+ -+static const struct dmi_system_id tongfang_gm_rg[] = { -+ { -+ .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), -+ }, -+ }, -+ { } -+}; -+ -+static const struct dmi_system_id maingear_laptop[] = { -+ { -+ .ident = "MAINGEAR Vector Pro 2 15", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), -+ } -+ }, -+ { -+ .ident = "MAINGEAR Vector Pro 2 17", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"), -+ }, -+ }, -+ { } -+}; -+ -+static const struct dmi_system_id lg_laptop[] = { -+ { -+ .ident = "LG Electronics 17U70P", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), -+ DMI_MATCH(DMI_BOARD_NAME, "17U70P"), -+ }, -+ }, -+ { } -+}; -+ -+struct irq_override_cmp { -+ const struct dmi_system_id *system; -+ unsigned char irq; -+ unsigned char triggering; -+ unsigned char polarity; -+ unsigned char shareable; -+ bool override; -+}; -+ -+static const struct irq_override_cmp override_table[] = { -+ { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, -+ { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, -+ { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, -+ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, -+ { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, -+ { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, -+ { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, -+}; -+ -+static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, -+ u8 shareable) -+{ -+ int i; 
-+ -+ for (i = 0; i < ARRAY_SIZE(override_table); i++) { -+ const struct irq_override_cmp *entry = &override_table[i]; -+ -+ if (dmi_check_system(entry->system) && -+ entry->irq == gsi && -+ entry->triggering == triggering && -+ entry->polarity == polarity && -+ entry->shareable == shareable) -+ return entry->override; -+ } -+ -+#ifdef CONFIG_X86 -+ /* -+ * IRQ override isn't needed on modern AMD Zen systems and -+ * this override breaks active low IRQs on AMD Ryzen 6000 and -+ * newer systems. Skip it. -+ */ -+ if (boot_cpu_has(X86_FEATURE_ZEN)) -+ return false; -+#endif -+ -+ return true; -+} -+ - static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, - u8 triggering, u8 polarity, u8 shareable, -- bool legacy) -+ bool check_override) - { - int irq, p, t; - -@@ -401,7 +550,9 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, - * using extended IRQ descriptors we take the IRQ configuration - * from _CRS directly. - */ -- if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { -+ if (check_override && -+ acpi_dev_irq_override(gsi, triggering, polarity, shareable) && -+ !acpi_get_override_irq(gsi, &t, &p)) { - u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; - u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; - -@@ -656,6 +807,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list) - } - EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources); - -+/** -+ * acpi_dev_get_memory_resources - Get current memory resources of a device. -+ * @adev: ACPI device node to get the resources for. -+ * @list: Head of the resultant list of resources (must be empty). -+ * -+ * This is a helper function that locates all memory type resources of @adev -+ * with acpi_dev_get_resources(). -+ * -+ * The number of resources in the output list is returned on success, an error -+ * code reflecting the error condition is returned otherwise. -+ */ -+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list) -+{ -+ return acpi_dev_get_resources(adev, list, is_memory, NULL); -+} -+EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources); -+ - /** - * acpi_dev_filter_resource_type - Filter ACPI resource according to resource - * types -diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c -index 5b54c80b9d32a..ae74720888dbf 100644 ---- a/drivers/acpi/scan.c -+++ b/drivers/acpi/scan.c -@@ -793,6 +793,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info, - static const char * const acpi_ignore_dep_ids[] = { - "PNP0D80", /* Windows-compatible System Power Management Controller */ - "INT33BD", /* Intel Baytrail Mailbox Device */ -+ "LATT2021", /* Lattice FW Update Client Driver */ - NULL - }; - -@@ -1690,6 +1691,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) - { - struct list_head resource_list; - bool is_serial_bus_slave = false; -+ static const struct acpi_device_id ignore_serial_bus_ids[] = { - /* - * These devices have multiple I2cSerialBus resources and an i2c-client - * must be instantiated for each, each with its own i2c_device_id. -@@ -1698,11 +1700,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) - * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows - * which i2c_device_id to use for each resource. 
- */ -- static const struct acpi_device_id i2c_multi_instantiate_ids[] = { - {"BSG1160", }, - {"BSG2150", }, - {"INT33FE", }, - {"INT3515", }, -+ /* -+ * HIDs of device with an UartSerialBusV2 resource for which userspace -+ * expects a regular tty cdev to be created (instead of the in kernel -+ * serdev) and which have a kernel driver which expects a platform_dev -+ * such as the rfkill-gpio driver. -+ */ -+ {"BCM4752", }, -+ {"LNV4752", }, - {} - }; - -@@ -1716,8 +1725,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) - fwnode_property_present(&device->fwnode, "baud"))) - return true; - -- /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */ -- if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids)) -+ if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) - return false; - - INIT_LIST_HEAD(&resource_list); -diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c -index 3023224515abe..b277e25b276ce 100644 ---- a/drivers/acpi/sleep.c -+++ b/drivers/acpi/sleep.c -@@ -361,6 +361,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { - DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), - }, - }, -+ { -+ .callback = init_nvs_save_s3, -+ .ident = "Lenovo G40-45", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), -+ }, -+ }, - /* - * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using - * the Low Power S0 Idle firmware interface (see -@@ -374,6 +382,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { - DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), - }, - }, -+ /* -+ * ASUS B1400CEAE hangs on resume from suspend (see -+ * https://bugzilla.kernel.org/show_bug.cgi?id=215742). -+ */ -+ { -+ .callback = init_default_s3, -+ .ident = "ASUS B1400CEAE", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"), -+ }, -+ }, - {}, - }; - -@@ -615,11 +635,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state) - } - - /* -- * Disable and clear GPE status before interrupt is enabled. Some GPEs -- * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. -- * acpi_leave_sleep_state will reenable specific GPEs later -+ * Disable all GPE and clear their status bits before interrupts are -+ * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can -+ * prevent them from producing spurious interrups. -+ * -+ * acpi_leave_sleep_state() will reenable specific GPEs later. -+ * -+ * Because this code runs on one CPU with disabled interrupts (all of -+ * the other CPUs are offline at this time), it need not acquire any -+ * sleeping locks which may trigger an implicit preemption point even -+ * if there is no contention, so avoid doing that by using a low-level -+ * library routine here. - */ -- acpi_disable_all_gpes(); -+ acpi_hw_disable_all_gpes(); - /* Allow EC transactions to happen. 
*/ - acpi_ec_unblock_transactions(); - -@@ -767,6 +795,7 @@ bool acpi_s2idle_wake(void) - return true; - } - -+ pm_wakeup_clear(acpi_sci_irq); - rearm_wake_irq(acpi_sci_irq); - } - -diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c -index 00c0ebaab29f7..6e23b76aef5dc 100644 ---- a/drivers/acpi/sysfs.c -+++ b/drivers/acpi/sysfs.c -@@ -415,19 +415,30 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj, - loff_t offset, size_t count) - { - struct acpi_data_attr *data_attr; -- void *base; -- ssize_t rc; -+ void __iomem *base; -+ ssize_t size; - - data_attr = container_of(bin_attr, struct acpi_data_attr, attr); -+ size = data_attr->attr.size; -+ -+ if (offset < 0) -+ return -EINVAL; -+ -+ if (offset >= size) -+ return 0; - -- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size); -+ if (count > size - offset) -+ count = size - offset; -+ -+ base = acpi_os_map_iomem(data_attr->addr, size); - if (!base) - return -ENOMEM; -- rc = memory_read_from_buffer(buf, count, &offset, base, -- data_attr->attr.size); -- acpi_os_unmap_memory(base, data_attr->attr.size); - -- return rc; -+ memcpy_fromio(buf, base + offset, count); -+ -+ acpi_os_unmap_iomem(base, size); -+ -+ return count; - } - - static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr) -diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c -index 95105db642b98..809e12b941235 100644 ---- a/drivers/acpi/thermal.c -+++ b/drivers/acpi/thermal.c -@@ -59,10 +59,6 @@ static int tzp; - module_param(tzp, int, 0444); - MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds."); - --static int nocrt; --module_param(nocrt, int, 0); --MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points."); -- - static int off; - module_param(off, int, 0); - MODULE_PARM_DESC(off, "Set to disable ACPI thermal support."); -@@ -1098,8 +1094,6 @@ static int acpi_thermal_resume(struct device *dev) - return -EINVAL; - - for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { -- if (!(&tz->trips.active[i])) -- break; - if (!tz->trips.active[i].flags.valid) - break; - tz->trips.active[i].flags.enabled = 1; -@@ -1134,7 +1128,7 @@ static int thermal_nocrt(const struct dmi_system_id *d) { - - pr_notice("%s detected: disabling all critical thermal trip point actions.\n", - d->ident); -- nocrt = 1; -+ crt = -1; - return 0; - } - static int thermal_tzp(const struct dmi_system_id *d) { -diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c -index 33474fd969913..038542b3a80a7 100644 ---- a/drivers/acpi/video_detect.c -+++ b/drivers/acpi/video_detect.c -@@ -313,7 +313,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = { - .ident = "Lenovo Ideapad Z570", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -- DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), - }, - }, - { -@@ -409,7 +409,161 @@ static const struct dmi_system_id video_detect_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), - }, - }, -- -+ /* -+ * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a -+ * working native and video interface. However the default detection -+ * mechanism first registers the video interface before unregistering -+ * it again and switching to the native interface during boot. This -+ * results in a dangling SBIOS request for backlight change for some -+ * reason, causing the backlight to switch to ~2% once per boot on the -+ * first power cord connect or disconnect event. 
Setting the native -+ * interface explicitly circumvents this buggy behaviour, by avoiding -+ * the unregistering process. -+ */ -+ { -+ .callback = video_detect_force_native, -+ .ident = "Clevo NL5xRU", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "Clevo NL5xRU", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "Clevo NL5xRU", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "Clevo NL5xNU", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), -+ }, -+ }, -+ /* -+ * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10, -+ * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo -+ * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description -+ * above. -+ */ -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF5PU1G", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF4NU1F", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF4NU1F", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF5NU1G", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF5NU1G", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang PF5LUXG", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"), -+ }, -+ }, -+ /* -+ * More Tongfang devices with the same issue as the Clevo NL5xRU and -+ * NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above. 
-+ */ -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GKxNRxx", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GKxNRxx", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GKxNRxx", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GKxNRxx", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GKxNRxx", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), -+ DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GMxNGxx", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GMxZGxx", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"), -+ }, -+ }, -+ { -+ .callback = video_detect_force_native, -+ .ident = "TongFang GMxRGxx", -+ .matches = { -+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), -+ }, -+ }, - /* - * Desktops which falsely report a backlight and which our heuristics - * for this do not catch. -diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c -index d2256326c73ae..fe4b66dae01b5 100644 ---- a/drivers/acpi/viot.c -+++ b/drivers/acpi/viot.c -@@ -248,6 +248,26 @@ err_free: - return ret; - } - -+/** -+ * acpi_viot_early_init - Test the presence of VIOT and enable ACS -+ * -+ * If the VIOT does exist, ACS must be enabled. This cannot be -+ * done in acpi_viot_init() which is called after the bus scan -+ */ -+void __init acpi_viot_early_init(void) -+{ -+#ifdef CONFIG_PCI -+ acpi_status status; -+ struct acpi_table_header *hdr; -+ -+ status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr); -+ if (ACPI_FAILURE(status)) -+ return; -+ pci_request_acs(); -+ acpi_put_table(hdr); -+#endif -+} -+ - /** - * acpi_viot_init - Parse the VIOT table - * -@@ -309,6 +329,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) - { - u32 epid; - struct viot_endpoint *ep; -+ struct device *aliased_dev = data; - u32 domain_nr = pci_domain_nr(pdev->bus); - - list_for_each_entry(ep, &viot_pci_ranges, list) { -@@ -319,13 +340,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) - epid = ((domain_nr - ep->segment_start) << 16) + - dev_id - ep->bdf_start + ep->endpoint_id; - -- /* -- * If we found a PCI range managed by the viommu, we're -- * the one that has to request ACS. 
-- */ -- pci_request_acs(); -- -- return viot_dev_iommu_init(&pdev->dev, ep->viommu, -+ return viot_dev_iommu_init(aliased_dev, ep->viommu, - epid); - } - } -@@ -359,7 +374,7 @@ int viot_iommu_configure(struct device *dev) - { - if (dev_is_pci(dev)) - return pci_for_each_dma_alias(to_pci_dev(dev), -- viot_pci_dev_iommu_init, NULL); -+ viot_pci_dev_iommu_init, dev); - else if (dev_is_platform(dev)) - return viot_mmio_dev_iommu_init(to_platform_device(dev)); - return -ENODEV; -diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c -index 1c48358b43ba3..946e0160ad3bf 100644 ---- a/drivers/acpi/x86/s2idle.c -+++ b/drivers/acpi/x86/s2idle.c -@@ -86,6 +86,8 @@ struct lpi_device_constraint_amd { - int min_dstate; - }; - -+static LIST_HEAD(lps0_s2idle_devops_head); -+ - static struct lpi_constraints *lpi_constraints_table; - static int lpi_constraints_table_size; - static int rev_id; -@@ -119,17 +121,16 @@ static void lpi_device_get_constraints_amd(void) - acpi_handle_debug(lps0_device_handle, - "LPI: constraints list begin:\n"); - -- for (j = 0; j < package->package.count; ++j) { -+ for (j = 0; j < package->package.count; j++) { - union acpi_object *info_obj = &package->package.elements[j]; - struct lpi_device_constraint_amd dev_info = {}; - struct lpi_constraints *list; - acpi_status status; - -- for (k = 0; k < info_obj->package.count; ++k) { -- union acpi_object *obj = &info_obj->package.elements[k]; -+ list = &lpi_constraints_table[lpi_constraints_table_size]; - -- list = &lpi_constraints_table[lpi_constraints_table_size]; -- list->min_dstate = -1; -+ for (k = 0; k < info_obj->package.count; k++) { -+ union acpi_object *obj = &info_obj->package.elements[k]; - - switch (k) { - case 0: -@@ -145,27 +146,21 @@ static void lpi_device_get_constraints_amd(void) - dev_info.min_dstate = obj->integer.value; - break; - } -+ } - -- if (!dev_info.enabled || !dev_info.name || -- !dev_info.min_dstate) -- continue; -+ if (!dev_info.enabled || !dev_info.name || -+ !dev_info.min_dstate) -+ continue; - -- status = acpi_get_handle(NULL, dev_info.name, -- &list->handle); -- if (ACPI_FAILURE(status)) -- continue; -+ status = acpi_get_handle(NULL, dev_info.name, &list->handle); -+ if (ACPI_FAILURE(status)) -+ continue; - -- acpi_handle_debug(lps0_device_handle, -- "Name:%s\n", dev_info.name); -+ acpi_handle_debug(lps0_device_handle, -+ "Name:%s\n", dev_info.name); - -- list->min_dstate = dev_info.min_dstate; -+ list->min_dstate = dev_info.min_dstate; - -- if (list->min_dstate < 0) { -- acpi_handle_debug(lps0_device_handle, -- "Incomplete constraint defined\n"); -- continue; -- } -- } - lpi_constraints_table_size++; - } - } -@@ -210,7 +205,7 @@ static void lpi_device_get_constraints(void) - if (!package) - continue; - -- for (j = 0; j < package->package.count; ++j) { -+ for (j = 0; j < package->package.count; j++) { - union acpi_object *element = - &(package->package.elements[j]); - -@@ -242,7 +237,7 @@ static void lpi_device_get_constraints(void) - - constraint->min_dstate = -1; - -- for (j = 0; j < package_count; ++j) { -+ for (j = 0; j < package_count; j++) { - union acpi_object *info_obj = &info.package[j]; - union acpi_object *cnstr_pkg; - union acpi_object *obj; -@@ -378,16 +373,13 @@ static int lps0_device_attach(struct acpi_device *adev, - * AMDI0006: - * - should use rev_id 0x0 - * - function mask = 0x3: Should use Microsoft method -- * AMDI0007: -- * - Should use rev_id 0x2 -- * - Should only use AMD method - */ - const char *hid = acpi_device_hid(adev); -- rev_id = strcmp(hid, "AMDI0007") ? 
0 : 2; -+ rev_id = 0; - lps0_dsm_func_mask = validate_dsm(adev->handle, - ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); - lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, -- ACPI_LPS0_DSM_UUID_MICROSOFT, 0, -+ ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id, - &lps0_dsm_guid_microsoft); - if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") || - !strcmp(hid, "AMD0005") || -@@ -395,9 +387,6 @@ static int lps0_device_attach(struct acpi_device *adev, - lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; - acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", - ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); -- } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) { -- lps0_dsm_func_mask_microsoft = -EINVAL; -- acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); - } - } else { - rev_id = 1; -@@ -424,15 +413,11 @@ static int lps0_device_attach(struct acpi_device *adev, - mem_sleep_current = PM_SUSPEND_TO_IDLE; - - /* -- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't -- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while -- * suspended for certain wakeup devices to work, so mark it as wakeup-capable. -- * -- * Only enable on !AMD as enabling this universally causes problems for a number -- * of AMD based systems. -+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the -+ * EC GPE to be enabled while suspended for certain wakeup devices to -+ * work, so mark it as wakeup-capable. - */ -- if (!acpi_s2idle_vendor_amd()) -- acpi_ec_mark_gpe_for_wake(); -+ acpi_ec_mark_gpe_for_wake(); - - return 0; - } -@@ -444,6 +429,8 @@ static struct acpi_scan_handler lps0_handler = { - - int acpi_s2idle_prepare_late(void) - { -+ struct acpi_s2idle_dev_ops *handler; -+ - if (!lps0_device_handle || sleep_no_lps0) - return 0; - -@@ -474,14 +461,26 @@ int acpi_s2idle_prepare_late(void) - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, - lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - } -+ -+ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { -+ if (handler->prepare) -+ handler->prepare(); -+ } -+ - return 0; - } - - void acpi_s2idle_restore_early(void) - { -+ struct acpi_s2idle_dev_ops *handler; -+ - if (!lps0_device_handle || sleep_no_lps0) - return; - -+ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) -+ if (handler->restore) -+ handler->restore(); -+ - /* Modern standby exit */ - if (lps0_dsm_func_mask_microsoft > 0) - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, -@@ -524,4 +523,28 @@ void acpi_s2idle_setup(void) - s2idle_set_ops(&acpi_s2idle_ops_lps0); - } - -+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) -+{ -+ if (!lps0_device_handle || sleep_no_lps0) -+ return -ENODEV; -+ -+ lock_system_sleep(); -+ list_add(&arg->list_node, &lps0_s2idle_devops_head); -+ unlock_system_sleep(); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(acpi_register_lps0_dev); -+ -+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) -+{ -+ if (!lps0_device_handle || sleep_no_lps0) -+ return; -+ -+ lock_system_sleep(); -+ list_del(&arg->list_node); -+ unlock_system_sleep(); -+} -+EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev); -+ - #endif /* CONFIG_SUSPEND */ -diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c -index f22f23933063b..f1dd086d0b87d 100644 ---- a/drivers/acpi/x86/utils.c -+++ b/drivers/acpi/x86/utils.c -@@ -22,58 +22,71 @@ - * Some BIOS-es (temporarily) hide specific APCI devices to work around Windows - * driver bugs. 
We use DMI matching to match known cases of this. - * -- * We work around this by always reporting ACPI_STA_DEFAULT for these -- * devices. Note this MUST only be done for devices where this is safe. -+ * Likewise sometimes some not-actually present devices are sometimes -+ * reported as present, which may cause issues. - * -- * This forcing of devices to be present is limited to specific CPU (SoC) -- * models both to avoid potentially causing trouble on other models and -- * because some HIDs are re-used on different SoCs for completely -- * different devices. -+ * We work around this by using the below quirk list to override the status -+ * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0). -+ * Note this MUST only be done for devices where this is safe. -+ * -+ * This status overriding is limited to specific CPU (SoC) models both to -+ * avoid potentially causing trouble on other models and because some HIDs -+ * are re-used on different SoCs for completely different devices. - */ --struct always_present_id { -+struct override_status_id { - struct acpi_device_id hid[2]; - struct x86_cpu_id cpu_ids[2]; - struct dmi_system_id dmi_ids[2]; /* Optional */ - const char *uid; -+ const char *path; -+ unsigned long long status; - }; - --#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL) -- --#define ENTRY(hid, uid, cpu_models, dmi...) { \ -+#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \ - { { hid, }, {} }, \ -- { cpu_models, {} }, \ -+ { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \ - { { .matches = dmi }, {} }, \ - uid, \ -+ path, \ -+ status, \ - } - --static const struct always_present_id always_present_ids[] = { -+#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ -+ ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi) -+ -+#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ -+ ENTRY(0, hid, uid, NULL, cpu_model, dmi) -+ -+#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ -+ ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi) -+ -+#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ -+ ENTRY(0, "", NULL, path, cpu_model, dmi) -+ -+static const struct override_status_id override_status_ids[] = { - /* - * Bay / Cherry Trail PWM directly poked by GPU driver in win10, - * but Linux uses a separate PWM driver, harmless if not used. - */ -- ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}), -- ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}), -+ PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}), -+ PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}), - -- /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */ -- ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), { -- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), -- }), - /* - * The INT0002 device is necessary to clear wakeup interrupt sources - * on Cherry Trail devices, without it we get nobody cared IRQ msgs. - */ -- ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}), -+ PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}), - /* - * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides - * the touchscreen ACPI device until a certain time - * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed - * *and* _STA has been called at least 3 times since. 
- */ -- ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { -+ PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), - }), -- ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { -+ PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), - }), -@@ -81,54 +94,83 @@ static const struct always_present_id always_present_ids[] = { - /* - * The GPD win BIOS dated 20170221 has disabled the accelerometer, the - * drivers sometimes cause crashes under Windows and this is how the -- * manufacturer has solved this :| Note that the the DMI data is less -- * generic then it seems, a board_vendor of "AMI Corporation" is quite -- * rare and a board_name of "Default String" also is rare. -+ * manufacturer has solved this :| The DMI match may not seem unique, -+ * but it is. In the 67000+ DMI decode dumps from linux-hardware.org -+ * only 116 have board_vendor set to "AMI Corporation" and of those 116 -+ * only the GPD win and pocket entries' board_name is "Default string". - * - * Unfortunately the GPD pocket also uses these strings and its BIOS - * was copy-pasted from the GPD win, so it has a disabled KIOX000A - * node which we should not enable, thus we also check the BIOS date. - */ -- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { -+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { - DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "Default string"), - DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), - DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") - }), -- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { -+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { - DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "Default string"), - DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), - DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") - }), -- ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { -+ PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { - DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "Default string"), - DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), - DMI_MATCH(DMI_BIOS_DATE, "05/25/2017") - }), -+ -+ /* -+ * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO -+ * mmc controller enabled and that has a child-device which _PS3 -+ * method sets a GPIO causing the PCI wifi card to turn off. -+ * See above remark about uniqueness of the DMI match. 
-+ */ -+ NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, { -+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), -+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), -+ DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), -+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), -+ }), - }; - --bool acpi_device_always_present(struct acpi_device *adev) -+bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) - { - bool ret = false; - unsigned int i; - -- for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { -- if (acpi_match_device_ids(adev, always_present_ids[i].hid)) -+ for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) { -+ if (!x86_match_cpu(override_status_ids[i].cpu_ids)) - continue; - -- if (!adev->pnp.unique_id || -- strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) -+ if (override_status_ids[i].dmi_ids[0].matches[0].slot && -+ !dmi_check_system(override_status_ids[i].dmi_ids)) - continue; - -- if (!x86_match_cpu(always_present_ids[i].cpu_ids)) -- continue; -+ if (override_status_ids[i].path) { -+ struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL }; -+ bool match; - -- if (always_present_ids[i].dmi_ids[0].matches[0].slot && -- !dmi_check_system(always_present_ids[i].dmi_ids)) -- continue; -+ if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path)) -+ continue; - -+ match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0; -+ kfree(path.pointer); -+ -+ if (!match) -+ continue; -+ } else { -+ if (acpi_match_device_ids(adev, override_status_ids[i].hid)) -+ continue; -+ -+ if (!adev->pnp.unique_id || -+ strcmp(adev->pnp.unique_id, override_status_ids[i].uid)) -+ continue; -+ } -+ -+ *status = override_status_ids[i].status; - ret = true; - break; - } -@@ -149,10 +191,22 @@ bool acpi_device_always_present(struct acpi_device *adev) - * a hardcoded allowlist for D3 support, which was used for these platforms. - * - * This allows quirking on Linux in a similar fashion. -+ * -+ * Cezanne systems shouldn't *normally* need this as the BIOS includes -+ * StorageD3Enable. But for two reasons we have added it. -+ * 1) The BIOS on a number of Dell systems have ambiguity -+ * between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME. -+ * GPP1.NVME is needed to get StorageD3Enable node set properly. -+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440 -+ * https://bugzilla.kernel.org/show_bug.cgi?id=216773 -+ * https://bugzilla.kernel.org/show_bug.cgi?id=217003 -+ * 2) On at least one HP system StorageD3Enable is missing on the second NVME -+ disk in the system. 
- */ - static const struct x86_cpu_id storage_d3_cpu_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */ -+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */ - {} - }; - -diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c -index 962041148482c..1af5ff9231eb0 100644 ---- a/drivers/amba/bus.c -+++ b/drivers/amba/bus.c -@@ -366,6 +366,7 @@ static void amba_device_release(struct device *dev) - { - struct amba_device *d = to_amba_device(dev); - -+ of_node_put(d->dev.of_node); - if (d->res.parent) - release_resource(&d->res); - kfree(d); -@@ -377,9 +378,6 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent) - void __iomem *tmp; - int i, ret; - -- WARN_ON(dev->irq[0] == (unsigned int)-1); -- WARN_ON(dev->irq[1] == (unsigned int)-1); -- - ret = request_resource(parent, &dev->res); - if (ret) - goto err_out; -diff --git a/drivers/android/binder.c b/drivers/android/binder.c -index 9edacc8b97688..cbbed43baf056 100644 ---- a/drivers/android/binder.c -+++ b/drivers/android/binder.c -@@ -170,8 +170,32 @@ static inline void binder_stats_created(enum binder_stat_types type) - atomic_inc(&binder_stats.obj_created[type]); - } - --struct binder_transaction_log binder_transaction_log; --struct binder_transaction_log binder_transaction_log_failed; -+struct binder_transaction_log_entry { -+ int debug_id; -+ int debug_id_done; -+ int call_type; -+ int from_proc; -+ int from_thread; -+ int target_handle; -+ int to_proc; -+ int to_thread; -+ int to_node; -+ int data_size; -+ int offsets_size; -+ int return_error_line; -+ uint32_t return_error; -+ uint32_t return_error_param; -+ char context_name[BINDERFS_MAX_NAME + 1]; -+}; -+ -+struct binder_transaction_log { -+ atomic_t cur; -+ bool full; -+ struct binder_transaction_log_entry entry[32]; -+}; -+ -+static struct binder_transaction_log binder_transaction_log; -+static struct binder_transaction_log binder_transaction_log_failed; - - static struct binder_transaction_log_entry *binder_transaction_log_add( - struct binder_transaction_log *log) -@@ -1334,6 +1358,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, - } - ret = binder_inc_ref_olocked(ref, strong, target_list); - *rdata = ref->data; -+ if (ret && ref == new_ref) { -+ /* -+ * Cleanup the failed reference here as the target -+ * could now be dead and have already released its -+ * references by now. Calling on the new reference -+ * with strong=0 and a tmp_refs will not decrement -+ * the node. The new_ref gets kfree'd below. -+ */ -+ binder_cleanup_ref_olocked(new_ref); -+ ref = NULL; -+ } -+ - binder_proc_unlock(proc); - if (new_ref && ref != new_ref) - /* -@@ -1608,15 +1644,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t, - /** - * binder_get_object() - gets object and checks for valid metadata - * @proc: binder_proc owning the buffer -+ * @u: sender's user pointer to base of buffer - * @buffer: binder_buffer that we're parsing. - * @offset: offset in the @buffer at which to validate an object. - * @object: struct binder_object to read into - * -- * Return: If there's a valid metadata object at @offset in @buffer, the -+ * Copy the binder object at the given offset into @object. If @u is -+ * provided then the copy is from the sender's buffer. If not, then -+ * it is copied from the target's @buffer. -+ * -+ * Return: If there's a valid metadata object at @offset, the - * size of that object. Otherwise, it returns zero. 
The object - * is read into the struct binder_object pointed to by @object. - */ - static size_t binder_get_object(struct binder_proc *proc, -+ const void __user *u, - struct binder_buffer *buffer, - unsigned long offset, - struct binder_object *object) -@@ -1626,10 +1668,16 @@ static size_t binder_get_object(struct binder_proc *proc, - size_t object_size = 0; - - read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); -- if (offset > buffer->data_size || read_size < sizeof(*hdr) || -- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, -- offset, read_size)) -+ if (offset > buffer->data_size || read_size < sizeof(*hdr)) - return 0; -+ if (u) { -+ if (copy_from_user(object, u + offset, read_size)) -+ return 0; -+ } else { -+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, -+ offset, read_size)) -+ return 0; -+ } - - /* Ok, now see if we read a complete object. */ - hdr = &object->hdr; -@@ -1702,7 +1750,7 @@ static struct binder_buffer_object *binder_validate_ptr( - b, buffer_offset, - sizeof(object_offset))) - return NULL; -- object_size = binder_get_object(proc, b, object_offset, object); -+ object_size = binder_get_object(proc, NULL, b, object_offset, object); - if (!object_size || object->hdr.type != BINDER_TYPE_PTR) - return NULL; - if (object_offsetp) -@@ -1767,7 +1815,8 @@ static bool binder_validate_fixup(struct binder_proc *proc, - unsigned long buffer_offset; - struct binder_object last_object; - struct binder_buffer_object *last_bbo; -- size_t object_size = binder_get_object(proc, b, last_obj_offset, -+ size_t object_size = binder_get_object(proc, NULL, b, -+ last_obj_offset, - &last_object); - if (object_size != sizeof(*last_bbo)) - return false; -@@ -1854,24 +1903,23 @@ static void binder_deferred_fd_close(int fd) - static void binder_transaction_buffer_release(struct binder_proc *proc, - struct binder_thread *thread, - struct binder_buffer *buffer, -- binder_size_t failed_at, -+ binder_size_t off_end_offset, - bool is_failure) - { - int debug_id = buffer->debug_id; -- binder_size_t off_start_offset, buffer_offset, off_end_offset; -+ binder_size_t off_start_offset, buffer_offset; - - binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %llx\n", - proc->pid, buffer->debug_id, - buffer->data_size, buffer->offsets_size, -- (unsigned long long)failed_at); -+ (unsigned long long)off_end_offset); - - if (buffer->target_node) - binder_dec_node(buffer->target_node, 1, 0); - - off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); -- off_end_offset = is_failure ? 
failed_at : -- off_start_offset + buffer->offsets_size; -+ - for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; - buffer_offset += sizeof(binder_size_t)) { - struct binder_object_header *hdr; -@@ -1882,7 +1930,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, - if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, - buffer, buffer_offset, - sizeof(object_offset))) -- object_size = binder_get_object(proc, buffer, -+ object_size = binder_get_object(proc, NULL, buffer, - object_offset, &object); - if (object_size == 0) { - pr_err("transaction release %d bad object at offset %lld, size %zd\n", -@@ -1956,9 +2004,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, - binder_size_t fd_buf_size; - binder_size_t num_valid; - -- if (proc->tsk != current->group_leader) { -+ if (is_failure) { - /* -- * Nothing to do if running in sender context - * The fd fixups have not been applied so no - * fds need to be closed. - */ -@@ -2032,6 +2079,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, - } - } - -+/* Clean up all the objects in the buffer */ -+static inline void binder_release_entire_buffer(struct binder_proc *proc, -+ struct binder_thread *thread, -+ struct binder_buffer *buffer, -+ bool is_failure) -+{ -+ binder_size_t off_end_offset; -+ -+ off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); -+ off_end_offset += buffer->offsets_size; -+ -+ binder_transaction_buffer_release(proc, thread, buffer, -+ off_end_offset, is_failure); -+} -+ - static int binder_translate_binder(struct flat_binder_object *fp, - struct binder_transaction *t, - struct binder_thread *thread) -@@ -2056,7 +2118,7 @@ static int binder_translate_binder(struct flat_binder_object *fp, - ret = -EINVAL; - goto done; - } -- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { -+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { - ret = -EPERM; - goto done; - } -@@ -2102,7 +2164,7 @@ static int binder_translate_handle(struct flat_binder_object *fp, - proc->pid, thread->pid, fp->handle); - return -EINVAL; - } -- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { -+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { - ret = -EPERM; - goto done; - } -@@ -2190,7 +2252,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset, - ret = -EBADF; - goto err_fget; - } -- ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); -+ ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); - if (ret < 0) { - ret = -EPERM; - goto err_security; -@@ -2221,16 +2283,266 @@ err_fd_not_accepted: - return ret; - } - --static int binder_translate_fd_array(struct binder_fd_array_object *fda, -+/** -+ * struct binder_ptr_fixup - data to be fixed-up in target buffer -+ * @offset offset in target buffer to fixup -+ * @skip_size bytes to skip in copy (fixup will be written later) -+ * @fixup_data data to write at fixup offset -+ * @node list node -+ * -+ * This is used for the pointer fixup list (pf) which is created and consumed -+ * during binder_transaction() and is only accessed locally. No -+ * locking is necessary. -+ * -+ * The list is ordered by @offset. 
-+ */ -+struct binder_ptr_fixup { -+ binder_size_t offset; -+ size_t skip_size; -+ binder_uintptr_t fixup_data; -+ struct list_head node; -+}; -+ -+/** -+ * struct binder_sg_copy - scatter-gather data to be copied -+ * @offset offset in target buffer -+ * @sender_uaddr user address in source buffer -+ * @length bytes to copy -+ * @node list node -+ * -+ * This is used for the sg copy list (sgc) which is created and consumed -+ * during binder_transaction() and is only accessed locally. No -+ * locking is necessary. -+ * -+ * The list is ordered by @offset. -+ */ -+struct binder_sg_copy { -+ binder_size_t offset; -+ const void __user *sender_uaddr; -+ size_t length; -+ struct list_head node; -+}; -+ -+/** -+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data -+ * @alloc: binder_alloc associated with @buffer -+ * @buffer: binder buffer in target process -+ * @sgc_head: list_head of scatter-gather copy list -+ * @pf_head: list_head of pointer fixup list -+ * -+ * Processes all elements of @sgc_head, applying fixups from @pf_head -+ * and copying the scatter-gather data from the source process' user -+ * buffer to the target's buffer. It is expected that the list creation -+ * and processing all occurs during binder_transaction() so these lists -+ * are only accessed in local context. -+ * -+ * Return: 0=success, else -errno -+ */ -+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, -+ struct binder_buffer *buffer, -+ struct list_head *sgc_head, -+ struct list_head *pf_head) -+{ -+ int ret = 0; -+ struct binder_sg_copy *sgc, *tmpsgc; -+ struct binder_ptr_fixup *tmppf; -+ struct binder_ptr_fixup *pf = -+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup, -+ node); -+ -+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { -+ size_t bytes_copied = 0; -+ -+ while (bytes_copied < sgc->length) { -+ size_t copy_size; -+ size_t bytes_left = sgc->length - bytes_copied; -+ size_t offset = sgc->offset + bytes_copied; -+ -+ /* -+ * We copy up to the fixup (pointed to by pf) -+ */ -+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) -+ : bytes_left; -+ if (!ret && copy_size) -+ ret = binder_alloc_copy_user_to_buffer( -+ alloc, buffer, -+ offset, -+ sgc->sender_uaddr + bytes_copied, -+ copy_size); -+ bytes_copied += copy_size; -+ if (copy_size != bytes_left) { -+ BUG_ON(!pf); -+ /* we stopped at a fixup offset */ -+ if (pf->skip_size) { -+ /* -+ * we are just skipping. This is for -+ * BINDER_TYPE_FDA where the translated -+ * fds will be fixed up when we get -+ * to target context. -+ */ -+ bytes_copied += pf->skip_size; -+ } else { -+ /* apply the fixup indicated by pf */ -+ if (!ret) -+ ret = binder_alloc_copy_to_buffer( -+ alloc, buffer, -+ pf->offset, -+ &pf->fixup_data, -+ sizeof(pf->fixup_data)); -+ bytes_copied += sizeof(pf->fixup_data); -+ } -+ list_del(&pf->node); -+ kfree(pf); -+ pf = list_first_entry_or_null(pf_head, -+ struct binder_ptr_fixup, node); -+ } -+ } -+ list_del(&sgc->node); -+ kfree(sgc); -+ } -+ list_for_each_entry_safe(pf, tmppf, pf_head, node) { -+ BUG_ON(pf->skip_size == 0); -+ list_del(&pf->node); -+ kfree(pf); -+ } -+ BUG_ON(!list_empty(sgc_head)); -+ -+ return ret > 0 ? -EINVAL : ret; -+} -+ -+/** -+ * binder_cleanup_deferred_txn_lists() - free specified lists -+ * @sgc_head: list_head of scatter-gather copy list -+ * @pf_head: list_head of pointer fixup list -+ * -+ * Called to clean up @sgc_head and @pf_head if there is an -+ * error. 
-+ */ -+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, -+ struct list_head *pf_head) -+{ -+ struct binder_sg_copy *sgc, *tmpsgc; -+ struct binder_ptr_fixup *pf, *tmppf; -+ -+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { -+ list_del(&sgc->node); -+ kfree(sgc); -+ } -+ list_for_each_entry_safe(pf, tmppf, pf_head, node) { -+ list_del(&pf->node); -+ kfree(pf); -+ } -+} -+ -+/** -+ * binder_defer_copy() - queue a scatter-gather buffer for copy -+ * @sgc_head: list_head of scatter-gather copy list -+ * @offset: binder buffer offset in target process -+ * @sender_uaddr: user address in source process -+ * @length: bytes to copy -+ * -+ * Specify a scatter-gather block to be copied. The actual copy must -+ * be deferred until all the needed fixups are identified and queued. -+ * Then the copy and fixups are done together so un-translated values -+ * from the source are never visible in the target buffer. -+ * -+ * We are guaranteed that repeated calls to this function will have -+ * monotonically increasing @offset values so the list will naturally -+ * be ordered. -+ * -+ * Return: 0=success, else -errno -+ */ -+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, -+ const void __user *sender_uaddr, size_t length) -+{ -+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); -+ -+ if (!bc) -+ return -ENOMEM; -+ -+ bc->offset = offset; -+ bc->sender_uaddr = sender_uaddr; -+ bc->length = length; -+ INIT_LIST_HEAD(&bc->node); -+ -+ /* -+ * We are guaranteed that the deferred copies are in-order -+ * so just add to the tail. -+ */ -+ list_add_tail(&bc->node, sgc_head); -+ -+ return 0; -+} -+ -+/** -+ * binder_add_fixup() - queue a fixup to be applied to sg copy -+ * @pf_head: list_head of binder ptr fixup list -+ * @offset: binder buffer offset in target process -+ * @fixup: bytes to be copied for fixup -+ * @skip_size: bytes to skip when copying (fixup will be applied later) -+ * -+ * Add the specified fixup to a list ordered by @offset. When copying -+ * the scatter-gather buffers, the fixup will be copied instead of -+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup -+ * will be applied later (in target process context), so we just skip -+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the -+ * value in @fixup. -+ * -+ * This function is called *mostly* in @offset order, but there are -+ * exceptions. Since out-of-order inserts are relatively uncommon, -+ * we insert the new element by searching backward from the tail of -+ * the list. -+ * -+ * Return: 0=success, else -errno -+ */ -+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, -+ binder_uintptr_t fixup, size_t skip_size) -+{ -+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); -+ struct binder_ptr_fixup *tmppf; -+ -+ if (!pf) -+ return -ENOMEM; -+ -+ pf->offset = offset; -+ pf->fixup_data = fixup; -+ pf->skip_size = skip_size; -+ INIT_LIST_HEAD(&pf->node); -+ -+ /* Fixups are *mostly* added in-order, but there are some -+ * exceptions. Look backwards through list for insertion point. 
-+ */ -+ list_for_each_entry_reverse(tmppf, pf_head, node) { -+ if (tmppf->offset < pf->offset) { -+ list_add(&pf->node, &tmppf->node); -+ return 0; -+ } -+ } -+ /* -+ * if we get here, then the new offset is the lowest so -+ * insert at the head -+ */ -+ list_add(&pf->node, pf_head); -+ return 0; -+} -+ -+static int binder_translate_fd_array(struct list_head *pf_head, -+ struct binder_fd_array_object *fda, -+ const void __user *sender_ubuffer, - struct binder_buffer_object *parent, -+ struct binder_buffer_object *sender_uparent, - struct binder_transaction *t, - struct binder_thread *thread, - struct binder_transaction *in_reply_to) - { - binder_size_t fdi, fd_buf_size; - binder_size_t fda_offset; -+ const void __user *sender_ufda_base; - struct binder_proc *proc = thread->proc; -- struct binder_proc *target_proc = t->to_proc; -+ int ret; -+ -+ if (fda->num_fds == 0) -+ return 0; - - fd_buf_size = sizeof(u32) * fda->num_fds; - if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { -@@ -2254,29 +2566,36 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, - */ - fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + - fda->parent_offset; -- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { -+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + -+ fda->parent_offset; -+ -+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || -+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { - binder_user_error("%d:%d parent offset not aligned correctly.\n", - proc->pid, thread->pid); - return -EINVAL; - } -+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); -+ if (ret) -+ return ret; -+ - for (fdi = 0; fdi < fda->num_fds; fdi++) { - u32 fd; -- int ret; - binder_size_t offset = fda_offset + fdi * sizeof(fd); -+ binder_size_t sender_uoffset = fdi * sizeof(fd); - -- ret = binder_alloc_copy_from_buffer(&target_proc->alloc, -- &fd, t->buffer, -- offset, sizeof(fd)); -+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); - if (!ret) - ret = binder_translate_fd(fd, offset, t, thread, - in_reply_to); -- if (ret < 0) -- return ret; -+ if (ret) -+ return ret > 0 ? 
-EINVAL : ret; - } - return 0; - } - --static int binder_fixup_parent(struct binder_transaction *t, -+static int binder_fixup_parent(struct list_head *pf_head, -+ struct binder_transaction *t, - struct binder_thread *thread, - struct binder_buffer_object *bp, - binder_size_t off_start_offset, -@@ -2322,14 +2641,7 @@ static int binder_fixup_parent(struct binder_transaction *t, - } - buffer_offset = bp->parent_offset + - (uintptr_t)parent->buffer - (uintptr_t)b->user_data; -- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, -- &bp->buffer, sizeof(bp->buffer))) { -- binder_user_error("%d:%d got transaction with invalid parent offset\n", -- proc->pid, thread->pid); -- return -EINVAL; -- } -- -- return 0; -+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); - } - - /** -@@ -2456,6 +2768,7 @@ static void binder_transaction(struct binder_proc *proc, - binder_size_t off_start_offset, off_end_offset; - binder_size_t off_min; - binder_size_t sg_buf_offset, sg_buf_end_offset; -+ binder_size_t user_offset = 0; - struct binder_proc *target_proc = NULL; - struct binder_thread *target_thread = NULL; - struct binder_node *target_node = NULL; -@@ -2470,6 +2783,12 @@ static void binder_transaction(struct binder_proc *proc, - int t_debug_id = atomic_inc_return(&binder_last_id); - char *secctx = NULL; - u32 secctx_sz = 0; -+ struct list_head sgc_head; -+ struct list_head pf_head; -+ const void __user *user_buffer = (const void __user *) -+ (uintptr_t)tr->data.ptr.buffer; -+ INIT_LIST_HEAD(&sgc_head); -+ INIT_LIST_HEAD(&pf_head); - - e = binder_transaction_log_add(&binder_transaction_log); - e->debug_id = t_debug_id; -@@ -2595,8 +2914,8 @@ static void binder_transaction(struct binder_proc *proc, - return_error_line = __LINE__; - goto err_invalid_target_handle; - } -- if (security_binder_transaction(proc->tsk, -- target_proc->tsk) < 0) { -+ if (security_binder_transaction(proc->cred, -+ target_proc->cred) < 0) { - return_error = BR_FAILED_REPLY; - return_error_param = -EPERM; - return_error_line = __LINE__; -@@ -2722,16 +3041,7 @@ static void binder_transaction(struct binder_proc *proc, - u32 secid; - size_t added_size; - -- /* -- * Arguably this should be the task's subjective LSM secid but -- * we can't reliably access the subjective creds of a task -- * other than our own so we must use the objective creds, which -- * are safe to access. The downside is that if a task is -- * temporarily overriding it's creds it will not be reflected -- * here; however, it isn't clear that binder would handle that -- * case well anyway. 
-- */ -- security_task_getsecid_obj(proc->tsk, &secid); -+ security_cred_getsecid(proc->cred, &secid); - ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); - if (ret) { - return_error = BR_FAILED_REPLY; -@@ -2790,19 +3100,6 @@ static void binder_transaction(struct binder_proc *proc, - t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); - trace_binder_transaction_alloc_buf(t->buffer); - -- if (binder_alloc_copy_user_to_buffer( -- &target_proc->alloc, -- t->buffer, 0, -- (const void __user *) -- (uintptr_t)tr->data.ptr.buffer, -- tr->data_size)) { -- binder_user_error("%d:%d got transaction with invalid data ptr\n", -- proc->pid, thread->pid); -- return_error = BR_FAILED_REPLY; -- return_error_param = -EFAULT; -- return_error_line = __LINE__; -- goto err_copy_data_failed; -- } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, -@@ -2847,6 +3144,7 @@ static void binder_transaction(struct binder_proc *proc, - size_t object_size; - struct binder_object object; - binder_size_t object_offset; -+ binder_size_t copy_size; - - if (binder_alloc_copy_from_buffer(&target_proc->alloc, - &object_offset, -@@ -2858,8 +3156,27 @@ static void binder_transaction(struct binder_proc *proc, - return_error_line = __LINE__; - goto err_bad_offset; - } -- object_size = binder_get_object(target_proc, t->buffer, -- object_offset, &object); -+ -+ /* -+ * Copy the source user buffer up to the next object -+ * that will be processed. -+ */ -+ copy_size = object_offset - user_offset; -+ if (copy_size && (user_offset > object_offset || -+ binder_alloc_copy_user_to_buffer( -+ &target_proc->alloc, -+ t->buffer, user_offset, -+ user_buffer + user_offset, -+ copy_size))) { -+ binder_user_error("%d:%d got transaction with invalid data ptr\n", -+ proc->pid, thread->pid); -+ return_error = BR_FAILED_REPLY; -+ return_error_param = -EFAULT; -+ return_error_line = __LINE__; -+ goto err_copy_data_failed; -+ } -+ object_size = binder_get_object(target_proc, user_buffer, -+ t->buffer, object_offset, &object); - if (object_size == 0 || object_offset < off_min) { - binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", - proc->pid, thread->pid, -@@ -2871,6 +3188,11 @@ static void binder_transaction(struct binder_proc *proc, - return_error_line = __LINE__; - goto err_bad_offset; - } -+ /* -+ * Set offset to the next buffer fragment to be -+ * copied -+ */ -+ user_offset = object_offset + object_size; - - hdr = &object.hdr; - off_min = object_offset + object_size; -@@ -2933,6 +3255,8 @@ static void binder_transaction(struct binder_proc *proc, - case BINDER_TYPE_FDA: { - struct binder_object ptr_object; - binder_size_t parent_offset; -+ struct binder_object user_object; -+ size_t user_parent_size; - struct binder_fd_array_object *fda = - to_binder_fd_array_object(hdr); - size_t num_valid = (buffer_offset - off_start_offset) / -@@ -2964,11 +3288,35 @@ static void binder_transaction(struct binder_proc *proc, - return_error_line = __LINE__; - goto err_bad_parent; - } -- ret = binder_translate_fd_array(fda, parent, t, thread, -- in_reply_to); -- if (ret < 0) { -+ /* -+ * We need to read the user version of the parent -+ * object to get the original user offset -+ */ -+ user_parent_size = -+ binder_get_object(proc, user_buffer, t->buffer, -+ parent_offset, &user_object); -+ if (user_parent_size != sizeof(user_object.bbo)) { -+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", -+ proc->pid, thread->pid, -+ user_parent_size, -+ 
sizeof(user_object.bbo)); - return_error = BR_FAILED_REPLY; -- return_error_param = ret; -+ return_error_param = -EINVAL; -+ return_error_line = __LINE__; -+ goto err_bad_parent; -+ } -+ ret = binder_translate_fd_array(&pf_head, fda, -+ user_buffer, parent, -+ &user_object.bbo, t, -+ thread, in_reply_to); -+ if (!ret) -+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc, -+ t->buffer, -+ object_offset, -+ fda, sizeof(*fda)); -+ if (ret) { -+ return_error = BR_FAILED_REPLY; -+ return_error_param = ret > 0 ? -EINVAL : ret; - return_error_line = __LINE__; - goto err_translate_failed; - } -@@ -2990,19 +3338,14 @@ static void binder_transaction(struct binder_proc *proc, - return_error_line = __LINE__; - goto err_bad_offset; - } -- if (binder_alloc_copy_user_to_buffer( -- &target_proc->alloc, -- t->buffer, -- sg_buf_offset, -- (const void __user *) -- (uintptr_t)bp->buffer, -- bp->length)) { -- binder_user_error("%d:%d got transaction with invalid offsets ptr\n", -- proc->pid, thread->pid); -- return_error_param = -EFAULT; -+ ret = binder_defer_copy(&sgc_head, sg_buf_offset, -+ (const void __user *)(uintptr_t)bp->buffer, -+ bp->length); -+ if (ret) { - return_error = BR_FAILED_REPLY; -+ return_error_param = ret; - return_error_line = __LINE__; -- goto err_copy_data_failed; -+ goto err_translate_failed; - } - /* Fixup buffer pointer to target proc address space */ - bp->buffer = (uintptr_t) -@@ -3011,7 +3354,8 @@ static void binder_transaction(struct binder_proc *proc, - - num_valid = (buffer_offset - off_start_offset) / - sizeof(binder_size_t); -- ret = binder_fixup_parent(t, thread, bp, -+ ret = binder_fixup_parent(&pf_head, t, -+ thread, bp, - off_start_offset, - num_valid, - last_fixup_obj_off, -@@ -3038,6 +3382,30 @@ static void binder_transaction(struct binder_proc *proc, - goto err_bad_object_type; - } - } -+ /* Done processing objects, copy the rest of the buffer */ -+ if (binder_alloc_copy_user_to_buffer( -+ &target_proc->alloc, -+ t->buffer, user_offset, -+ user_buffer + user_offset, -+ tr->data_size - user_offset)) { -+ binder_user_error("%d:%d got transaction with invalid data ptr\n", -+ proc->pid, thread->pid); -+ return_error = BR_FAILED_REPLY; -+ return_error_param = -EFAULT; -+ return_error_line = __LINE__; -+ goto err_copy_data_failed; -+ } -+ -+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, -+ &sgc_head, &pf_head); -+ if (ret) { -+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n", -+ proc->pid, thread->pid); -+ return_error = BR_FAILED_REPLY; -+ return_error_param = ret; -+ return_error_line = __LINE__; -+ goto err_copy_data_failed; -+ } - if (t->buffer->oneway_spam_suspect) - tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; - else -@@ -3111,6 +3479,7 @@ err_bad_object_type: - err_bad_offset: - err_bad_parent: - err_copy_data_failed: -+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); - binder_free_txn_fixups(t); - trace_binder_transaction_failed_buffer_release(t->buffer); - binder_transaction_buffer_release(target_proc, NULL, t->buffer, -@@ -3185,6 +3554,7 @@ err_invalid_target_handle: - * binder_free_buf() - free the specified buffer - * @proc: binder proc that owns buffer - * @buffer: buffer to be freed -+ * @is_failure: failed to send transaction - * - * If buffer for an async transaction, enqueue the next async - * transaction from the node. 
-@@ -3194,7 +3564,7 @@ err_invalid_target_handle: - static void - binder_free_buf(struct binder_proc *proc, - struct binder_thread *thread, -- struct binder_buffer *buffer) -+ struct binder_buffer *buffer, bool is_failure) - { - binder_inner_proc_lock(proc); - if (buffer->transaction) { -@@ -3222,7 +3592,7 @@ binder_free_buf(struct binder_proc *proc, - binder_node_inner_unlock(buf_node); - } - trace_binder_transaction_buffer_release(buffer); -- binder_transaction_buffer_release(proc, thread, buffer, 0, false); -+ binder_release_entire_buffer(proc, thread, buffer, is_failure); - binder_alloc_free_buf(&proc->alloc, buffer); - } - -@@ -3424,7 +3794,7 @@ static int binder_thread_write(struct binder_proc *proc, - proc->pid, thread->pid, (u64)data_ptr, - buffer->debug_id, - buffer->transaction ? "active" : "finished"); -- binder_free_buf(proc, thread, buffer); -+ binder_free_buf(proc, thread, buffer, false); - break; - } - -@@ -4117,7 +4487,7 @@ retry: - buffer->transaction = NULL; - binder_cleanup_transaction(t, "fd fixups failed", - BR_FAILED_REPLY); -- binder_free_buf(proc, thread, buffer); -+ binder_free_buf(proc, thread, buffer, true); - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", - proc->pid, thread->pid, -@@ -4353,6 +4723,7 @@ static void binder_free_proc(struct binder_proc *proc) - } - binder_alloc_deferred_release(&proc->alloc); - put_task_struct(proc->tsk); -+ put_cred(proc->cred); - binder_stats_deleted(BINDER_STAT_PROC); - kfree(proc); - } -@@ -4430,23 +4801,20 @@ static int binder_thread_release(struct binder_proc *proc, - __release(&t->lock); - - /* -- * If this thread used poll, make sure we remove the waitqueue -- * from any epoll data structures holding it with POLLFREE. -- * waitqueue_active() is safe to use here because we're holding -- * the inner lock. -+ * If this thread used poll, make sure we remove the waitqueue from any -+ * poll data structures holding it. - */ -- if ((thread->looper & BINDER_LOOPER_STATE_POLL) && -- waitqueue_active(&thread->wait)) { -- wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); -- } -+ if (thread->looper & BINDER_LOOPER_STATE_POLL) -+ wake_up_pollfree(&thread->wait); - - binder_inner_proc_unlock(thread->proc); - - /* -- * This is needed to avoid races between wake_up_poll() above and -- * and ep_remove_waitqueue() called for other reasons (eg the epoll file -- * descriptor being closed); ep_remove_waitqueue() holds an RCU read -- * lock, so we can be sure it's done after calling synchronize_rcu(). -+ * This is needed to avoid races between wake_up_pollfree() above and -+ * someone else removing the last entry from the queue for other reasons -+ * (e.g. ep_remove_wait_queue() being called due to an epoll file -+ * descriptor being closed). Such other users hold an RCU read lock, so -+ * we can be sure they're done after we call synchronize_rcu(). 
- */ - if (thread->looper & BINDER_LOOPER_STATE_POLL) - synchronize_rcu(); -@@ -4564,7 +4932,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, - ret = -EBUSY; - goto out; - } -- ret = security_binder_set_context_mgr(proc->tsk); -+ ret = security_binder_set_context_mgr(proc->cred); - if (ret < 0) - goto out; - if (uid_valid(context->binder_context_mgr_uid)) { -@@ -5055,6 +5423,7 @@ static int binder_open(struct inode *nodp, struct file *filp) - spin_lock_init(&proc->outer_lock); - get_task_struct(current->group_leader); - proc->tsk = current->group_leader; -+ proc->cred = get_cred(filp->f_cred); - INIT_LIST_HEAD(&proc->todo); - init_waitqueue_head(&proc->freeze_wait); - proc->default_priority = task_nice(current); -@@ -5765,8 +6134,7 @@ static void print_binder_proc_stats(struct seq_file *m, - print_binder_stats(m, " ", &proc->stats); - } - -- --int binder_state_show(struct seq_file *m, void *unused) -+static int state_show(struct seq_file *m, void *unused) - { - struct binder_proc *proc; - struct binder_node *node; -@@ -5805,7 +6173,7 @@ int binder_state_show(struct seq_file *m, void *unused) - return 0; - } - --int binder_stats_show(struct seq_file *m, void *unused) -+static int stats_show(struct seq_file *m, void *unused) - { - struct binder_proc *proc; - -@@ -5821,7 +6189,7 @@ int binder_stats_show(struct seq_file *m, void *unused) - return 0; - } - --int binder_transactions_show(struct seq_file *m, void *unused) -+static int transactions_show(struct seq_file *m, void *unused) - { - struct binder_proc *proc; - -@@ -5877,7 +6245,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m, - "\n" : " (incomplete)\n"); - } - --int binder_transaction_log_show(struct seq_file *m, void *unused) -+static int transaction_log_show(struct seq_file *m, void *unused) - { - struct binder_transaction_log *log = m->private; - unsigned int log_cur = atomic_read(&log->cur); -@@ -5909,6 +6277,45 @@ const struct file_operations binder_fops = { - .release = binder_release, - }; - -+DEFINE_SHOW_ATTRIBUTE(state); -+DEFINE_SHOW_ATTRIBUTE(stats); -+DEFINE_SHOW_ATTRIBUTE(transactions); -+DEFINE_SHOW_ATTRIBUTE(transaction_log); -+ -+const struct binder_debugfs_entry binder_debugfs_entries[] = { -+ { -+ .name = "state", -+ .mode = 0444, -+ .fops = &state_fops, -+ .data = NULL, -+ }, -+ { -+ .name = "stats", -+ .mode = 0444, -+ .fops = &stats_fops, -+ .data = NULL, -+ }, -+ { -+ .name = "transactions", -+ .mode = 0444, -+ .fops = &transactions_fops, -+ .data = NULL, -+ }, -+ { -+ .name = "transaction_log", -+ .mode = 0444, -+ .fops = &transaction_log_fops, -+ .data = &binder_transaction_log, -+ }, -+ { -+ .name = "failed_transaction_log", -+ .mode = 0444, -+ .fops = &transaction_log_fops, -+ .data = &binder_transaction_log_failed, -+ }, -+ {} /* terminator */ -+}; -+ - static int __init init_binder_device(const char *name) - { - int ret; -@@ -5954,36 +6361,18 @@ static int __init binder_init(void) - atomic_set(&binder_transaction_log_failed.cur, ~0U); - - binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); -- if (binder_debugfs_dir_entry_root) -+ if (binder_debugfs_dir_entry_root) { -+ const struct binder_debugfs_entry *db_entry; -+ -+ binder_for_each_debugfs_entry(db_entry) -+ debugfs_create_file(db_entry->name, -+ db_entry->mode, -+ binder_debugfs_dir_entry_root, -+ db_entry->data, -+ db_entry->fops); -+ - binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", - binder_debugfs_dir_entry_root); -- -- if (binder_debugfs_dir_entry_root) { -- debugfs_create_file("state", -- 
0444, -- binder_debugfs_dir_entry_root, -- NULL, -- &binder_state_fops); -- debugfs_create_file("stats", -- 0444, -- binder_debugfs_dir_entry_root, -- NULL, -- &binder_stats_fops); -- debugfs_create_file("transactions", -- 0444, -- binder_debugfs_dir_entry_root, -- NULL, -- &binder_transactions_fops); -- debugfs_create_file("transaction_log", -- 0444, -- binder_debugfs_dir_entry_root, -- &binder_transaction_log, -- &binder_transaction_log_fops); -- debugfs_create_file("failed_transaction_log", -- 0444, -- binder_debugfs_dir_entry_root, -- &binder_transaction_log_failed, -- &binder_transaction_log_fops); - } - - if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && -@@ -6023,6 +6412,7 @@ err_init_binder_device_failed: - - err_alloc_device_names_failed: - debugfs_remove_recursive(binder_debugfs_dir_entry_root); -+ binder_alloc_shrinker_exit(); - - return ret; - } -diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c -index 340515f54498c..54cee2b31c8e5 100644 ---- a/drivers/android/binder_alloc.c -+++ b/drivers/android/binder_alloc.c -@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - mm = alloc->vma_vm_mm; - - if (mm) { -- mmap_read_lock(mm); -+ mmap_write_lock(mm); - vma = alloc->vma; - } - -@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - trace_binder_alloc_page_end(alloc, index); - } - if (mm) { -- mmap_read_unlock(mm); -+ mmap_write_unlock(mm); - mmput(mm); - } - return 0; -@@ -303,39 +303,24 @@ err_page_ptr_cleared: - } - err_no_vma: - if (mm) { -- mmap_read_unlock(mm); -+ mmap_write_unlock(mm); - mmput(mm); - } - return vma ? -ENOMEM : -ESRCH; - } - -- - static inline void binder_alloc_set_vma(struct binder_alloc *alloc, - struct vm_area_struct *vma) - { -- if (vma) -- alloc->vma_vm_mm = vma->vm_mm; -- /* -- * If we see alloc->vma is not NULL, buffer data structures set up -- * completely. Look at smp_rmb side binder_alloc_get_vma. -- * We also want to guarantee new alloc->vma_vm_mm is always visible -- * if alloc->vma is set. 
-- */ -- smp_wmb(); -- alloc->vma = vma; -+ /* pairs with smp_load_acquire in binder_alloc_get_vma() */ -+ smp_store_release(&alloc->vma, vma); - } - - static inline struct vm_area_struct *binder_alloc_get_vma( - struct binder_alloc *alloc) - { -- struct vm_area_struct *vma = NULL; -- -- if (alloc->vma) { -- /* Look at description in binder_alloc_set_vma */ -- smp_rmb(); -- vma = alloc->vma; -- } -- return vma; -+ /* pairs with smp_store_release in binder_alloc_set_vma() */ -+ return smp_load_acquire(&alloc->vma); - } - - static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) -@@ -398,6 +383,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( - size_t size, data_offsets_size; - int ret; - -+ /* Check binder_alloc is fully initialized */ - if (!binder_alloc_get_vma(alloc)) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf, no vma\n", -@@ -671,7 +657,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, - BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); - - if (buffer->async_transaction) { -- alloc->free_async_space += size + sizeof(struct binder_buffer); -+ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); - - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_free_buf size %zd async free %zd\n", -@@ -754,6 +740,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, - const char *failure_string; - struct binder_buffer *buffer; - -+ if (unlikely(vma->vm_mm != alloc->vma_vm_mm)) { -+ ret = -EINVAL; -+ failure_string = "invalid vma->vm_mm"; -+ goto err_invalid_mm; -+ } -+ - mutex_lock(&binder_alloc_mmap_lock); - if (alloc->buffer_size) { - ret = -EBUSY; -@@ -787,8 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, - buffer->free = 1; - binder_insert_free_buffer(alloc, buffer); - alloc->free_async_space = alloc->buffer_size / 2; -+ -+ /* Signal binder_alloc is fully initialized */ - binder_alloc_set_vma(alloc, vma); -- mmgrab(alloc->vma_vm_mm); - - return 0; - -@@ -801,6 +794,7 @@ err_alloc_pages_failed: - alloc->buffer_size = 0; - err_already_mapped: - mutex_unlock(&binder_alloc_mmap_lock); -+err_invalid_mm: - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%s: %d %lx-%lx %s failed %d\n", __func__, - alloc->pid, vma->vm_start, vma->vm_end, -@@ -1079,6 +1073,8 @@ static struct shrinker binder_shrinker = { - void binder_alloc_init(struct binder_alloc *alloc) - { - alloc->pid = current->group_leader->pid; -+ alloc->vma_vm_mm = current->mm; -+ mmgrab(alloc->vma_vm_mm); - mutex_init(&alloc->mutex); - INIT_LIST_HEAD(&alloc->buffers); - } -@@ -1095,6 +1091,12 @@ int binder_alloc_shrinker_init(void) - return ret; - } - -+void binder_alloc_shrinker_exit(void) -+{ -+ unregister_shrinker(&binder_shrinker); -+ list_lru_destroy(&binder_alloc_lru); -+} -+ - /** - * check_buffer() - verify that buffer/offset is safe to access - * @alloc: binder_alloc for this proc -diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h -index 7dea57a84c79b..399f2b269f2c5 100644 ---- a/drivers/android/binder_alloc.h -+++ b/drivers/android/binder_alloc.h -@@ -131,6 +131,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, - int pid); - extern void binder_alloc_init(struct binder_alloc *alloc); - extern int binder_alloc_shrinker_init(void); -+extern void binder_alloc_shrinker_exit(void); - extern void binder_alloc_vma_close(struct binder_alloc *alloc); - extern struct binder_buffer * - binder_alloc_prepare_to_free(struct binder_alloc 
*alloc, -diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h -index 402c4d4362a83..1ade9799c8d58 100644 ---- a/drivers/android/binder_internal.h -+++ b/drivers/android/binder_internal.h -@@ -107,41 +107,19 @@ static inline int __init init_binderfs(void) - } - #endif - --int binder_stats_show(struct seq_file *m, void *unused); --DEFINE_SHOW_ATTRIBUTE(binder_stats); -- --int binder_state_show(struct seq_file *m, void *unused); --DEFINE_SHOW_ATTRIBUTE(binder_state); -- --int binder_transactions_show(struct seq_file *m, void *unused); --DEFINE_SHOW_ATTRIBUTE(binder_transactions); -- --int binder_transaction_log_show(struct seq_file *m, void *unused); --DEFINE_SHOW_ATTRIBUTE(binder_transaction_log); -- --struct binder_transaction_log_entry { -- int debug_id; -- int debug_id_done; -- int call_type; -- int from_proc; -- int from_thread; -- int target_handle; -- int to_proc; -- int to_thread; -- int to_node; -- int data_size; -- int offsets_size; -- int return_error_line; -- uint32_t return_error; -- uint32_t return_error_param; -- char context_name[BINDERFS_MAX_NAME + 1]; -+struct binder_debugfs_entry { -+ const char *name; -+ umode_t mode; -+ const struct file_operations *fops; -+ void *data; - }; - --struct binder_transaction_log { -- atomic_t cur; -- bool full; -- struct binder_transaction_log_entry entry[32]; --}; -+extern const struct binder_debugfs_entry binder_debugfs_entries[]; -+ -+#define binder_for_each_debugfs_entry(entry) \ -+ for ((entry) = binder_debugfs_entries; \ -+ (entry)->name; \ -+ (entry)++) - - enum binder_stat_types { - BINDER_STAT_PROC, -@@ -364,6 +342,9 @@ struct binder_ref { - * (invariant after initialized) - * @tsk task_struct for group_leader of process - * (invariant after initialized) -+ * @cred struct cred associated with the `struct file` -+ * in binder_open() -+ * (invariant after initialized) - * @deferred_work_node: element for binder_deferred_list - * (protected by binder_deferred_lock) - * @deferred_work: bitmap of deferred work to perform -@@ -426,6 +407,7 @@ struct binder_proc { - struct list_head waiting_threads; - int pid; - struct task_struct *tsk; -+ const struct cred *cred; - struct hlist_node deferred_work_node; - int deferred_work; - int outstanding_txns; -@@ -571,6 +553,4 @@ struct binder_object { - }; - }; - --extern struct binder_transaction_log binder_transaction_log; --extern struct binder_transaction_log binder_transaction_log_failed; - #endif /* _LINUX_BINDER_INTERNAL_H */ -diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c -index e3605cdd43357..6d717ed76766e 100644 ---- a/drivers/android/binderfs.c -+++ b/drivers/android/binderfs.c -@@ -621,6 +621,7 @@ static int init_binder_features(struct super_block *sb) - static int init_binder_logs(struct super_block *sb) - { - struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; -+ const struct binder_debugfs_entry *db_entry; - struct binderfs_info *info; - int ret = 0; - -@@ -631,43 +632,15 @@ static int init_binder_logs(struct super_block *sb) - goto out; - } - -- dentry = binderfs_create_file(binder_logs_root_dir, "stats", -- &binder_stats_fops, NULL); -- if (IS_ERR(dentry)) { -- ret = PTR_ERR(dentry); -- goto out; -- } -- -- dentry = binderfs_create_file(binder_logs_root_dir, "state", -- &binder_state_fops, NULL); -- if (IS_ERR(dentry)) { -- ret = PTR_ERR(dentry); -- goto out; -- } -- -- dentry = binderfs_create_file(binder_logs_root_dir, "transactions", -- &binder_transactions_fops, NULL); -- if (IS_ERR(dentry)) { -- ret = PTR_ERR(dentry); 
-- goto out; -- } -- -- dentry = binderfs_create_file(binder_logs_root_dir, -- "transaction_log", -- &binder_transaction_log_fops, -- &binder_transaction_log); -- if (IS_ERR(dentry)) { -- ret = PTR_ERR(dentry); -- goto out; -- } -- -- dentry = binderfs_create_file(binder_logs_root_dir, -- "failed_transaction_log", -- &binder_transaction_log_fops, -- &binder_transaction_log_failed); -- if (IS_ERR(dentry)) { -- ret = PTR_ERR(dentry); -- goto out; -+ binder_for_each_debugfs_entry(db_entry) { -+ dentry = binderfs_create_file(binder_logs_root_dir, -+ db_entry->name, -+ db_entry->fops, -+ db_entry->data); -+ if (IS_ERR(dentry)) { -+ ret = PTR_ERR(dentry); -+ goto out; -+ } - } - - proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc"); -diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c -index 2a04e8abd3977..26e0eb537b4f5 100644 ---- a/drivers/ata/acard-ahci.c -+++ b/drivers/ata/acard-ahci.c -@@ -267,7 +267,7 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) - if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && - !(qc->flags & ATA_QCFLAG_FAILED)) { - ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); -- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; -+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; - } else - ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); - -diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c -index 186cbf90c8ead..149ee16fd0225 100644 ---- a/drivers/ata/ahci.c -+++ b/drivers/ata/ahci.c -@@ -83,6 +83,7 @@ enum board_ids { - static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); - static void ahci_remove_one(struct pci_dev *dev); - static void ahci_shutdown_one(struct pci_dev *dev); -+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv); - static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline); - static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, -@@ -442,6 +443,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { - /* AMD */ - { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ - { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ -+ { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ - /* AMD is using RAID class only for ahci controllers */ - { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, -@@ -667,6 +669,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, - ahci_save_initial_config(&pdev->dev, hpriv); - } - -+static int ahci_pci_reset_controller(struct ata_host *host) -+{ -+ struct pci_dev *pdev = to_pci_dev(host->dev); -+ struct ahci_host_priv *hpriv = host->private_data; -+ int rc; -+ -+ rc = ahci_reset_controller(host); -+ if (rc) -+ return rc; -+ -+ /* -+ * If platform firmware failed to enable ports, try to enable -+ * them here. 
-+ */ -+ ahci_intel_pcs_quirk(pdev, hpriv); -+ -+ return 0; -+} -+ - static void ahci_pci_init_controller(struct ata_host *host) - { - struct ahci_host_priv *hpriv = host->private_data; -@@ -734,7 +755,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), -@@ -805,7 +826,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - rc = sata_link_hardreset(link, timing, deadline, &online, -@@ -868,7 +889,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) - struct ata_host *host = pci_get_drvdata(pdev); - int rc; - -- rc = ahci_reset_controller(host); -+ rc = ahci_pci_reset_controller(host); - if (rc) - return rc; - ahci_pci_init_controller(host); -@@ -903,7 +924,7 @@ static int ahci_pci_device_resume(struct device *dev) - ahci_mcp89_apple_enable(pdev); - - if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { -- rc = ahci_reset_controller(host); -+ rc = ahci_pci_reset_controller(host); - if (rc) - return rc; - -@@ -1788,12 +1809,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) - /* save initial config */ - ahci_pci_save_initial_config(pdev, hpriv); - -- /* -- * If platform firmware failed to enable ports, try to enable -- * them here. -- */ -- ahci_intel_pcs_quirk(pdev, hpriv); -- - /* prepare host */ - if (hpriv->cap & HOST_CAP_NCQ) { - pi.flags |= ATA_FLAG_NCQ; -@@ -1903,7 +1918,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) - if (rc) - return rc; - -- rc = ahci_reset_controller(host); -+ rc = ahci_pci_reset_controller(host); - if (rc) - return rc; - -diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h -index 2e89499bd9c3d..dcc2d92cf6b62 100644 ---- a/drivers/ata/ahci.h -+++ b/drivers/ata/ahci.h -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - /* Enclosure Management Control */ - #define EM_CTRL_MSG_TYPE 0x000f0000 -@@ -54,12 +55,12 @@ enum { - AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + - AHCI_CMD_TBL_AR_SZ + - (AHCI_RX_FIS_SZ * 16), -- AHCI_IRQ_ON_SG = (1 << 31), -- AHCI_CMD_ATAPI = (1 << 5), -- AHCI_CMD_WRITE = (1 << 6), -- AHCI_CMD_PREFETCH = (1 << 7), -- AHCI_CMD_RESET = (1 << 8), -- AHCI_CMD_CLR_BUSY = (1 << 10), -+ AHCI_IRQ_ON_SG = BIT(31), -+ AHCI_CMD_ATAPI = BIT(5), -+ AHCI_CMD_WRITE = BIT(6), -+ AHCI_CMD_PREFETCH = BIT(7), -+ AHCI_CMD_RESET = BIT(8), -+ AHCI_CMD_CLR_BUSY = BIT(10), - - RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ - RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ -@@ -77,37 +78,37 @@ enum { - HOST_CAP2 = 0x24, /* host capabilities, extended */ - - /* HOST_CTL bits */ -- HOST_RESET = (1 << 0), /* reset controller; self-clear */ -- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ -- HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ -- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ -+ HOST_RESET = BIT(0), /* reset controller; self-clear */ -+ HOST_IRQ_EN = BIT(1), /* global IRQ enable */ -+ HOST_MRSM = BIT(2), /* MSI Revert to Single Message */ -+ HOST_AHCI_EN = BIT(31), /* AHCI enabled */ - - /* HOST_CAP bits */ -- HOST_CAP_SXS = 
(1 << 5), /* Supports External SATA */ -- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ -- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ -- HOST_CAP_PART = (1 << 13), /* Partial state capable */ -- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ -- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ -- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ -- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ -- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ -- HOST_CAP_CLO = (1 << 24), /* Command List Override support */ -- HOST_CAP_LED = (1 << 25), /* Supports activity LED */ -- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ -- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ -- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ -- HOST_CAP_SNTF = (1 << 29), /* SNotification register */ -- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ -- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ -+ HOST_CAP_SXS = BIT(5), /* Supports External SATA */ -+ HOST_CAP_EMS = BIT(6), /* Enclosure Management support */ -+ HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */ -+ HOST_CAP_PART = BIT(13), /* Partial state capable */ -+ HOST_CAP_SSC = BIT(14), /* Slumber state capable */ -+ HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */ -+ HOST_CAP_FBS = BIT(16), /* FIS-based switching support */ -+ HOST_CAP_PMP = BIT(17), /* Port Multiplier support */ -+ HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */ -+ HOST_CAP_CLO = BIT(24), /* Command List Override support */ -+ HOST_CAP_LED = BIT(25), /* Supports activity LED */ -+ HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */ -+ HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */ -+ HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */ -+ HOST_CAP_SNTF = BIT(29), /* SNotification register */ -+ HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */ -+ HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */ - - /* HOST_CAP2 bits */ -- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ -- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ -- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ -- HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ -- HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ -- HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ -+ HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */ -+ HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */ -+ HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */ -+ HOST_CAP2_SDS = BIT(3), /* Support device sleep */ -+ HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */ -+ HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */ - - /* registers for each SATA port */ - PORT_LST_ADDR = 0x00, /* command list DMA addr */ -@@ -129,24 +130,24 @@ enum { - PORT_DEVSLP = 0x44, /* device sleep */ - - /* PORT_IRQ_{STAT,MASK} bits */ -- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ -- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ -- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ -- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ -- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ -- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ -- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ -- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ -- -- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ -- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ -- 
PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ -- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ -- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ -- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ -- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ -- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ -- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ -+ PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */ -+ PORT_IRQ_TF_ERR = BIT(30), /* task file error */ -+ PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */ -+ PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */ -+ PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */ -+ PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */ -+ PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */ -+ PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */ -+ -+ PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */ -+ PORT_IRQ_DEV_ILCK = BIT(7), /* device interlock */ -+ PORT_IRQ_CONNECT = BIT(6), /* port connect change status */ -+ PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */ -+ PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */ -+ PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */ -+ PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */ -+ PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */ -+ PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */ - - PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | - PORT_IRQ_IF_ERR | -@@ -162,34 +163,34 @@ enum { - PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, - - /* PORT_CMD bits */ -- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ -- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ -- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ -- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ -- PORT_CMD_ESP = (1 << 21), /* External Sata Port */ -- PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */ -- PORT_CMD_PMP = (1 << 17), /* PMP attached */ -- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ -- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ -- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ -- PORT_CMD_CLO = (1 << 3), /* Command list override */ -- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ -- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ -- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ -- -- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ -- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ -- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ -- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ -+ PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */ -+ PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */ -+ PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */ -+ PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */ -+ PORT_CMD_ESP = BIT(21), /* External Sata Port */ -+ PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */ -+ PORT_CMD_PMP = BIT(17), /* PMP attached */ -+ PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */ -+ PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */ -+ PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */ -+ PORT_CMD_CLO = BIT(3), /* Command list override */ -+ PORT_CMD_POWER_ON = BIT(2), /* Power up device */ -+ PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */ -+ PORT_CMD_START = BIT(0), /* Enable port DMA engine */ -+ -+ PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */ -+ PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put 
i/f in active state */ -+ PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */ -+ PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */ - - /* PORT_FBS bits */ - PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ - PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ - PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ - PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ -- PORT_FBS_SDE = (1 << 2), /* FBS single device error */ -- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ -- PORT_FBS_EN = (1 << 0), /* Enable FBS */ -+ PORT_FBS_SDE = BIT(2), /* FBS single device error */ -+ PORT_FBS_DEC = BIT(1), /* FBS device error clear */ -+ PORT_FBS_EN = BIT(0), /* Enable FBS */ - - /* PORT_DEVSLP bits */ - PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ -@@ -197,52 +198,52 @@ enum { - PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ - PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ - PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ -- PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ -- PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ -+ PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */ -+ PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */ - - /* hpriv->flags bits */ - - #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) - -- AHCI_HFLAG_NO_NCQ = (1 << 0), -- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ -- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ -- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ -- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ -- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ -- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ -- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ -- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ -- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ -- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as -- link offline */ -- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ -- AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ -- AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ -- AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on -- port start (wait until -- error-handling stage) */ -- AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ -- AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ -+ AHCI_HFLAG_NO_NCQ = BIT(0), -+ AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */ -+ AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */ -+ AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */ -+ AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */ -+ AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */ -+ AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */ -+ AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */ -+ AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */ -+ AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */ -+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as -+ link offline */ -+ AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */ -+ AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */ -+ AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */ -+ AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on -+ port start (wait until -+ error-handling stage) */ -+ AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */ -+ AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */ - - #ifdef CONFIG_PCI_MSI -- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ -+ AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */ - #else - /* compile out MSI 
infrastructure */ - AHCI_HFLAG_MULTI_MSI = 0, - #endif -- AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ -- AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */ -- AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read -- only registers */ -- AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use -- SATA_MOBILE_LPM_POLICY -- as default lpm_policy */ -- AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during -- suspend/resume */ -- AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP -- from phy_power_on() */ -- AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ -+ AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */ -+ AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */ -+ AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read -+ only registers */ -+ AHCI_HFLAG_IS_MOBILE = BIT(25), /* mobile chipset, use -+ SATA_MOBILE_LPM_POLICY -+ as default lpm_policy */ -+ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during -+ suspend/resume */ -+ AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = BIT(27), /* ignore -EOPNOTSUPP -+ from phy_power_on() */ -+ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */ - - /* ap->flags bits */ - -@@ -254,26 +255,26 @@ enum { - PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ - - /* em constants */ -- EM_MAX_SLOTS = 8, -+ EM_MAX_SLOTS = SATA_PMP_MAX_PORTS, - EM_MAX_RETRY = 5, - - /* em_ctl bits */ -- EM_CTL_RST = (1 << 9), /* Reset */ -- EM_CTL_TM = (1 << 8), /* Transmit Message */ -- EM_CTL_MR = (1 << 0), /* Message Received */ -- EM_CTL_ALHD = (1 << 26), /* Activity LED */ -- EM_CTL_XMT = (1 << 25), /* Transmit Only */ -- EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ -- EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ -- EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ -- EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ -- EM_CTL_LED = (1 << 16), /* LED messages supported */ -+ EM_CTL_RST = BIT(9), /* Reset */ -+ EM_CTL_TM = BIT(8), /* Transmit Message */ -+ EM_CTL_MR = BIT(0), /* Message Received */ -+ EM_CTL_ALHD = BIT(26), /* Activity LED */ -+ EM_CTL_XMT = BIT(25), /* Transmit Only */ -+ EM_CTL_SMB = BIT(24), /* Single Message Buffer */ -+ EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */ -+ EM_CTL_SES = BIT(18), /* SES-2 messages supported */ -+ EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */ -+ EM_CTL_LED = BIT(16), /* LED messages supported */ - - /* em message type */ -- EM_MSG_TYPE_LED = (1 << 0), /* LED */ -- EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ -- EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ -- EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ -+ EM_MSG_TYPE_LED = BIT(0), /* LED */ -+ EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */ -+ EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */ -+ EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */ - }; - - struct ahci_cmd_hdr { -diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c -index 388baf528fa81..189f75d537414 100644 ---- a/drivers/ata/ahci_imx.c -+++ b/drivers/ata/ahci_imx.c -@@ -1230,4 +1230,4 @@ module_platform_driver(imx_ahci_driver); - MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); - MODULE_AUTHOR("Richard Zhu "); - MODULE_LICENSE("GPL"); --MODULE_ALIAS("ahci:imx"); -+MODULE_ALIAS("platform:" DRV_NAME); -diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c -index 5b46fc9aeb4a0..e5ac3d1c214c0 100644 ---- a/drivers/ata/ahci_qoriq.c -+++ b/drivers/ata/ahci_qoriq.c -@@ -125,7 +125,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, - - /* clear D2H reception area to properly 
wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - rc = sata_link_hardreset(link, timing, deadline, &online, -diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c -index dffc432b9d54a..292099410cf68 100644 ---- a/drivers/ata/ahci_xgene.c -+++ b/drivers/ata/ahci_xgene.c -@@ -365,7 +365,7 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, - do { - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - rc = sata_link_hardreset(link, timing, deadline, online, - ahci_check_ready); -diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c -index 5b3fa2cbe7223..192115a45dd78 100644 ---- a/drivers/ata/libahci.c -+++ b/drivers/ata/libahci.c -@@ -1552,7 +1552,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - rc = sata_link_hardreset(link, timing, deadline, online, -@@ -2038,7 +2038,7 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) - if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && - !(qc->flags & ATA_QCFLAG_FAILED)) { - ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); -- qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; -+ qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15]; - } else - ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); - -@@ -2305,6 +2305,18 @@ int ahci_port_resume(struct ata_port *ap) - EXPORT_SYMBOL_GPL(ahci_port_resume); - - #ifdef CONFIG_PM -+static void ahci_handle_s2idle(struct ata_port *ap) -+{ -+ void __iomem *port_mmio = ahci_port_base(ap); -+ u32 devslp; -+ -+ if (pm_suspend_via_firmware()) -+ return; -+ devslp = readl(port_mmio + PORT_DEVSLP); -+ if ((devslp & PORT_DEVSLP_ADSE)) -+ ata_msleep(ap, devslp_idle_timeout); -+} -+ - static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) - { - const char *emsg = NULL; -@@ -2318,6 +2330,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) - ata_port_freeze(ap); - } - -+ if (acpi_storage_d3(ap->host->dev)) -+ ahci_handle_s2idle(ap); -+ - ahci_rpm_put_port(ap); - return rc; - } -diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c -index 0910441321f72..64d6da0a53035 100644 ---- a/drivers/ata/libahci_platform.c -+++ b/drivers/ata/libahci_platform.c -@@ -451,14 +451,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, - } - } - -- hpriv->nports = child_nodes = of_get_child_count(dev->of_node); -+ /* -+ * Too many sub-nodes most likely means having something wrong with -+ * the firmware. -+ */ -+ child_nodes = of_get_child_count(dev->of_node); -+ if (child_nodes > AHCI_MAX_PORTS) { -+ rc = -EINVAL; -+ goto err_out; -+ } - - /* - * If no sub-node was found, we still need to set nports to - * one in order to be able to use the - * ahci_platform_[en|dis]able_[phys|regulators] functions. 
- */ -- if (!child_nodes) -+ if (child_nodes) -+ hpriv->nports = child_nodes; -+ else - hpriv->nports = 1; - - hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL); -diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c -index 7a7d6642edcc5..d15f3e908ea4a 100644 ---- a/drivers/ata/libata-acpi.c -+++ b/drivers/ata/libata-acpi.c -@@ -554,13 +554,13 @@ static void ata_acpi_gtf_to_tf(struct ata_device *dev, - - tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - tf->protocol = ATA_PROT_NODATA; -- tf->feature = gtf->tf[0]; /* 0x1f1 */ -+ tf->error = gtf->tf[0]; /* 0x1f1 */ - tf->nsect = gtf->tf[1]; /* 0x1f2 */ - tf->lbal = gtf->tf[2]; /* 0x1f3 */ - tf->lbam = gtf->tf[3]; /* 0x1f4 */ - tf->lbah = gtf->tf[4]; /* 0x1f5 */ - tf->device = gtf->tf[5]; /* 0x1f6 */ -- tf->command = gtf->tf[6]; /* 0x1f7 */ -+ tf->status = gtf->tf[6]; /* 0x1f7 */ - } - - static int ata_acpi_filter_tf(struct ata_device *dev, -@@ -650,9 +650,7 @@ static int ata_acpi_run_tf(struct ata_device *dev, - struct ata_taskfile *pptf = NULL; - struct ata_taskfile tf, ptf, rtf; - unsigned int err_mask; -- const char *level; - const char *descr; -- char msg[60]; - int rc; - - if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) -@@ -666,6 +664,10 @@ static int ata_acpi_run_tf(struct ata_device *dev, - pptf = &ptf; - } - -+ descr = ata_get_cmd_descript(tf.command); -+ if (!descr) -+ descr = "unknown"; -+ - if (!ata_acpi_filter_tf(dev, &tf, pptf)) { - rtf = tf; - err_mask = ata_exec_internal(dev, &rtf, NULL, -@@ -673,40 +675,42 @@ static int ata_acpi_run_tf(struct ata_device *dev, - - switch (err_mask) { - case 0: -- level = KERN_DEBUG; -- snprintf(msg, sizeof(msg), "succeeded"); -+ ata_dev_dbg(dev, -+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" -+ "(%s) succeeded\n", -+ tf.command, tf.feature, tf.nsect, tf.lbal, -+ tf.lbam, tf.lbah, tf.device, descr); - rc = 1; - break; - - case AC_ERR_DEV: -- level = KERN_INFO; -- snprintf(msg, sizeof(msg), -- "rejected by device (Stat=0x%02x Err=0x%02x)", -- rtf.command, rtf.feature); -+ ata_dev_info(dev, -+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" -+ "(%s) rejected by device (Stat=0x%02x Err=0x%02x)", -+ tf.command, tf.feature, tf.nsect, tf.lbal, -+ tf.lbam, tf.lbah, tf.device, descr, -+ rtf.status, rtf.error); - rc = 0; - break; - - default: -- level = KERN_ERR; -- snprintf(msg, sizeof(msg), -- "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", -- err_mask, rtf.command, rtf.feature); -+ ata_dev_err(dev, -+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" -+ "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", -+ tf.command, tf.feature, tf.nsect, tf.lbal, -+ tf.lbam, tf.lbah, tf.device, descr, -+ err_mask, rtf.status, rtf.error); - rc = -EIO; - break; - } - } else { -- level = KERN_INFO; -- snprintf(msg, sizeof(msg), "filtered out"); -+ ata_dev_info(dev, -+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x" -+ "(%s) filtered out\n", -+ tf.command, tf.feature, tf.nsect, tf.lbal, -+ tf.lbam, tf.lbah, tf.device, descr); - rc = 0; - } -- descr = ata_get_cmd_descript(tf.command); -- -- ata_dev_printk(dev, level, -- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", -- tf.command, tf.feature, tf.nsect, tf.lbal, -- tf.lbam, tf.lbah, tf.device, -- (descr ? 
descr : "unknown"), msg); -- - return rc; - } - -diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c -index eed65311b5d1d..025260b80a94c 100644 ---- a/drivers/ata/libata-core.c -+++ b/drivers/ata/libata-core.c -@@ -1185,7 +1185,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) - ata_dev_warn(dev, - "failed to read native max address (err_mask=0x%x)\n", - err_mask); -- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) -+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) - return -EACCES; - return -EIO; - } -@@ -1249,7 +1249,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) - "failed to set max address (err_mask=0x%x)\n", - err_mask); - if (err_mask == AC_ERR_DEV && -- (tf.feature & (ATA_ABORTED | ATA_IDNF))) -+ (tf.error & (ATA_ABORTED | ATA_IDNF))) - return -EACCES; - return -EIO; - } -@@ -1616,7 +1616,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, - - /* perform minimal error analysis */ - if (qc->flags & ATA_QCFLAG_FAILED) { -- if (qc->result_tf.command & (ATA_ERR | ATA_DF)) -+ if (qc->result_tf.status & (ATA_ERR | ATA_DF)) - qc->err_mask |= AC_ERR_DEV; - - if (!qc->err_mask) -@@ -1625,7 +1625,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, - if (qc->err_mask & ~AC_ERR_OTHER) - qc->err_mask &= ~AC_ERR_OTHER; - } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) { -- qc->result_tf.command |= ATA_SENSE; -+ qc->result_tf.status |= ATA_SENSE; - } - - /* finish up */ -@@ -1848,7 +1848,7 @@ retry: - return 0; - } - -- if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { -+ if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) { - /* Device or controller might have reported - * the wrong device class. Give a shot at the - * other IDENTIFY if the current one is -@@ -2007,7 +2007,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log, - - retry: - ata_tf_init(dev, &tf); -- if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && -+ if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) && - !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) { - tf.command = ATA_CMD_READ_LOG_DMA_EXT; - tf.protocol = ATA_PROT_DMA; -@@ -2031,8 +2031,9 @@ retry: - dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; - goto retry; - } -- ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n", -- (unsigned int)page, err_mask); -+ ata_dev_err(dev, -+ "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n", -+ (unsigned int)log, (unsigned int)page, err_mask); - } - - return err_mask; -@@ -2166,6 +2167,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev) - struct ata_port *ap = dev->link->ap; - unsigned int err_mask; - -+ if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) -+ return; -+ - err_mask = ata_read_log_page(dev, - ATA_LOG_IDENTIFY_DEVICE, - ATA_LOG_SATA_SETTINGS, -@@ -2442,7 +2446,8 @@ static void ata_dev_config_devslp(struct ata_device *dev) - * Check device sleep capability. Get DevSlp timing variables - * from SATA Settings page of Identify Device Data Log. - */ -- if (!ata_id_has_devslp(dev->id)) -+ if (!ata_id_has_devslp(dev->id) || -+ !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) - return; - - err_mask = ata_read_log_page(dev, -@@ -3071,7 +3076,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) - */ - if (spd > 1) - mask &= (1 << (spd - 1)) - 1; -- else -+ else if (link->sata_spd) - return -EINVAL; - - /* were we already at the bottom? 
*/ -@@ -3851,6 +3856,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { - { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, - /* Odd clown on sil3726/4726 PMPs */ - { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, -+ /* Similar story with ASMedia 1092 */ -+ { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE }, - - /* Weird ATAPI devices */ - { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, -@@ -3954,6 +3961,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { - { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, - { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, - -+ /* These specific Pioneer models have LPM issues */ -+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM }, -+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, -+ - /* Crucial BX100 SSD 500GB has broken LPM support */ - { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, - -@@ -3992,6 +4003,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { - ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM, }, -+ { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | -+ ATA_HORKAGE_NO_DMA_LOG | -+ ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | -@@ -4007,6 +4021,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { - - /* devices that don't properly handle TRIM commands */ - { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, -+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, - - /* - * As defined, the DRAT (Deterministic Read After Trim) and RZAT -@@ -4356,7 +4371,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev, - /* A clean abort indicates an original or just out of spec drive - and we should continue as we issue the setup based on the - drive reported working geometry */ -- if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) -+ if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED)) - err_mask = 0; - - DPRINTK("EXIT, err_mask=%x\n", err_mask); -@@ -5489,7 +5504,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, - const struct ata_port_info * const * ppi, - int n_ports) - { -- const struct ata_port_info *pi; -+ const struct ata_port_info *pi = &ata_dummy_port_info; - struct ata_host *host; - int i, j; - -@@ -5497,7 +5512,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, - if (!host) - return NULL; - -- for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { -+ for (i = 0, j = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - if (ppi[j]) -@@ -6482,67 +6497,6 @@ const struct ata_port_info ata_dummy_port_info = { - }; - EXPORT_SYMBOL_GPL(ata_dummy_port_info); - --/* -- * Utility print functions -- */ --void ata_port_printk(const struct ata_port *ap, const char *level, -- const char *fmt, ...) --{ -- struct va_format vaf; -- va_list args; -- -- va_start(args, fmt); -- -- vaf.fmt = fmt; -- vaf.va = &args; -- -- printk("%sata%u: %pV", level, ap->print_id, &vaf); -- -- va_end(args); --} --EXPORT_SYMBOL(ata_port_printk); -- --void ata_link_printk(const struct ata_link *link, const char *level, -- const char *fmt, ...) 
--{ -- struct va_format vaf; -- va_list args; -- -- va_start(args, fmt); -- -- vaf.fmt = fmt; -- vaf.va = &args; -- -- if (sata_pmp_attached(link->ap) || link->ap->slave_link) -- printk("%sata%u.%02u: %pV", -- level, link->ap->print_id, link->pmp, &vaf); -- else -- printk("%sata%u: %pV", -- level, link->ap->print_id, &vaf); -- -- va_end(args); --} --EXPORT_SYMBOL(ata_link_printk); -- --void ata_dev_printk(const struct ata_device *dev, const char *level, -- const char *fmt, ...) --{ -- struct va_format vaf; -- va_list args; -- -- va_start(args, fmt); -- -- vaf.fmt = fmt; -- vaf.va = &args; -- -- printk("%sata%u.%02u: %pV", -- level, dev->link->ap->print_id, dev->link->pmp + dev->devno, -- &vaf); -- -- va_end(args); --} --EXPORT_SYMBOL(ata_dev_printk); -- - void ata_print_version(const struct device *dev, const char *version) - { - dev_printk(KERN_DEBUG, dev, "version %s\n", version); -diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c -index bf9c4b6c5c3d4..8350abc172908 100644 ---- a/drivers/ata/libata-eh.c -+++ b/drivers/ata/libata-eh.c -@@ -93,6 +93,12 @@ static const unsigned long ata_eh_identify_timeouts[] = { - ULONG_MAX, - }; - -+static const unsigned long ata_eh_revalidate_timeouts[] = { -+ 15000, /* Some drives are slow to read log pages when waking-up */ -+ 15000, /* combined time till here is enough even for media access */ -+ ULONG_MAX, -+}; -+ - static const unsigned long ata_eh_flush_timeouts[] = { - 15000, /* be generous with flush */ - 15000, /* ditto */ -@@ -129,6 +135,8 @@ static const struct ata_eh_cmd_timeout_ent - ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { - { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), - .timeouts = ata_eh_identify_timeouts, }, -+ { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT), -+ .timeouts = ata_eh_revalidate_timeouts, }, - { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), - .timeouts = ata_eh_other_timeouts, }, - { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), -@@ -1378,7 +1386,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) - - err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); - if (err_mask == AC_ERR_DEV) -- *r_sense_key = tf.feature >> 4; -+ *r_sense_key = tf.error >> 4; - return err_mask; - } - -@@ -1423,12 +1431,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc, - - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); - /* Ignore err_mask; ATA_ERR might be set */ -- if (tf.command & ATA_SENSE) { -+ if (tf.status & ATA_SENSE) { - ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); - qc->flags |= ATA_QCFLAG_SENSE_VALID; - } else { - ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", -- tf.command, err_mask); -+ tf.status, err_mask); - } - } - -@@ -1553,7 +1561,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, - const struct ata_taskfile *tf) - { - unsigned int tmp, action = 0; -- u8 stat = tf->command, err = tf->feature; -+ u8 stat = tf->status, err = tf->error; - - if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { - qc->err_mask |= AC_ERR_HSM; -@@ -1590,7 +1598,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, - if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { - tmp = atapi_eh_request_sense(qc->dev, - qc->scsicmd->sense_buffer, -- qc->result_tf.feature >> 4); -+ qc->result_tf.error >> 4); - if (!tmp) - qc->flags |= ATA_QCFLAG_SENSE_VALID; - else -@@ -2122,6 +2130,7 @@ const char *ata_get_cmd_descript(u8 command) - { 
ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, - { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, - { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, -+ { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, - { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, - { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, - { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, -@@ -2363,7 +2372,7 @@ static void ata_eh_link_report(struct ata_link *link) - cmd->hob_feature, cmd->hob_nsect, - cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, - cmd->device, qc->tag, data_buf, cdb_buf, -- res->command, res->feature, res->nsect, -+ res->status, res->error, res->nsect, - res->lbal, res->lbam, res->lbah, - res->hob_feature, res->hob_nsect, - res->hob_lbal, res->hob_lbam, res->hob_lbah, -@@ -2371,28 +2380,28 @@ static void ata_eh_link_report(struct ata_link *link) - qc->err_mask & AC_ERR_NCQ ? " " : ""); - - #ifdef CONFIG_ATA_VERBOSE_ERROR -- if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | -- ATA_SENSE | ATA_ERR)) { -- if (res->command & ATA_BUSY) -+ if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | -+ ATA_SENSE | ATA_ERR)) { -+ if (res->status & ATA_BUSY) - ata_dev_err(qc->dev, "status: { Busy }\n"); - else - ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", -- res->command & ATA_DRDY ? "DRDY " : "", -- res->command & ATA_DF ? "DF " : "", -- res->command & ATA_DRQ ? "DRQ " : "", -- res->command & ATA_SENSE ? "SENSE " : "", -- res->command & ATA_ERR ? "ERR " : ""); -+ res->status & ATA_DRDY ? "DRDY " : "", -+ res->status & ATA_DF ? "DF " : "", -+ res->status & ATA_DRQ ? "DRQ " : "", -+ res->status & ATA_SENSE ? "SENSE " : "", -+ res->status & ATA_ERR ? "ERR " : ""); - } - - if (cmd->command != ATA_CMD_PACKET && -- (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | -- ATA_IDNF | ATA_ABORTED))) -+ (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | -+ ATA_ABORTED))) - ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", -- res->feature & ATA_ICRC ? "ICRC " : "", -- res->feature & ATA_UNC ? "UNC " : "", -- res->feature & ATA_AMNF ? "AMNF " : "", -- res->feature & ATA_IDNF ? "IDNF " : "", -- res->feature & ATA_ABORTED ? "ABRT " : ""); -+ res->error & ATA_ICRC ? "ICRC " : "", -+ res->error & ATA_UNC ? "UNC " : "", -+ res->error & ATA_AMNF ? "AMNF " : "", -+ res->error & ATA_IDNF ? "IDNF " : "", -+ res->error & ATA_ABORTED ? 
"ABRT " : ""); - #endif - } - } -diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c -index 8f3ff830ab0c6..b5aa525d87603 100644 ---- a/drivers/ata/libata-sata.c -+++ b/drivers/ata/libata-sata.c -@@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(ata_tf_to_fis); - - void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) - { -- tf->command = fis[2]; /* status */ -- tf->feature = fis[3]; /* error */ -+ tf->status = fis[2]; -+ tf->error = fis[3]; - - tf->lbal = fis[4]; - tf->lbam = fis[5]; -@@ -1402,8 +1402,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, - - *tag = buf[0] & 0x1f; - -- tf->command = buf[2]; -- tf->feature = buf[3]; -+ tf->status = buf[2]; -+ tf->error = buf[3]; - tf->lbal = buf[4]; - tf->lbam = buf[5]; - tf->lbah = buf[6]; -@@ -1413,7 +1413,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, - tf->hob_lbah = buf[10]; - tf->nsect = buf[12]; - tf->hob_nsect = buf[13]; -- if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) -+ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) && -+ (tf->status & ATA_SENSE)) - tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; - - return 0; -@@ -1477,8 +1478,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) - memcpy(&qc->result_tf, &tf, sizeof(tf)); - qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; - qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; -- if (dev->class == ATA_DEV_ZAC && -- ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) { -+ -+ /* -+ * If the device supports NCQ autosense, ata_eh_read_log_10h() will have -+ * stored the sense data in qc->result_tf.auxiliary. -+ */ -+ if (qc->result_tf.auxiliary) { - char sense_key, asc, ascq; - - sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; -diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c -index 1fb4611f7eeb9..fd9c768f31efe 100644 ---- a/drivers/ata/libata-scsi.c -+++ b/drivers/ata/libata-scsi.c -@@ -671,7 +671,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc) - */ - static void ata_dump_status(unsigned id, struct ata_taskfile *tf) - { -- u8 stat = tf->command, err = tf->feature; -+ u8 stat = tf->status, err = tf->error; - - pr_warn("ata%u: status=0x%02x { ", id, stat); - if (stat & ATA_BUSY) { -@@ -867,8 +867,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) - * onto sense key, asc & ascq. - */ - if (qc->err_mask || -- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { -- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, -+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { -+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, - &sense_key, &asc, &ascq, verbose); - ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); - } else { -@@ -897,13 +897,13 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) - * Copy registers into sense buffer. 
- */ - desc[2] = 0x00; -- desc[3] = tf->feature; /* == error reg */ -+ desc[3] = tf->error; - desc[5] = tf->nsect; - desc[7] = tf->lbal; - desc[9] = tf->lbam; - desc[11] = tf->lbah; - desc[12] = tf->device; -- desc[13] = tf->command; /* == status reg */ -+ desc[13] = tf->status; - - /* - * Fill in Extend bit, and the high order bytes -@@ -918,8 +918,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) - } - } else { - /* Fixed sense format */ -- desc[0] = tf->feature; -- desc[1] = tf->command; /* status */ -+ desc[0] = tf->error; -+ desc[1] = tf->status; - desc[2] = tf->device; - desc[3] = tf->nsect; - desc[7] = 0; -@@ -968,14 +968,14 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc) - * onto sense key, asc & ascq. - */ - if (qc->err_mask || -- tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { -- ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, -+ tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { -+ ata_to_sense_error(qc->ap->print_id, tf->status, tf->error, - &sense_key, &asc, &ascq, verbose); - ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq); - } else { - /* Could not decode error */ - ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n", -- tf->command, qc->err_mask); -+ tf->status, qc->err_mask); - ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0); - return; - } -@@ -2490,7 +2490,7 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) - - /* fill these in, for the case where they are -not- overwritten */ - cmd->sense_buffer[0] = 0x70; -- cmd->sense_buffer[2] = qc->tf.feature >> 4; -+ cmd->sense_buffer[2] = qc->tf.error >> 4; - - ata_qc_reinit(qc); - -@@ -2698,18 +2698,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) - return 0; - } - --static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) -+static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno) - { -- if (!sata_pmp_attached(ap)) { -- if (likely(devno >= 0 && -- devno < ata_link_max_devices(&ap->link))) -+ /* -+ * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), -+ * or 2 (IDE master + slave case). However, the former case includes -+ * libsas hosted devices which are numbered per scsi host, leading -+ * to devno potentially being larger than 0 but with each struct -+ * ata_device having its own struct ata_port and struct ata_link. -+ * To accommodate these, ignore devno and always use device number 0. -+ */ -+ if (likely(!sata_pmp_attached(ap))) { -+ int link_max_devices = ata_link_max_devices(&ap->link); -+ -+ if (link_max_devices == 1) -+ return &ap->link.device[0]; -+ -+ if (devno < link_max_devices) - return &ap->link.device[devno]; -- } else { -- if (likely(devno >= 0 && -- devno < ap->nr_pmp_links)) -- return &ap->pmp_link[devno].device[0]; -+ -+ return NULL; - } - -+ /* -+ * For PMP-attached devices, the device number corresponds to C -+ * (channel) of SCSI [H:C:I:L], indicating the port pmp link -+ * for the device. -+ */ -+ if (devno < ap->nr_pmp_links) -+ return &ap->pmp_link[devno].device[0]; -+ - return NULL; - } - -@@ -2826,8 +2844,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) - goto invalid_fld; - } - -- if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0) -- tf->protocol = ATA_PROT_NCQ_NODATA; -+ if ((cdb[2 + cdb_offset] & 0x3) == 0) { -+ /* -+ * When T_LENGTH is zero (No data is transferred), dir should -+ * be DMA_NONE. 
-+ */ -+ if (scmd->sc_data_direction != DMA_NONE) { -+ fp = 2 + cdb_offset; -+ goto invalid_fld; -+ } -+ -+ if (ata_is_ncq(tf->protocol)) -+ tf->protocol = ATA_PROT_NCQ_NODATA; -+ } - - /* enable LBA */ - tf->flags |= ATA_TFLAG_LBA; -@@ -3248,6 +3277,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf) - case REPORT_LUNS: - case REQUEST_SENSE: - case SYNCHRONIZE_CACHE: -+ case SYNCHRONIZE_CACHE_16: - case REZERO_UNIT: - case SEEK_6: - case SEEK_10: -@@ -3914,6 +3944,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) - return ata_scsi_write_same_xlat; - - case SYNCHRONIZE_CACHE: -+ case SYNCHRONIZE_CACHE_16: - if (ata_try_flush_cache(dev)) - return ata_scsi_flush_xlat; - break; -@@ -3975,44 +4006,51 @@ void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd) - - int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev) - { -+ struct ata_port *ap = dev->link->ap; - u8 scsi_op = scmd->cmnd[0]; - ata_xlat_func_t xlat_func; -- int rc = 0; -+ -+ /* -+ * scsi_queue_rq() will defer commands if scsi_host_in_recovery(). -+ * However, this check is done without holding the ap->lock (a libata -+ * specific lock), so we can have received an error irq since then, -+ * therefore we must check if EH is pending, while holding ap->lock. -+ */ -+ if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) -+ return SCSI_MLQUEUE_DEVICE_BUSY; -+ -+ if (unlikely(!scmd->cmd_len)) -+ goto bad_cdb_len; - - if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { -- if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) -+ if (unlikely(scmd->cmd_len > dev->cdb_len)) - goto bad_cdb_len; - - xlat_func = ata_get_xlat_func(dev, scsi_op); -- } else { -- if (unlikely(!scmd->cmd_len)) -- goto bad_cdb_len; -+ } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { -+ /* relay SCSI command to ATAPI device */ -+ int len = COMMAND_SIZE(scsi_op); - -- xlat_func = NULL; -- if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { -- /* relay SCSI command to ATAPI device */ -- int len = COMMAND_SIZE(scsi_op); -- if (unlikely(len > scmd->cmd_len || -- len > dev->cdb_len || -- scmd->cmd_len > ATAPI_CDB_LEN)) -- goto bad_cdb_len; -+ if (unlikely(len > scmd->cmd_len || -+ len > dev->cdb_len || -+ scmd->cmd_len > ATAPI_CDB_LEN)) -+ goto bad_cdb_len; - -- xlat_func = atapi_xlat; -- } else { -- /* ATA_16 passthru, treat as an ATA command */ -- if (unlikely(scmd->cmd_len > 16)) -- goto bad_cdb_len; -+ xlat_func = atapi_xlat; -+ } else { -+ /* ATA_16 passthru, treat as an ATA command */ -+ if (unlikely(scmd->cmd_len > 16)) -+ goto bad_cdb_len; - -- xlat_func = ata_get_xlat_func(dev, scsi_op); -- } -+ xlat_func = ata_get_xlat_func(dev, scsi_op); - } - - if (xlat_func) -- rc = ata_scsi_translate(dev, scmd, xlat_func); -- else -- ata_scsi_simulate(dev, scmd); -+ return ata_scsi_translate(dev, scmd, xlat_func); - -- return rc; -+ ata_scsi_simulate(dev, scmd); -+ -+ return 0; - - bad_cdb_len: - DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", -@@ -4159,6 +4197,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) - * turning this into a no-op. 
- */ - case SYNCHRONIZE_CACHE: -+ case SYNCHRONIZE_CACHE_16: - fallthrough; - - /* no-op's, complete with success */ -diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c -index b71ea4a680b01..8409e53b7b7a0 100644 ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -457,8 +457,8 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; - -- tf->command = ata_sff_check_status(ap); -- tf->feature = ioread8(ioaddr->error_addr); -+ tf->status = ata_sff_check_status(ap); -+ tf->error = ioread8(ioaddr->error_addr); - tf->nsect = ioread8(ioaddr->nsect_addr); - tf->lbal = ioread8(ioaddr->lbal_addr); - tf->lbam = ioread8(ioaddr->lbam_addr); -@@ -1837,7 +1837,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, - memset(&tf, 0, sizeof(tf)); - - ap->ops->sff_tf_read(ap, &tf); -- err = tf.feature; -+ err = tf.error; - if (r_err) - *r_err = err; - -diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c -index 34bb4608bdc67..60f22e1a4943f 100644 ---- a/drivers/ata/libata-transport.c -+++ b/drivers/ata/libata-transport.c -@@ -196,7 +196,7 @@ static struct { - { XFER_PIO_0, "XFER_PIO_0" }, - { XFER_PIO_SLOW, "XFER_PIO_SLOW" } - }; --ata_bitfield_name_match(xfer,ata_xfer_names) -+ata_bitfield_name_search(xfer, ata_xfer_names) - - /* - * ATA Port attributes -@@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent, - pm_runtime_enable(dev); - pm_runtime_forbid(dev); - -- transport_add_device(dev); -+ error = transport_add_device(dev); -+ if (error) -+ goto tport_transport_add_err; - transport_configure_device(dev); - - error = ata_tlink_add(&ap->link); -@@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent, - - tport_link_err: - transport_remove_device(dev); -+ tport_transport_add_err: - device_del(dev); - - tport_err: - transport_destroy_device(dev); - put_device(dev); -- ata_host_put(ap->host); - return error; - } - -@@ -426,7 +428,9 @@ int ata_tlink_add(struct ata_link *link) - goto tlink_err; - } - -- transport_add_device(dev); -+ error = transport_add_device(dev); -+ if (error) -+ goto tlink_transport_err; - transport_configure_device(dev); - - ata_for_each_dev(ata_dev, link, ALL) { -@@ -441,6 +445,7 @@ int ata_tlink_add(struct ata_link *link) - ata_tdev_delete(ata_dev); - } - transport_remove_device(dev); -+ tlink_transport_err: - device_del(dev); - tlink_err: - transport_destroy_device(dev); -@@ -678,7 +683,13 @@ static int ata_tdev_add(struct ata_device *ata_dev) - return error; - } - -- transport_add_device(dev); -+ error = transport_add_device(dev); -+ if (error) { -+ device_del(dev); -+ ata_tdev_free(ata_dev); -+ return error; -+ } -+ - transport_configure_device(dev); - return 0; - } -diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c -index 63f39440a9b42..4ba02f082f962 100644 ---- a/drivers/ata/pata_arasan_cf.c -+++ b/drivers/ata/pata_arasan_cf.c -@@ -528,7 +528,8 @@ static void data_xfer(struct work_struct *work) - /* dma_request_channel may sleep, so calling from process context */ - acdev->dma_chan = dma_request_chan(acdev->host->dev, "data"); - if (IS_ERR(acdev->dma_chan)) { -- dev_err(acdev->host->dev, "Unable to get dma_chan\n"); -+ dev_err_probe(acdev->host->dev, PTR_ERR(acdev->dma_chan), -+ "Unable to get dma_chan\n"); - acdev->dma_chan = NULL; - goto chan_request_fail; - } -diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c -index 46208ececbb6a..3fc26026014e2 100644 ---- a/drivers/ata/pata_ep93xx.c -+++ 
b/drivers/ata/pata_ep93xx.c -@@ -416,8 +416,8 @@ static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ep93xx_pata_data *drv_data = ap->host->private_data; - -- tf->command = ep93xx_pata_check_status(ap); -- tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); -+ tf->status = ep93xx_pata_check_status(ap); -+ tf->error = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE); - tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); - tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); - tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM); -diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c -index 121635aa8c00c..a7745a2be9056 100644 ---- a/drivers/ata/pata_falcon.c -+++ b/drivers/ata/pata_falcon.c -@@ -123,8 +123,8 @@ static int __init pata_falcon_init_one(struct platform_device *pdev) - struct resource *base_res, *ctl_res, *irq_res; - struct ata_host *host; - struct ata_port *ap; -- void __iomem *base; -- int irq = 0; -+ void __iomem *base, *ctl_base; -+ int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */ - - dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n"); - -@@ -165,26 +165,34 @@ static int __init pata_falcon_init_one(struct platform_device *pdev) - ap->pio_mask = ATA_PIO4; - ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY; - -- base = (void __iomem *)base_mem_res->start; - /* N.B. this assumes data_addr will be used for word-sized I/O only */ -- ap->ioaddr.data_addr = base + 0 + 0 * 4; -- ap->ioaddr.error_addr = base + 1 + 1 * 4; -- ap->ioaddr.feature_addr = base + 1 + 1 * 4; -- ap->ioaddr.nsect_addr = base + 1 + 2 * 4; -- ap->ioaddr.lbal_addr = base + 1 + 3 * 4; -- ap->ioaddr.lbam_addr = base + 1 + 4 * 4; -- ap->ioaddr.lbah_addr = base + 1 + 5 * 4; -- ap->ioaddr.device_addr = base + 1 + 6 * 4; -- ap->ioaddr.status_addr = base + 1 + 7 * 4; -- ap->ioaddr.command_addr = base + 1 + 7 * 4; -- -- base = (void __iomem *)ctl_mem_res->start; -- ap->ioaddr.altstatus_addr = base + 1; -- ap->ioaddr.ctl_addr = base + 1; -- -- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", -- (unsigned long)base_mem_res->start, -- (unsigned long)ctl_mem_res->start); -+ ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start; -+ -+ if (base_res) { /* only Q40 has IO resources */ -+ io_offset = 0x10000; -+ reg_shift = 0; -+ base = (void __iomem *)base_res->start; -+ ctl_base = (void __iomem *)ctl_res->start; -+ } else { -+ base = (void __iomem *)base_mem_res->start; -+ ctl_base = (void __iomem *)ctl_mem_res->start; -+ } -+ -+ ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift); -+ ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift); -+ ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift); -+ ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift); -+ ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift); -+ ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift); -+ ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift); -+ ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift); -+ ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift); -+ -+ ap->ioaddr.altstatus_addr = ctl_base + io_offset; -+ ap->ioaddr.ctl_addr = ctl_base + io_offset; -+ -+ ata_port_desc(ap, "cmd %px ctl %px data %px", -+ base, ctl_base, ap->ioaddr.data_addr); - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq_res && irq_res->start > 0) { -diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c -index 34cb104f6b43e..bc30e2f305beb 
100644 ---- a/drivers/ata/pata_ftide010.c -+++ b/drivers/ata/pata_ftide010.c -@@ -570,6 +570,7 @@ static struct platform_driver pata_ftide010_driver = { - }; - module_platform_driver(pata_ftide010_driver); - -+MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010"); - MODULE_AUTHOR("Linus Walleij "); - MODULE_LICENSE("GPL"); - MODULE_ALIAS("platform:" DRV_NAME); -diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c -index f242157bc81bb..9d371859e81ed 100644 ---- a/drivers/ata/pata_hpt37x.c -+++ b/drivers/ata/pata_hpt37x.c -@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) - irqmask &= ~0x10; - pci_write_config_byte(dev, 0x5a, irqmask); - -+ /* -+ * HPT371 chips physically have only one channel, the secondary one, -+ * but the primary channel registers do exist! Go figure... -+ * So, we manually disable the non-existing channel here -+ * (if the BIOS hasn't done this already). -+ */ -+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { -+ u8 mcr1; -+ -+ pci_read_config_byte(dev, 0x50, &mcr1); -+ mcr1 &= ~0x04; -+ pci_write_config_byte(dev, 0x50, mcr1); -+ } -+ - /* - * default to pci clock. make sure MA15/16 are set to output - * to prevent drives having problems with 40-pin cables. Needed -@@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) - - if ((freq >> 12) != 0xABCDE) { - int i; -- u8 sr; -+ u16 sr; - u32 total = 0; - - pr_warn("BIOS has not set timing clocks\n"); - - /* This is the process the HPT371 BIOS is reported to use */ - for (i = 0; i < 128; i++) { -- pci_read_config_byte(dev, 0x78, &sr); -+ pci_read_config_word(dev, 0x78, &sr); - total += sr & 0x1FF; - udelay(15); - } -diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c -index 99c63087c8ae9..17b557c91e1c7 100644 ---- a/drivers/ata/pata_ixp4xx_cf.c -+++ b/drivers/ata/pata_ixp4xx_cf.c -@@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev) - { - struct ixp4xx_pata *ixpp = ap->host->private_data; - -- ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n", -+ ata_dev_info(adev, "configured for PIO%d 8bit\n", - adev->pio_mode - XFER_PIO_0); - ixp4xx_set_8bit_timing(ixpp, adev->pio_mode); - } -@@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc, - struct ixp4xx_pata *ixpp = ap->host->private_data; - unsigned long flags; - -- ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE", -- buflen); -+ ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? 
"READ" : "WRITE", -+ buflen); - spin_lock_irqsave(ap->lock, flags); - - /* set the expansion bus in 16bit mode and restore -diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c -index 0a8bf09a5c19e..03c580625c2cc 100644 ---- a/drivers/ata/pata_legacy.c -+++ b/drivers/ata/pata_legacy.c -@@ -315,9 +315,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev) - outb(inb(0x1F4) & 0x07, 0x1F4); - - rt = inb(0x1F3); -- rt &= 0x07 << (3 * adev->devno); -+ rt &= ~(0x07 << (3 * !adev->devno)); - if (pio) -- rt |= (1 + 3 * pio) << (3 * adev->devno); -+ rt |= (1 + 3 * pio) << (3 * !adev->devno); -+ outb(rt, 0x1F3); - - udelay(100); - outb(inb(0x1F2) | 0x01, 0x1F2); -diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c -index 361597d14c569..d45a75bfc0169 100644 ---- a/drivers/ata/pata_marvell.c -+++ b/drivers/ata/pata_marvell.c -@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap) - switch(ap->port_no) - { - case 0: -+ if (!ap->ioaddr.bmdma_addr) -+ return ATA_CBL_PATA_UNK; - if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1) - return ATA_CBL_PATA40; - return ATA_CBL_PATA80; -diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c -index f4949e704356e..602472d4e693e 100644 ---- a/drivers/ata/pata_ns87415.c -+++ b/drivers/ata/pata_ns87415.c -@@ -260,12 +260,12 @@ static u8 ns87560_check_status(struct ata_port *ap) - * LOCKING: - * Inherited from caller. - */ --void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) -+static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; - -- tf->command = ns87560_check_status(ap); -- tf->feature = ioread8(ioaddr->error_addr); -+ tf->status = ns87560_check_status(ap); -+ tf->error = ioread8(ioaddr->error_addr); - tf->nsect = ioread8(ioaddr->nsect_addr); - tf->lbal = ioread8(ioaddr->lbal_addr); - tf->lbam = ioread8(ioaddr->lbam_addr); -diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c -index b5a3f710d76de..6c9f2efcedc11 100644 ---- a/drivers/ata/pata_octeon_cf.c -+++ b/drivers/ata/pata_octeon_cf.c -@@ -386,7 +386,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) - void __iomem *base = ap->ioaddr.data_addr; - - blob = __raw_readw(base + 0xc); -- tf->feature = blob >> 8; -+ tf->error = blob >> 8; - - blob = __raw_readw(base + 2); - tf->nsect = blob & 0xff; -@@ -398,7 +398,7 @@ static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf) - - blob = __raw_readw(base + 6); - tf->device = blob & 0xff; -- tf->command = blob >> 8; -+ tf->status = blob >> 8; - - if (tf->flags & ATA_TFLAG_LBA48) { - if (likely(ap->ioaddr.ctl_addr)) { -@@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev) - int i; - res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); - if (!res_dma) { -+ put_device(&dma_dev->dev); - of_node_put(dma_node); - return -EINVAL; - } - cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start, - resource_size(res_dma)); - if (!cf_port->dma_base) { -+ put_device(&dma_dev->dev); - of_node_put(dma_node); - return -EINVAL; - } -@@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev) - irq = i; - irq_handler = octeon_cf_interrupt; - } -+ put_device(&dma_dev->dev); - } - of_node_put(dma_node); - } -diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c -index 3da0e8e302861..149d771c61d67 100644 ---- a/drivers/ata/pata_samsung_cf.c -+++ b/drivers/ata/pata_samsung_cf.c 
-@@ -213,7 +213,7 @@ static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; - -- tf->feature = ata_inb(ap->host, ioaddr->error_addr); -+ tf->error = ata_inb(ap->host, ioaddr->error_addr); - tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr); - tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr); - tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr); -diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c -index 338c2e50f7591..29e2b0dfba309 100644 ---- a/drivers/ata/sata_dwc_460ex.c -+++ b/drivers/ata/sata_dwc_460ex.c -@@ -145,7 +145,11 @@ struct sata_dwc_device { - #endif - }; - --#define SATA_DWC_QCMD_MAX 32 -+/* -+ * Allow one extra special slot for commands and DMA management -+ * to account for libata internal commands. -+ */ -+#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1) - - struct sata_dwc_device_port { - struct sata_dwc_device *hsdev; -diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c -index e5838b23c9e0a..3b31a4f596d86 100644 ---- a/drivers/ata/sata_fsl.c -+++ b/drivers/ata/sata_fsl.c -@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host) - return 0; - } - -+static void sata_fsl_host_stop(struct ata_host *host) -+{ -+ struct sata_fsl_host_priv *host_priv = host->private_data; -+ -+ iounmap(host_priv->hcr_base); -+ kfree(host_priv); -+} -+ - /* - * scsi mid-layer and libata interface structures - */ -@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = { - .port_start = sata_fsl_port_start, - .port_stop = sata_fsl_port_stop, - -+ .host_stop = sata_fsl_host_stop, -+ - .pmp_attach = sata_fsl_pmp_attach, - .pmp_detach = sata_fsl_pmp_detach, - }; -@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev) - host_priv->ssr_base = ssr_base; - host_priv->csr_base = csr_base; - -- irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); -- if (!irq) { -- dev_err(&ofdev->dev, "invalid irq from platform\n"); -+ irq = platform_get_irq(ofdev, 0); -+ if (irq < 0) { -+ retval = irq; - goto error_exit_with_cleanup; - } - host_priv->irq = irq; -@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev) - - ata_host_detach(host); - -- irq_dispose_mapping(host_priv->irq); -- iounmap(host_priv->hcr_base); -- kfree(host_priv); -- - return 0; - } - -diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c -index f793564f3d787..6fd54e968d10a 100644 ---- a/drivers/ata/sata_gemini.c -+++ b/drivers/ata/sata_gemini.c -@@ -435,6 +435,7 @@ static struct platform_driver gemini_sata_driver = { - }; - module_platform_driver(gemini_sata_driver); - -+MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge"); - MODULE_AUTHOR("Linus Walleij "); - MODULE_LICENSE("GPL"); - MODULE_ALIAS("platform:" DRV_NAME); -diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c -index 8440203e835ed..f9bb3be4b939e 100644 ---- a/drivers/ata/sata_highbank.c -+++ b/drivers/ata/sata_highbank.c -@@ -400,7 +400,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); -- tf.command = ATA_BUSY; -+ tf.status = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - do { -diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c -index e517bd8822a5f..659f1a903298f 100644 ---- a/drivers/ata/sata_inic162x.c -+++ b/drivers/ata/sata_inic162x.c -@@ -559,13 +559,13 @@ static void inic_tf_read(struct 
ata_port *ap, struct ata_taskfile *tf) - { - void __iomem *port_base = inic_port_base(ap); - -- tf->feature = readb(port_base + PORT_TF_FEATURE); -+ tf->error = readb(port_base + PORT_TF_FEATURE); - tf->nsect = readb(port_base + PORT_TF_NSECT); - tf->lbal = readb(port_base + PORT_TF_LBAL); - tf->lbam = readb(port_base + PORT_TF_LBAM); - tf->lbah = readb(port_base + PORT_TF_LBAH); - tf->device = readb(port_base + PORT_TF_DEVICE); -- tf->command = readb(port_base + PORT_TF_COMMAND); -+ tf->status = readb(port_base + PORT_TF_COMMAND); - } - - static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) -@@ -582,11 +582,11 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) - */ - inic_tf_read(qc->ap, &tf); - -- if (!(tf.command & ATA_ERR)) -+ if (!(tf.status & ATA_ERR)) - return false; - -- rtf->command = tf.command; -- rtf->feature = tf.feature; -+ rtf->status = tf.status; -+ rtf->error = tf.error; - return true; - } - -diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c -index 44b0ed8f6bb8a..9759e24f718fc 100644 ---- a/drivers/ata/sata_rcar.c -+++ b/drivers/ata/sata_rcar.c -@@ -417,8 +417,8 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; - -- tf->command = sata_rcar_check_status(ap); -- tf->feature = ioread32(ioaddr->error_addr); -+ tf->status = sata_rcar_check_status(ap); -+ tf->error = ioread32(ioaddr->error_addr); - tf->nsect = ioread32(ioaddr->nsect_addr); - tf->lbal = ioread32(ioaddr->lbal_addr); - tf->lbam = ioread32(ioaddr->lbam_addr); -diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c -index f8552559db7f5..2e3418a82b445 100644 ---- a/drivers/ata/sata_svw.c -+++ b/drivers/ata/sata_svw.c -@@ -194,24 +194,24 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) - static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; -- u16 nsect, lbal, lbam, lbah, feature; -+ u16 nsect, lbal, lbam, lbah, error; - -- tf->command = k2_stat_check_status(ap); -+ tf->status = k2_stat_check_status(ap); - tf->device = readw(ioaddr->device_addr); -- feature = readw(ioaddr->error_addr); -+ error = readw(ioaddr->error_addr); - nsect = readw(ioaddr->nsect_addr); - lbal = readw(ioaddr->lbal_addr); - lbam = readw(ioaddr->lbam_addr); - lbah = readw(ioaddr->lbah_addr); - -- tf->feature = feature; -+ tf->error = error; - tf->nsect = nsect; - tf->lbal = lbal; - tf->lbam = lbam; - tf->lbah = lbah; - - if (tf->flags & ATA_TFLAG_LBA48) { -- tf->hob_feature = feature >> 8; -+ tf->hob_feature = error >> 8; - tf->hob_nsect = nsect >> 8; - tf->hob_lbal = lbal >> 8; - tf->hob_lbam = lbam >> 8; -diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c -index 8fa952cb9f7f4..87e4ed66b3064 100644 ---- a/drivers/ata/sata_vsc.c -+++ b/drivers/ata/sata_vsc.c -@@ -183,24 +183,24 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) - static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) - { - struct ata_ioports *ioaddr = &ap->ioaddr; -- u16 nsect, lbal, lbam, lbah, feature; -+ u16 nsect, lbal, lbam, lbah, error; - -- tf->command = ata_sff_check_status(ap); -+ tf->status = ata_sff_check_status(ap); - tf->device = readw(ioaddr->device_addr); -- feature = readw(ioaddr->error_addr); -+ error = readw(ioaddr->error_addr); - nsect = readw(ioaddr->nsect_addr); - lbal = readw(ioaddr->lbal_addr); - lbam = readw(ioaddr->lbam_addr); - lbah = readw(ioaddr->lbah_addr); - -- tf->feature = feature; 
-+ tf->error = error; - tf->nsect = nsect; - tf->lbal = lbal; - tf->lbam = lbam; - tf->lbah = lbah; - - if (tf->flags & ATA_TFLAG_LBA48) { -- tf->hob_feature = feature >> 8; -+ tf->hob_feature = error >> 8; - tf->hob_nsect = nsect >> 8; - tf->hob_lbal = lbal >> 8; - tf->hob_lbam = lbam >> 8; -diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c -index 422753d52244b..a31ffe16e626f 100644 ---- a/drivers/atm/eni.c -+++ b/drivers/atm/eni.c -@@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); - skb_data3 = skb->data[3]; - paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, - DMA_TO_DEVICE); -+ if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr)) -+ return enq_next; - ENI_PRV_PADDR(skb) = paddr; - /* prepare DMA queue entries */ - j = 0; -diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c -index 3bc3c314a467b..4f67404fe64c7 100644 ---- a/drivers/atm/firestream.c -+++ b/drivers/atm/firestream.c -@@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev) - dev->hw_base = pci_resource_start(pci_dev, 0); - - dev->base = ioremap(dev->hw_base, 0x1000); -+ if (!dev->base) -+ return 1; - - reset_chip (dev); - -diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c -index 81ce81a75fc67..49cb4537344aa 100644 ---- a/drivers/atm/idt77252.c -+++ b/drivers/atm/idt77252.c -@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card) - - recycle_rx_pool_skb(card, &vc->rcv.rx_pool); - } -+ kfree(vc); - } - } - } -@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card) - return 0; - } - -+static void -+close_card_ubr0(struct idt77252_dev *card) -+{ -+ struct vc_map *vc = card->vcs[0]; -+ -+ free_scq(card, vc->scq); -+ kfree(vc); -+} -+ - static int - idt77252_dev_open(struct idt77252_dev *card) - { -@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev) - struct idt77252_dev *card = dev->dev_data; - u32 conf; - -+ close_card_ubr0(card); - close_card_oam(card); - - conf = SAR_CFG_RXPTH | /* enable receive path */ -@@ -3752,6 +3763,7 @@ static void __exit idt77252_exit(void) - card = idt77252_chain; - dev = card->atmdev; - idt77252_chain = card->next; -+ del_timer_sync(&card->tst_timer); - - if (dev->phy->stop) - dev->phy->stop(dev); -diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c -index 304accde365c8..6c010d4efa4ae 100644 ---- a/drivers/auxdisplay/charlcd.c -+++ b/drivers/auxdisplay/charlcd.c -@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd) - * Since charlcd_init_display() needs to write data, we have to - * enable mark the LCD initialized just before. 
- */ -+ if (WARN_ON(!lcd->ops->init_display)) -+ return -EINVAL; -+ - ret = lcd->ops->init_display(lcd); - if (ret) - return ret; -diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c -index 8b2a0eb3f32a4..d56a5d508ccd7 100644 ---- a/drivers/auxdisplay/hd44780.c -+++ b/drivers/auxdisplay/hd44780.c -@@ -322,8 +322,10 @@ fail1: - static int hd44780_remove(struct platform_device *pdev) - { - struct charlcd *lcd = platform_get_drvdata(pdev); -+ struct hd44780_common *hdc = lcd->drvdata; - - charlcd_unregister(lcd); -+ kfree(hdc->hd44780); - kfree(lcd->drvdata); - - kfree(lcd); -diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c -index 1e69cc6d21a0d..ed58083499907 100644 ---- a/drivers/auxdisplay/ht16k33.c -+++ b/drivers/auxdisplay/ht16k33.c -@@ -219,6 +219,15 @@ static const struct backlight_ops ht16k33_bl_ops = { - .check_fb = ht16k33_bl_check_fb, - }; - -+/* -+ * Blank events will be passed to the actual device handling the backlight when -+ * we return zero here. -+ */ -+static int ht16k33_blank(int blank, struct fb_info *info) -+{ -+ return 0; -+} -+ - static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) - { - struct ht16k33_priv *priv = info->par; -@@ -231,6 +240,7 @@ static const struct fb_ops ht16k33_fb_ops = { - .owner = THIS_MODULE, - .fb_read = fb_sys_read, - .fb_write = fb_sys_write, -+ .fb_blank = ht16k33_blank, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, -@@ -413,6 +423,33 @@ static int ht16k33_probe(struct i2c_client *client, - if (err) - return err; - -+ /* Backlight */ -+ memset(&bl_props, 0, sizeof(struct backlight_properties)); -+ bl_props.type = BACKLIGHT_RAW; -+ bl_props.max_brightness = MAX_BRIGHTNESS; -+ -+ bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl", -+ &client->dev, priv, -+ &ht16k33_bl_ops, &bl_props); -+ if (IS_ERR(bl)) { -+ dev_err(&client->dev, "failed to register backlight\n"); -+ return PTR_ERR(bl); -+ } -+ -+ err = of_property_read_u32(node, "default-brightness-level", -+ &dft_brightness); -+ if (err) { -+ dft_brightness = MAX_BRIGHTNESS; -+ } else if (dft_brightness > MAX_BRIGHTNESS) { -+ dev_warn(&client->dev, -+ "invalid default brightness level: %u, using %u\n", -+ dft_brightness, MAX_BRIGHTNESS); -+ dft_brightness = MAX_BRIGHTNESS; -+ } -+ -+ bl->props.brightness = dft_brightness; -+ ht16k33_bl_update_status(bl); -+ - /* Framebuffer (2 bytes per column) */ - BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE); - fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); -@@ -445,6 +482,7 @@ static int ht16k33_probe(struct i2c_client *client, - fbdev->info->screen_size = HT16K33_FB_SIZE; - fbdev->info->fix = ht16k33_fb_fix; - fbdev->info->var = ht16k33_fb_var; -+ fbdev->info->bl_dev = bl; - fbdev->info->pseudo_palette = NULL; - fbdev->info->flags = FBINFO_FLAG_DEFAULT; - fbdev->info->par = priv; -@@ -460,34 +498,6 @@ static int ht16k33_probe(struct i2c_client *client, - goto err_fbdev_unregister; - } - -- /* Backlight */ -- memset(&bl_props, 0, sizeof(struct backlight_properties)); -- bl_props.type = BACKLIGHT_RAW; -- bl_props.max_brightness = MAX_BRIGHTNESS; -- -- bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl", -- &client->dev, priv, -- &ht16k33_bl_ops, &bl_props); -- if (IS_ERR(bl)) { -- dev_err(&client->dev, "failed to register backlight\n"); -- err = PTR_ERR(bl); -- goto err_fbdev_unregister; -- } -- -- err = of_property_read_u32(node, "default-brightness-level", -- &dft_brightness); -- if (err) { -- 
dft_brightness = MAX_BRIGHTNESS; -- } else if (dft_brightness > MAX_BRIGHTNESS) { -- dev_warn(&client->dev, -- "invalid default brightness level: %u, using %u\n", -- dft_brightness, MAX_BRIGHTNESS); -- dft_brightness = MAX_BRIGHTNESS; -- } -- -- bl->props.brightness = dft_brightness; -- ht16k33_bl_update_status(bl); -- - ht16k33_fb_queue(priv); - return 0; - -diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c -index 1cce409ce5cac..e33ce0151cdfd 100644 ---- a/drivers/auxdisplay/img-ascii-lcd.c -+++ b/drivers/auxdisplay/img-ascii-lcd.c -@@ -280,6 +280,16 @@ static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx, - if (msg[count - 1] == '\n') - count--; - -+ if (!count) { -+ /* clear the LCD */ -+ devm_kfree(&ctx->pdev->dev, ctx->message); -+ ctx->message = NULL; -+ ctx->message_len = 0; -+ memset(ctx->curr, ' ', ctx->cfg->num_chars); -+ ctx->cfg->update(ctx); -+ return 0; -+ } -+ - new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL); - if (!new_msg) - return -ENOMEM; -diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c -index 38ba08628ccb3..2578b2d454397 100644 ---- a/drivers/auxdisplay/lcd2s.c -+++ b/drivers/auxdisplay/lcd2s.c -@@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc) - if (buf[1] > 7) - return 1; - -- i = 0; -+ i = 2; - shift = 0; - value = 0; - while (*esc && i < LCD2S_CHARACTER_SIZE + 2) { -@@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, - I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) - return -EIO; - -+ lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL); -+ if (!lcd2s) -+ return -ENOMEM; -+ - /* Test, if the display is responding */ - err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF); - if (err < 0) -@@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, - if (!lcd) - return -ENOMEM; - -- lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL); -- if (!lcd2s) { -- err = -ENOMEM; -- goto fail1; -- } -- - lcd->drvdata = lcd2s; - lcd2s->i2c = i2c; - lcd2s->charlcd = lcd; -@@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c, - err = device_property_read_u32(&i2c->dev, "display-height-chars", - &lcd->height); - if (err) -- goto fail2; -+ goto fail1; - - err = device_property_read_u32(&i2c->dev, "display-width-chars", - &lcd->width); - if (err) -- goto fail2; -+ goto fail1; - - lcd->ops = &lcd2s_ops; - - err = charlcd_register(lcd2s->charlcd); - if (err) -- goto fail2; -+ goto fail1; - - i2c_set_clientdata(i2c, lcd2s); - return 0; - --fail2: -- kfree(lcd2s); - fail1: -- kfree(lcd); -+ charlcd_free(lcd2s->charlcd); - return err; - } - -@@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c) - struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c); - - charlcd_unregister(lcd2s->charlcd); -- kfree(lcd2s->charlcd); -+ charlcd_free(lcd2s->charlcd); - return 0; - } - -diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c -index 43407665918f3..31bd6f4e5dc47 100644 ---- a/drivers/base/arch_topology.c -+++ b/drivers/base/arch_topology.c -@@ -609,7 +609,7 @@ void update_siblings_masks(unsigned int cpuid) - for_each_online_cpu(cpu) { - cpu_topo = &cpu_topology[cpu]; - -- if (cpuid_topo->llc_id == cpu_topo->llc_id) { -+ if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { - cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); - cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); - } -@@ -690,4 +690,23 @@ void __init init_cpu_topology(void) - else if (of_have_populated_dt() && 
parse_dt_topology()) - reset_cpu_topology(); - } -+ -+void store_cpu_topology(unsigned int cpuid) -+{ -+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; -+ -+ if (cpuid_topo->package_id != -1) -+ goto topology_populated; -+ -+ cpuid_topo->thread_id = -1; -+ cpuid_topo->core_id = cpuid; -+ cpuid_topo->package_id = cpu_to_node(cpuid); -+ -+ pr_debug("CPU%u: package %d core %d thread %d\n", -+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id, -+ cpuid_topo->thread_id); -+ -+topology_populated: -+ update_siblings_masks(cpuid); -+} - #endif -diff --git a/drivers/base/bus.c b/drivers/base/bus.c -index bdc98c5713d5e..d171535fc18f5 100644 ---- a/drivers/base/bus.c -+++ b/drivers/base/bus.c -@@ -617,7 +617,7 @@ int bus_add_driver(struct device_driver *drv) - if (drv->bus->p->drivers_autoprobe) { - error = driver_attach(drv); - if (error) -- goto out_unregister; -+ goto out_del_list; - } - module_add_driver(drv->owner, drv); - -@@ -644,6 +644,8 @@ int bus_add_driver(struct device_driver *drv) - - return 0; - -+out_del_list: -+ klist_del(&priv->knode_bus); - out_unregister: - kobject_put(&priv->kobj); - /* drv->p is freed in driver_release() */ -diff --git a/drivers/base/class.c b/drivers/base/class.c -index 7476f393df977..0e44a68e90a02 100644 ---- a/drivers/base/class.c -+++ b/drivers/base/class.c -@@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) - } - error = class_add_groups(class_get(cls), cls->class_groups); - class_put(cls); -+ if (error) { -+ kobject_del(&cp->subsys.kobj); -+ kfree_const(cp->subsys.kobj.name); -+ kfree(cp); -+ } - return error; - } - EXPORT_SYMBOL_GPL(__class_register); -diff --git a/drivers/base/component.c b/drivers/base/component.c -index 5e79299f6c3ff..058f1a2cb2a9a 100644 ---- a/drivers/base/component.c -+++ b/drivers/base/component.c -@@ -130,7 +130,7 @@ static void component_master_debugfs_add(struct master *m) - - static void component_master_debugfs_del(struct master *m) - { -- debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir)); -+ debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir); - } - - #else -@@ -246,7 +246,7 @@ static int try_to_bring_up_master(struct master *master, - return 0; - } - -- if (!devres_open_group(master->parent, NULL, GFP_KERNEL)) -+ if (!devres_open_group(master->parent, master, GFP_KERNEL)) - return -ENOMEM; - - /* Found all components */ -@@ -258,6 +258,7 @@ static int try_to_bring_up_master(struct master *master, - return ret; - } - -+ devres_close_group(master->parent, NULL); - master->bound = true; - return 1; - } -@@ -282,7 +283,7 @@ static void take_down_master(struct master *master) - { - if (master->bound) { - master->ops->unbind(master->parent); -- devres_release_group(master->parent, NULL); -+ devres_release_group(master->parent, master); - master->bound = false; - } - } -diff --git a/drivers/base/core.c b/drivers/base/core.c -index 249da496581a0..adf003a7e8d6a 100644 ---- a/drivers/base/core.c -+++ b/drivers/base/core.c -@@ -485,8 +485,8 @@ static void device_link_release_fn(struct work_struct *work) - /* Ensure that all references to the link object have been dropped. 
*/ - device_link_synchronize_removal(); - -- while (refcount_dec_not_one(&link->rpm_active)) -- pm_runtime_put(link->supplier); -+ pm_runtime_release_supplier(link); -+ pm_request_idle(link->supplier); - - put_device(link->consumer); - put_device(link->supplier); -@@ -821,9 +821,7 @@ struct device_link *device_link_add(struct device *consumer, - dev_bus_name(supplier), dev_name(supplier), - dev_bus_name(consumer), dev_name(consumer)); - if (device_register(&link->link_dev)) { -- put_device(consumer); -- put_device(supplier); -- kfree(link); -+ put_device(&link->link_dev); - link = NULL; - goto out; - } -@@ -3330,7 +3328,7 @@ int device_add(struct device *dev) - /* we require the name to be set before, and pass NULL */ - error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); - if (error) { -- glue_dir = get_glue_dir(dev); -+ glue_dir = kobj; - goto Error; - } - -@@ -3430,6 +3428,7 @@ done: - device_pm_remove(dev); - dpm_sysfs_remove(dev); - DPMError: -+ dev->driver = NULL; - bus_remove_device(dev); - BusError: - device_remove_attrs(dev); -diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c -index 5fc258073bc75..46430cf2401e7 100644 ---- a/drivers/base/cpu.c -+++ b/drivers/base/cpu.c -@@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = { - bool cpu_is_hotpluggable(unsigned int cpu) - { - struct device *dev = get_cpu_device(cpu); -- return dev && container_of(dev, struct cpu, dev)->hotpluggable; -+ return dev && container_of(dev, struct cpu, dev)->hotpluggable -+ && tick_nohz_cpu_hotpluggable(cpu); - } - EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); - -@@ -564,6 +565,30 @@ ssize_t __weak cpu_show_srbds(struct device *dev, - return sysfs_emit(buf, "Not affected\n"); - } - -+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "Not affected\n"); -+} -+ -+ssize_t __weak cpu_show_retbleed(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "Not affected\n"); -+} -+ -+ssize_t __weak cpu_show_gds(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "Not affected\n"); -+} -+ -+ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "Not affected\n"); -+} -+ - static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); - static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); - static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); -@@ -573,6 +598,10 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); - static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); - static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); - static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); -+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); -+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); -+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); -+static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); - - static struct attribute *cpu_root_vulnerabilities_attrs[] = { - &dev_attr_meltdown.attr, -@@ -584,6 +613,10 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { - &dev_attr_tsx_async_abort.attr, - &dev_attr_itlb_multihit.attr, - &dev_attr_srbds.attr, -+ &dev_attr_mmio_stale_data.attr, -+ &dev_attr_retbleed.attr, -+ &dev_attr_gather_data_sampling.attr, -+ &dev_attr_spec_rstack_overflow.attr, - NULL - 
}; - -diff --git a/drivers/base/dd.c b/drivers/base/dd.c -index 68ea1f949daa9..ab0b2eb5fa07f 100644 ---- a/drivers/base/dd.c -+++ b/drivers/base/dd.c -@@ -257,7 +257,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs); - - int driver_deferred_probe_timeout; - EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); --static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue); - - static int __init deferred_probe_timeout_setup(char *str) - { -@@ -296,6 +295,7 @@ int driver_deferred_probe_check_state(struct device *dev) - - return -EPROBE_DEFER; - } -+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state); - - static void deferred_probe_timeout_work_func(struct work_struct *work) - { -@@ -311,7 +311,6 @@ static void deferred_probe_timeout_work_func(struct work_struct *work) - list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) - dev_info(p->device, "deferred probe pending\n"); - mutex_unlock(&deferred_probe_mutex); -- wake_up_all(&probe_timeout_waitqueue); - } - static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); - -@@ -353,7 +352,7 @@ late_initcall(deferred_probe_initcall); - - static void __exit deferred_probe_exit(void) - { -- debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL)); -+ debugfs_lookup_and_remove("devices_deferred", NULL); - } - __exitcall(deferred_probe_exit); - -@@ -629,6 +628,9 @@ re_probe: - drv->remove(dev); - - devres_release_all(dev); -+ arch_teardown_dma_ops(dev); -+ kfree(dev->dma_range_map); -+ dev->dma_range_map = NULL; - driver_sysfs_remove(dev); - dev->driver = NULL; - dev_set_drvdata(dev, NULL); -@@ -688,7 +690,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv) - calltime = ktime_get(); - ret = really_probe(dev, drv); - rettime = ktime_get(); -- pr_debug("probe of %s returned %d after %lld usecs\n", -+ /* -+ * Don't change this to pr_debug() because that requires -+ * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the -+ * kernel commandline to print this all the time at the debug level. -+ */ -+ printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n", - dev_name(dev), ret, ktime_us_delta(rettime, calltime)); - return ret; - } -@@ -715,9 +722,6 @@ int driver_probe_done(void) - */ - void wait_for_device_probe(void) - { -- /* wait for probe timeout */ -- wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout); -- - /* wait for the deferred probe workqueue to finish */ - flush_work(&deferred_probe_work); - -@@ -806,7 +810,7 @@ static int __init save_async_options(char *buf) - pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); - - strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); -- return 0; -+ return 1; - } - __setup("driver_async_probe=", save_async_options); - -@@ -878,6 +882,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) - dev_dbg(dev, "Device match requests probe deferral\n"); - dev->can_match = true; - driver_deferred_probe_add(dev); -+ /* -+ * Device can't match with a driver right now, so don't attempt -+ * to match or bind with other drivers on the bus. 
-+ */ -+ return ret; - } else if (ret < 0) { - dev_dbg(dev, "Bus failed to match device: %d\n", ret); - return ret; -@@ -940,6 +949,7 @@ out_unlock: - static int __device_attach(struct device *dev, bool allow_async) - { - int ret = 0; -+ bool async = false; - - device_lock(dev); - if (dev->p->dead) { -@@ -978,7 +988,7 @@ static int __device_attach(struct device *dev, bool allow_async) - */ - dev_dbg(dev, "scheduling asynchronous probe\n"); - get_device(dev); -- async_schedule_dev(__device_attach_async_helper, dev); -+ async = true; - } else { - pm_request_idle(dev); - } -@@ -988,6 +998,8 @@ static int __device_attach(struct device *dev, bool allow_async) - } - out_unlock: - device_unlock(dev); -+ if (async) -+ async_schedule_dev(__device_attach_async_helper, dev); - return ret; - } - -@@ -1092,6 +1104,7 @@ static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) - static int __driver_attach(struct device *dev, void *data) - { - struct device_driver *drv = data; -+ bool async = false; - int ret; - - /* -@@ -1112,9 +1125,18 @@ static int __driver_attach(struct device *dev, void *data) - dev_dbg(dev, "Device match requests probe deferral\n"); - dev->can_match = true; - driver_deferred_probe_add(dev); -+ /* -+ * Driver could not match with device, but may match with -+ * another device on the bus. -+ */ -+ return 0; - } else if (ret < 0) { - dev_dbg(dev, "Bus failed to match device: %d\n", ret); -- return ret; -+ /* -+ * Driver could not match with device, but may match with -+ * another device on the bus. -+ */ -+ return 0; - } /* ret > 0 means positive match */ - - if (driver_allows_async_probing(drv)) { -@@ -1130,9 +1152,11 @@ static int __driver_attach(struct device *dev, void *data) - if (!dev->driver) { - get_device(dev); - dev->p->async_driver = drv; -- async_schedule_dev(__driver_attach_async_helper, dev); -+ async = true; - } - device_unlock(dev); -+ if (async) -+ async_schedule_dev(__driver_attach_async_helper, dev); - return 0; - } - -@@ -1208,6 +1232,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) - - devres_release_all(dev); - arch_teardown_dma_ops(dev); -+ kfree(dev->dma_range_map); -+ dev->dma_range_map = NULL; - dev->driver = NULL; - dev_set_drvdata(dev, NULL); - if (dev->pm_domain && dev->pm_domain->dismiss) -diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c -index 8be352ab4ddbf..fa13ad49d2116 100644 ---- a/drivers/base/devtmpfs.c -+++ b/drivers/base/devtmpfs.c -@@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla - const char *dev_name, void *data) - { - struct super_block *s = mnt->mnt_sb; -+ int err; -+ - atomic_inc(&s->s_active); - down_write(&s->s_umount); -+ err = reconfigure_single(s, flags, data); -+ if (err < 0) { -+ deactivate_locked_super(s); -+ return ERR_PTR(err); -+ } - return dget(s->s_root); - } - -diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c -index bdbedc6660a87..04ede46f75123 100644 ---- a/drivers/base/firmware_loader/main.c -+++ b/drivers/base/firmware_loader/main.c -@@ -100,12 +100,15 @@ static struct firmware_cache fw_cache; - extern struct builtin_fw __start_builtin_fw[]; - extern struct builtin_fw __end_builtin_fw[]; - --static void fw_copy_to_prealloc_buf(struct firmware *fw, -+static bool fw_copy_to_prealloc_buf(struct firmware *fw, - void *buf, size_t size) - { -- if (!buf || size < fw->size) -- return; -+ if (!buf) -+ return true; -+ if (size < fw->size) -+ return false; - memcpy(buf, fw->data, 
fw->size); -+ return true; - } - - static bool fw_get_builtin_firmware(struct firmware *fw, const char *name, -@@ -117,9 +120,7 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name, - if (strcmp(name, b_fw->name) == 0) { - fw->size = b_fw->size; - fw->data = b_fw->data; -- fw_copy_to_prealloc_buf(fw, buf, size); -- -- return true; -+ return fw_copy_to_prealloc_buf(fw, buf, size); - } - } - -@@ -794,6 +795,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, - size_t offset, u32 opt_flags) - { - struct firmware *fw = NULL; -+ struct cred *kern_cred = NULL; -+ const struct cred *old_cred; - bool nondirect = false; - int ret; - -@@ -810,6 +813,18 @@ _request_firmware(const struct firmware **firmware_p, const char *name, - if (ret <= 0) /* error or already assigned */ - goto out; - -+ /* -+ * We are about to try to access the firmware file. Because we may have been -+ * called by a driver when serving an unrelated request from userland, we use -+ * the kernel credentials to read the file. -+ */ -+ kern_cred = prepare_kernel_cred(NULL); -+ if (!kern_cred) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ old_cred = override_creds(kern_cred); -+ - ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL); - - /* Only full reads can support decompression, platform, and sysfs. */ -@@ -835,6 +850,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name, - } else - ret = assign_fw(fw, device); - -+ revert_creds(old_cred); -+ put_cred(kern_cred); -+ - out: - if (ret < 0) { - fw_abort_batch_reqs(fw); -diff --git a/drivers/base/init.c b/drivers/base/init.c -index a9f57c22fb9e2..dab8aa5d28889 100644 ---- a/drivers/base/init.c -+++ b/drivers/base/init.c -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - - #include "base.h" - -@@ -20,6 +21,7 @@ - void __init driver_init(void) - { - /* These are the core pieces */ -+ bdi_init(&noop_backing_dev_info); - devtmpfs_init(); - devices_init(); - buses_init(); -diff --git a/drivers/base/memory.c b/drivers/base/memory.c -index 365cd4a7f2397..c778d1df74557 100644 ---- a/drivers/base/memory.c -+++ b/drivers/base/memory.c -@@ -555,6 +555,8 @@ static ssize_t hard_offline_page_store(struct device *dev, - return -EINVAL; - pfn >>= PAGE_SHIFT; - ret = memory_failure(pfn, 0); -+ if (ret == -EOPNOTSUPP) -+ ret = 0; - return ret ? 
ret : count; - } - -@@ -634,10 +636,9 @@ int register_memory(struct memory_block *memory) - } - ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory, - GFP_KERNEL)); -- if (ret) { -- put_device(&memory->dev); -+ if (ret) - device_unregister(&memory->dev); -- } -+ - return ret; - } - -@@ -663,14 +664,16 @@ static int init_memory_block(unsigned long block_id, unsigned long state, - mem->nr_vmemmap_pages = nr_vmemmap_pages; - INIT_LIST_HEAD(&mem->group_next); - -+ ret = register_memory(mem); -+ if (ret) -+ return ret; -+ - if (group) { - mem->group = group; - list_add(&mem->group_next, &group->memory_blocks); - } - -- ret = register_memory(mem); -- -- return ret; -+ return 0; - } - - static int add_memory_block(unsigned long base_section_nr) -diff --git a/drivers/base/node.c b/drivers/base/node.c -index c56d34f8158f7..5366d1b5359c8 100644 ---- a/drivers/base/node.c -+++ b/drivers/base/node.c -@@ -45,7 +45,7 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj, - return n; - } - --static BIN_ATTR_RO(cpumap, 0); -+static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES); - - static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, -@@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj, - return n; - } - --static BIN_ATTR_RO(cpulist, 0); -+static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES); - - /** - * struct node_access_nodes - Access class device to hold user visible -@@ -679,6 +679,7 @@ static int register_node(struct node *node, int num) - */ - void unregister_node(struct node *node) - { -+ compaction_unregister_node(node); - hugetlb_unregister_node(node); /* no-op, if memoryless node */ - node_remove_accesses(node); - node_remove_caches(node); -diff --git a/drivers/base/platform.c b/drivers/base/platform.c -index 652531f67135a..ac5cf1a8d79ab 100644 ---- a/drivers/base/platform.c -+++ b/drivers/base/platform.c -@@ -1427,7 +1427,9 @@ static void platform_remove(struct device *_dev) - struct platform_driver *drv = to_platform_driver(_dev->driver); - struct platform_device *dev = to_platform_device(_dev); - -- if (drv->remove) { -+ if (drv->remove_new) { -+ drv->remove_new(dev); -+ } else if (drv->remove) { - int ret = drv->remove(dev); - - if (ret) -diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c -index 5db704f02e712..6ffee01e174da 100644 ---- a/drivers/base/power/domain.c -+++ b/drivers/base/power/domain.c -@@ -217,10 +217,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd); - - static void genpd_debug_remove(struct generic_pm_domain *genpd) - { -- struct dentry *d; -+ if (!genpd_debugfs_dir) -+ return; - -- d = debugfs_lookup(genpd->name, genpd_debugfs_dir); -- debugfs_remove(d); -+ debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir); - } - - static void genpd_update_accounting(struct generic_pm_domain *genpd) -@@ -1978,6 +1978,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, - genpd->device_count = 0; - genpd->max_off_time_ns = -1; - genpd->max_off_time_changed = true; -+ genpd->next_wakeup = KTIME_MAX; - genpd->provider = NULL; - genpd->has_provider = false; - genpd->accounting_time = ktime_get(); -@@ -2058,9 +2059,9 @@ static int genpd_remove(struct generic_pm_domain *genpd) - kfree(link); - } - -- genpd_debug_remove(genpd); - list_del(&genpd->gpd_list_node); - genpd_unlock(genpd); -+ genpd_debug_remove(genpd); - cancel_work_sync(&genpd->power_off_work); - if (genpd_is_cpu_domain(genpd)) - 
free_cpumask_var(genpd->cpus); -@@ -2859,10 +2860,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, - - err = of_property_read_u32(state_node, "min-residency-us", &residency); - if (!err) -- genpd_state->residency_ns = 1000 * residency; -+ genpd_state->residency_ns = 1000LL * residency; - -- genpd_state->power_on_latency_ns = 1000 * exit_latency; -- genpd_state->power_off_latency_ns = 1000 * entry_latency; -+ genpd_state->power_on_latency_ns = 1000LL * exit_latency; -+ genpd_state->power_off_latency_ns = 1000LL * entry_latency; - genpd_state->fwnode = &state_node->fwnode; - - return 0; -@@ -2885,6 +2886,10 @@ static int genpd_iterate_idle_states(struct device_node *dn, - np = it.node; - if (!of_match_node(idle_state_match, np)) - continue; -+ -+ if (!of_device_is_available(np)) -+ continue; -+ - if (states) { - ret = genpd_parse_state(&states[i], np); - if (ret) { -diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c -index cbea78e79f3df..8c4819fe73d4c 100644 ---- a/drivers/base/power/main.c -+++ b/drivers/base/power/main.c -@@ -711,6 +711,7 @@ static void dpm_noirq_resume_devices(pm_message_t state) - dev = to_device(dpm_noirq_list.next); - get_device(dev); - list_move_tail(&dev->power.entry, &dpm_late_early_list); -+ - mutex_unlock(&dpm_list_mtx); - - if (!is_async(dev)) { -@@ -725,8 +726,9 @@ static void dpm_noirq_resume_devices(pm_message_t state) - } - } - -- mutex_lock(&dpm_list_mtx); - put_device(dev); -+ -+ mutex_lock(&dpm_list_mtx); - } - mutex_unlock(&dpm_list_mtx); - async_synchronize_full(); -@@ -852,6 +854,7 @@ void dpm_resume_early(pm_message_t state) - dev = to_device(dpm_late_early_list.next); - get_device(dev); - list_move_tail(&dev->power.entry, &dpm_suspended_list); -+ - mutex_unlock(&dpm_list_mtx); - - if (!is_async(dev)) { -@@ -865,8 +868,10 @@ void dpm_resume_early(pm_message_t state) - pm_dev_err(dev, state, " early", error); - } - } -- mutex_lock(&dpm_list_mtx); -+ - put_device(dev); -+ -+ mutex_lock(&dpm_list_mtx); - } - mutex_unlock(&dpm_list_mtx); - async_synchronize_full(); -@@ -1029,7 +1034,12 @@ void dpm_resume(pm_message_t state) - } - if (!list_empty(&dev->power.entry)) - list_move_tail(&dev->power.entry, &dpm_prepared_list); -+ -+ mutex_unlock(&dpm_list_mtx); -+ - put_device(dev); -+ -+ mutex_lock(&dpm_list_mtx); - } - mutex_unlock(&dpm_list_mtx); - async_synchronize_full(); -@@ -1051,7 +1061,7 @@ static void device_complete(struct device *dev, pm_message_t state) - const char *info = NULL; - - if (dev->power.syscore) -- return; -+ goto out; - - device_lock(dev); - -@@ -1081,6 +1091,7 @@ static void device_complete(struct device *dev, pm_message_t state) - - device_unlock(dev); - -+out: - pm_runtime_put(dev); - } - -@@ -1106,14 +1117,16 @@ void dpm_complete(pm_message_t state) - get_device(dev); - dev->power.is_prepared = false; - list_move(&dev->power.entry, &list); -+ - mutex_unlock(&dpm_list_mtx); - - trace_device_pm_callback_start(dev, "", state.event); - device_complete(dev, state); - trace_device_pm_callback_end(dev, 0); - -- mutex_lock(&dpm_list_mtx); - put_device(dev); -+ -+ mutex_lock(&dpm_list_mtx); - } - list_splice(&list, &dpm_list); - mutex_unlock(&dpm_list_mtx); -@@ -1298,17 +1311,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state) - error = device_suspend_noirq(dev); - - mutex_lock(&dpm_list_mtx); -+ - if (error) { - pm_dev_err(dev, state, " noirq", error); - dpm_save_failed_dev(dev_name(dev)); -- put_device(dev); -- break; -- } -- if (!list_empty(&dev->power.entry)) -+ } else if 
(!list_empty(&dev->power.entry)) { - list_move(&dev->power.entry, &dpm_noirq_list); -+ } -+ -+ mutex_unlock(&dpm_list_mtx); -+ - put_device(dev); - -- if (async_error) -+ mutex_lock(&dpm_list_mtx); -+ -+ if (error || async_error) - break; - } - mutex_unlock(&dpm_list_mtx); -@@ -1475,23 +1492,28 @@ int dpm_suspend_late(pm_message_t state) - struct device *dev = to_device(dpm_suspended_list.prev); - - get_device(dev); -+ - mutex_unlock(&dpm_list_mtx); - - error = device_suspend_late(dev); - - mutex_lock(&dpm_list_mtx); -+ - if (!list_empty(&dev->power.entry)) - list_move(&dev->power.entry, &dpm_late_early_list); - - if (error) { - pm_dev_err(dev, state, " late", error); - dpm_save_failed_dev(dev_name(dev)); -- put_device(dev); -- break; - } -+ -+ mutex_unlock(&dpm_list_mtx); -+ - put_device(dev); - -- if (async_error) -+ mutex_lock(&dpm_list_mtx); -+ -+ if (error || async_error) - break; - } - mutex_unlock(&dpm_list_mtx); -@@ -1751,21 +1773,27 @@ int dpm_suspend(pm_message_t state) - struct device *dev = to_device(dpm_prepared_list.prev); - - get_device(dev); -+ - mutex_unlock(&dpm_list_mtx); - - error = device_suspend(dev); - - mutex_lock(&dpm_list_mtx); -+ - if (error) { - pm_dev_err(dev, state, "", error); - dpm_save_failed_dev(dev_name(dev)); -- put_device(dev); -- break; -- } -- if (!list_empty(&dev->power.entry)) -+ } else if (!list_empty(&dev->power.entry)) { - list_move(&dev->power.entry, &dpm_suspended_list); -+ } -+ -+ mutex_unlock(&dpm_list_mtx); -+ - put_device(dev); -- if (async_error) -+ -+ mutex_lock(&dpm_list_mtx); -+ -+ if (error || async_error) - break; - } - mutex_unlock(&dpm_list_mtx); -@@ -1794,9 +1822,6 @@ static int device_prepare(struct device *dev, pm_message_t state) - int (*callback)(struct device *) = NULL; - int ret = 0; - -- if (dev->power.syscore) -- return 0; -- - /* - * If a device's parent goes into runtime suspend at the wrong time, - * it won't be possible to resume the device. 
To prevent this we -@@ -1805,6 +1830,9 @@ static int device_prepare(struct device *dev, pm_message_t state) - */ - pm_runtime_get_noresume(dev); - -+ if (dev->power.syscore) -+ return 0; -+ - device_lock(dev); - - dev->power.wakeup_path = false; -@@ -1878,10 +1906,11 @@ int dpm_prepare(pm_message_t state) - device_block_probing(); - - mutex_lock(&dpm_list_mtx); -- while (!list_empty(&dpm_list)) { -+ while (!list_empty(&dpm_list) && !error) { - struct device *dev = to_device(dpm_list.next); - - get_device(dev); -+ - mutex_unlock(&dpm_list_mtx); - - trace_device_pm_callback_start(dev, "", state.event); -@@ -1889,21 +1918,23 @@ int dpm_prepare(pm_message_t state) - trace_device_pm_callback_end(dev, error); - - mutex_lock(&dpm_list_mtx); -- if (error) { -- if (error == -EAGAIN) { -- put_device(dev); -- error = 0; -- continue; -- } -+ -+ if (!error) { -+ dev->power.is_prepared = true; -+ if (!list_empty(&dev->power.entry)) -+ list_move_tail(&dev->power.entry, &dpm_prepared_list); -+ } else if (error == -EAGAIN) { -+ error = 0; -+ } else { - dev_info(dev, "not prepared for power transition: code %d\n", - error); -- put_device(dev); -- break; - } -- dev->power.is_prepared = true; -- if (!list_empty(&dev->power.entry)) -- list_move_tail(&dev->power.entry, &dpm_prepared_list); -+ -+ mutex_unlock(&dpm_list_mtx); -+ - put_device(dev); -+ -+ mutex_lock(&dpm_list_mtx); - } - mutex_unlock(&dpm_list_mtx); - trace_suspend_resume(TPS("dpm_prepare"), state.event, false); -@@ -1991,7 +2022,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) - - void device_pm_check_callbacks(struct device *dev) - { -- spin_lock_irq(&dev->power.lock); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dev->power.lock, flags); - dev->power.no_pm_callbacks = - (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && - !dev->bus->suspend && !dev->bus->resume)) && -@@ -2000,7 +2033,7 @@ void device_pm_check_callbacks(struct device *dev) - (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && - (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && - !dev->driver->suspend && !dev->driver->resume)); -- spin_unlock_irq(&dev->power.lock); -+ spin_unlock_irqrestore(&dev->power.lock, flags); - } - - bool dev_pm_skip_suspend(struct device *dev) -diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h -index 54292cdd7808b..922ed457db191 100644 ---- a/drivers/base/power/power.h -+++ b/drivers/base/power/power.h -@@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev); - - #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) - #define WAKE_IRQ_DEDICATED_MANAGED BIT(1) -+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2) - #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ -- WAKE_IRQ_DEDICATED_MANAGED) -+ WAKE_IRQ_DEDICATED_MANAGED | \ -+ WAKE_IRQ_DEDICATED_REVERSE) -+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3) - - struct wake_irq { - struct device *dev; -@@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); - extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); - extern void dev_pm_enable_wake_irq_check(struct device *dev, - bool can_change_status); --extern void dev_pm_disable_wake_irq_check(struct device *dev); -+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable); -+extern void dev_pm_enable_wake_irq_complete(struct device *dev); - - #ifdef CONFIG_PM_SLEEP - -diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c -index ec94049442b99..5824d41a0b745 100644 ---- a/drivers/base/power/runtime.c -+++ b/drivers/base/power/runtime.c -@@ 
-305,16 +305,34 @@ static int rpm_get_suppliers(struct device *dev) - return 0; - } - -+/** -+ * pm_runtime_release_supplier - Drop references to device link's supplier. -+ * @link: Target device link. -+ * -+ * Drop all runtime PM references associated with @link to its supplier device. -+ */ -+void pm_runtime_release_supplier(struct device_link *link) -+{ -+ struct device *supplier = link->supplier; -+ -+ /* -+ * The additional power.usage_count check is a safety net in case -+ * the rpm_active refcount becomes saturated, in which case -+ * refcount_dec_not_one() would return true forever, but it is not -+ * strictly necessary. -+ */ -+ while (refcount_dec_not_one(&link->rpm_active) && -+ atomic_read(&supplier->power.usage_count) > 0) -+ pm_runtime_put_noidle(supplier); -+} -+ - static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) - { - struct device_link *link; - - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) { -- -- while (refcount_dec_not_one(&link->rpm_active)) -- pm_runtime_put_noidle(link->supplier); -- -+ pm_runtime_release_supplier(link); - if (try_to_suspend) - pm_request_idle(link->supplier); - } -@@ -466,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags) - - dev->power.idle_notification = true; - -- retval = __rpm_callback(callback, dev); -+ if (dev->power.irq_safe) -+ spin_unlock(&dev->power.lock); -+ else -+ spin_unlock_irq(&dev->power.lock); -+ -+ retval = callback(dev); -+ -+ if (dev->power.irq_safe) -+ spin_lock(&dev->power.lock); -+ else -+ spin_lock_irq(&dev->power.lock); - - dev->power.idle_notification = false; - wake_up_all(&dev->power.wait_queue); -@@ -645,6 +673,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) - if (retval) - goto fail; - -+ dev_pm_enable_wake_irq_complete(dev); -+ - no_callback: - __update_runtime_status(dev, RPM_SUSPENDED); - pm_runtime_deactivate_timer(dev); -@@ -690,7 +720,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) - return retval; - - fail: -- dev_pm_disable_wake_irq_check(dev); -+ dev_pm_disable_wake_irq_check(dev, true); - __update_runtime_status(dev, RPM_ACTIVE); - dev->power.deferred_resume = false; - wake_up_all(&dev->power.wait_queue); -@@ -873,7 +903,7 @@ static int rpm_resume(struct device *dev, int rpmflags) - - callback = RPM_GET_CALLBACK(dev, runtime_resume); - -- dev_pm_disable_wake_irq_check(dev); -+ dev_pm_disable_wake_irq_check(dev, false); - retval = rpm_callback(callback, dev); - if (retval) { - __update_runtime_status(dev, RPM_SUSPENDED); -@@ -1770,9 +1800,8 @@ void pm_runtime_drop_link(struct device_link *link) - return; - - pm_runtime_drop_link_count(link->consumer); -- -- while (refcount_dec_not_one(&link->rpm_active)) -- pm_runtime_put(link->supplier); -+ pm_runtime_release_supplier(link); -+ pm_request_idle(link->supplier); - } - - static bool pm_runtime_need_not_resume(struct device *dev) -diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c -index 94665037f4a35..72b7a92337b18 100644 ---- a/drivers/base/power/trace.c -+++ b/drivers/base/power/trace.c -@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void) - struct rtc_time time; - unsigned int val; - -- mc146818_get_time(&time); -+ if (mc146818_get_time(&time) < 0) { -+ pr_err("Unable to read current time from RTC\n"); -+ return 0; -+ } -+ - pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); - val = time.tm_year; /* 100 years */ - if (val > 100) -diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c -index 
b91a3a9bf9f6d..6f2cdd8643afa 100644 ---- a/drivers/base/power/wakeirq.c -+++ b/drivers/base/power/wakeirq.c -@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) - return IRQ_HANDLED; - } - --/** -- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt -- * @dev: Device entry -- * @irq: Device wake-up interrupt -- * -- * Unless your hardware has separate wake-up interrupts in addition -- * to the device IO interrupts, you don't need this. -- * -- * Sets up a threaded interrupt handler for a device that has -- * a dedicated wake-up interrupt in addition to the device IO -- * interrupt. -- * -- * The interrupt starts disabled, and needs to be managed for -- * the device by the bus code or the device driver using -- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq() -- * functions. -- */ --int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) -+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag) - { - struct wake_irq *wirq; - int err; -@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) - if (err) - goto err_free_irq; - -- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; -+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag; - - return err; - -@@ -210,8 +193,57 @@ err_free: - - return err; - } -+ -+ -+/** -+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt -+ * @dev: Device entry -+ * @irq: Device wake-up interrupt -+ * -+ * Unless your hardware has separate wake-up interrupts in addition -+ * to the device IO interrupts, you don't need this. -+ * -+ * Sets up a threaded interrupt handler for a device that has -+ * a dedicated wake-up interrupt in addition to the device IO -+ * interrupt. -+ * -+ * The interrupt starts disabled, and needs to be managed for -+ * the device by the bus code or the device driver using -+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() -+ * functions. -+ */ -+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) -+{ -+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0); -+} - EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); - -+/** -+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt -+ * with reverse enable ordering -+ * @dev: Device entry -+ * @irq: Device wake-up interrupt -+ * -+ * Unless your hardware has separate wake-up interrupts in addition -+ * to the device IO interrupts, you don't need this. -+ * -+ * Sets up a threaded interrupt handler for a device that has a dedicated -+ * wake-up interrupt in addition to the device IO interrupt. It sets -+ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() -+ * to enable dedicated wake-up interrupt after running the runtime suspend -+ * callback for @dev. -+ * -+ * The interrupt starts disabled, and needs to be managed for -+ * the device by the bus code or the device driver using -+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() -+ * functions. 
-+ */ -+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) -+{ -+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); -+} -+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); -+ - /** - * dev_pm_enable_wake_irq - Enable device wake-up interrupt - * @dev: Device -@@ -282,25 +314,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev, - return; - - enable: -- enable_irq(wirq->irq); -+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { -+ enable_irq(wirq->irq); -+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; -+ } - } - - /** - * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt - * @dev: Device -+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE - * - * Disables wake-up interrupt conditionally based on status. - * Should be only called from rpm_suspend() and rpm_resume() path. - */ --void dev_pm_disable_wake_irq_check(struct device *dev) -+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) - { - struct wake_irq *wirq = dev->power.wakeirq; - - if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) - return; - -- if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) -+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) -+ return; -+ -+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { -+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; - disable_irq_nosync(wirq->irq); -+ } -+} -+ -+/** -+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before -+ * @dev: Device using the wake IRQ -+ * -+ * Enable wake IRQ conditionally based on status, mainly used if want to -+ * enable wake IRQ after running ->runtime_suspend() which depends on -+ * WAKE_IRQ_DEDICATED_REVERSE. -+ * -+ * Should be only called from rpm_suspend() path. -+ */ -+void dev_pm_enable_wake_irq_complete(struct device *dev) -+{ -+ struct wake_irq *wirq = dev->power.wakeirq; -+ -+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) -+ return; -+ -+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && -+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) -+ enable_irq(wirq->irq); - } - - /** -@@ -317,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) - - if (device_may_wakeup(wirq->dev)) { - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && -- !pm_runtime_status_suspended(wirq->dev)) -+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) - enable_irq(wirq->irq); - - enable_irq_wake(wirq->irq); -@@ -340,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) - disable_irq_wake(wirq->irq); - - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && -- !pm_runtime_status_suspended(wirq->dev)) -+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) - disable_irq_nosync(wirq->irq); - } - } -diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c -index 99bda0da23a82..8666590201c9a 100644 ---- a/drivers/base/power/wakeup.c -+++ b/drivers/base/power/wakeup.c -@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; - bool events_check_enabled __read_mostly; - - /* First wakeup IRQ seen by the kernel in the last cycle. */ --unsigned int pm_wakeup_irq __read_mostly; -+static unsigned int wakeup_irq[2] __read_mostly; -+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); - - /* If greater than 0 and the system is suspending, terminate the suspend. 
*/ - static atomic_t pm_abort_suspend __read_mostly; -@@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void) - atomic_dec_if_positive(&pm_abort_suspend); - } - --void pm_wakeup_clear(bool reset) -+void pm_wakeup_clear(unsigned int irq_number) - { -- pm_wakeup_irq = 0; -- if (reset) -+ raw_spin_lock_irq(&wakeup_irq_lock); -+ -+ if (irq_number && wakeup_irq[0] == irq_number) -+ wakeup_irq[0] = wakeup_irq[1]; -+ else -+ wakeup_irq[0] = 0; -+ -+ wakeup_irq[1] = 0; -+ -+ raw_spin_unlock_irq(&wakeup_irq_lock); -+ -+ if (!irq_number) - atomic_set(&pm_abort_suspend, 0); - } - - void pm_system_irq_wakeup(unsigned int irq_number) - { -- if (pm_wakeup_irq == 0) { -- pm_wakeup_irq = irq_number; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags); -+ -+ if (wakeup_irq[0] == 0) -+ wakeup_irq[0] = irq_number; -+ else if (wakeup_irq[1] == 0) -+ wakeup_irq[1] = irq_number; -+ else -+ irq_number = 0; -+ -+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); -+ -+ if (irq_number) - pm_system_wakeup(); -- } -+} -+ -+unsigned int pm_wakeup_irq(void) -+{ -+ return wakeup_irq[0]; - } - - /** -diff --git a/drivers/base/property.c b/drivers/base/property.c -index 453918eb7390c..17a648d643566 100644 ---- a/drivers/base/property.c -+++ b/drivers/base/property.c -@@ -48,12 +48,14 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode, - { - bool ret; - -+ if (IS_ERR_OR_NULL(fwnode)) -+ return false; -+ - ret = fwnode_call_bool_op(fwnode, property_present, propname); -- if (ret == false && !IS_ERR_OR_NULL(fwnode) && -- !IS_ERR_OR_NULL(fwnode->secondary)) -- ret = fwnode_call_bool_op(fwnode->secondary, property_present, -- propname); -- return ret; -+ if (ret) -+ return ret; -+ -+ return fwnode_call_bool_op(fwnode->secondary, property_present, propname); - } - EXPORT_SYMBOL_GPL(fwnode_property_present); - -@@ -233,15 +235,16 @@ static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, - { - int ret; - -+ if (IS_ERR_OR_NULL(fwnode)) -+ return -EINVAL; -+ - ret = fwnode_call_int_op(fwnode, property_read_int_array, propname, - elem_size, val, nval); -- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && -- !IS_ERR_OR_NULL(fwnode->secondary)) -- ret = fwnode_call_int_op( -- fwnode->secondary, property_read_int_array, propname, -- elem_size, val, nval); -+ if (ret != -EINVAL) -+ return ret; - -- return ret; -+ return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname, -+ elem_size, val, nval); - } - - /** -@@ -372,14 +375,16 @@ int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, - { - int ret; - -+ if (IS_ERR_OR_NULL(fwnode)) -+ return -EINVAL; -+ - ret = fwnode_call_int_op(fwnode, property_read_string_array, propname, - val, nval); -- if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && -- !IS_ERR_OR_NULL(fwnode->secondary)) -- ret = fwnode_call_int_op(fwnode->secondary, -- property_read_string_array, propname, -- val, nval); -- return ret; -+ if (ret != -EINVAL) -+ return ret; -+ -+ return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname, -+ val, nval); - } - EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); - -@@ -479,7 +484,20 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, - unsigned int nargs, unsigned int index, - struct fwnode_reference_args *args) - { -- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, -+ int ret; -+ -+ if (IS_ERR_OR_NULL(fwnode)) -+ return -ENOENT; -+ -+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, 
nargs_prop, -+ nargs, index, args); -+ if (ret == 0) -+ return ret; -+ -+ if (IS_ERR_OR_NULL(fwnode->secondary)) -+ return ret; -+ -+ return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop, - nargs, index, args); - } - EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); -@@ -675,12 +693,13 @@ EXPORT_SYMBOL_GPL(fwnode_count_parents); - struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, - unsigned int depth) - { -- unsigned int i; -- - fwnode_handle_get(fwnode); - -- for (i = 0; i < depth && fwnode; i++) -+ do { -+ if (depth-- == 0) -+ break; - fwnode = fwnode_get_next_parent(fwnode); -+ } while (fwnode); - - return fwnode; - } -@@ -699,17 +718,17 @@ EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); - bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor, - struct fwnode_handle *test_child) - { -- if (!test_ancestor) -+ if (IS_ERR_OR_NULL(test_ancestor)) - return false; - - fwnode_handle_get(test_child); -- while (test_child) { -+ do { - if (test_child == test_ancestor) { - fwnode_handle_put(test_child); - return true; - } - test_child = fwnode_get_next_parent(test_child); -- } -+ } while (test_child); - return false; - } - -@@ -738,7 +757,7 @@ fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode, - { - struct fwnode_handle *next_child = child; - -- if (!fwnode) -+ if (IS_ERR_OR_NULL(fwnode)) - return NULL; - - do { -@@ -762,16 +781,16 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, - const struct fwnode_handle *fwnode = dev_fwnode(dev); - struct fwnode_handle *next; - -+ if (IS_ERR_OR_NULL(fwnode)) -+ return NULL; -+ - /* Try to find a child in primary fwnode */ - next = fwnode_get_next_child_node(fwnode, child); - if (next) - return next; - - /* When no more children in primary, continue with secondary */ -- if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) -- next = fwnode_get_next_child_node(fwnode->secondary, child); -- -- return next; -+ return fwnode_get_next_child_node(fwnode->secondary, child); - } - EXPORT_SYMBOL_GPL(device_get_next_child_node); - -@@ -838,6 +857,9 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put); - */ - bool fwnode_device_is_available(const struct fwnode_handle *fwnode) - { -+ if (IS_ERR_OR_NULL(fwnode)) -+ return false; -+ - if (!fwnode_has_op(fwnode, device_is_available)) - return true; - -@@ -1033,25 +1055,31 @@ struct fwnode_handle * - fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, - struct fwnode_handle *prev) - { -+ struct fwnode_handle *ep, *port_parent = NULL; - const struct fwnode_handle *parent; -- struct fwnode_handle *ep; - - /* - * If this function is in a loop and the previous iteration returned - * an endpoint from fwnode->secondary, then we need to use the secondary - * as parent rather than @fwnode. 
- */ -- if (prev) -- parent = fwnode_graph_get_port_parent(prev); -- else -+ if (prev) { -+ port_parent = fwnode_graph_get_port_parent(prev); -+ parent = port_parent; -+ } else { - parent = fwnode; -+ } -+ if (IS_ERR_OR_NULL(parent)) -+ return NULL; - - ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev); -+ if (ep) -+ goto out_put_port_parent; - -- if (IS_ERR_OR_NULL(ep) && -- !IS_ERR_OR_NULL(parent) && !IS_ERR_OR_NULL(parent->secondary)) -- ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL); -+ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL); - -+out_put_port_parent: -+ fwnode_handle_put(port_parent); - return ep; - } - EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); -@@ -1269,8 +1297,10 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, - - fwnode_graph_for_each_endpoint(fwnode, ep) { - node = fwnode_graph_get_remote_port_parent(ep); -- if (!fwnode_device_is_available(node)) -+ if (!fwnode_device_is_available(node)) { -+ fwnode_handle_put(node); - continue; -+ } - - ret = match(node, con_id, data); - fwnode_handle_put(node); -diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c -index fabf87058d80b..ae6b8788d5f3f 100644 ---- a/drivers/base/regmap/regcache-rbtree.c -+++ b/drivers/base/regmap/regcache-rbtree.c -@@ -277,7 +277,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, - - blk = krealloc(rbnode->block, - blklen * map->cache_word_size, -- GFP_KERNEL); -+ map->alloc_flags); - if (!blk) - return -ENOMEM; - -@@ -286,7 +286,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, - if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), -- GFP_KERNEL); -+ map->alloc_flags); - if (!present) - return -ENOMEM; - -@@ -320,7 +320,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) - const struct regmap_range *range; - int i; - -- rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); -+ rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags); - if (!rbnode) - return NULL; - -@@ -346,13 +346,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) - } - - rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, -- GFP_KERNEL); -+ map->alloc_flags); - if (!rbnode->block) - goto err_free; - - rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), - sizeof(*rbnode->cache_present), -- GFP_KERNEL); -+ map->alloc_flags); - if (!rbnode->cache_present) - goto err_free_block; - -diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c -index f2469d3435ca3..0b517a83c4493 100644 ---- a/drivers/base/regmap/regcache.c -+++ b/drivers/base/regmap/regcache.c -@@ -343,6 +343,9 @@ int regcache_sync(struct regmap *map) - const char *name; - bool bypass; - -+ if (WARN_ON(map->cache_type == REGCACHE_NONE)) -+ return -EINVAL; -+ - BUG_ON(!map->cache_ops); - - map->lock(map->lock_arg); -@@ -412,6 +415,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min, - const char *name; - bool bypass; - -+ if (WARN_ON(map->cache_type == REGCACHE_NONE)) -+ return -EINVAL; -+ - BUG_ON(!map->cache_ops); - - map->lock(map->lock_arg); -diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c -index 980e5ce6a3a35..3ec611dc0c09f 100644 ---- a/drivers/base/regmap/regmap-i2c.c -+++ b/drivers/base/regmap/regmap-i2c.c -@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, - static const 
struct regmap_bus regmap_i2c_smbus_i2c_block = { - .write = regmap_i2c_smbus_i2c_write, - .read = regmap_i2c_smbus_i2c_read, -- .max_raw_read = I2C_SMBUS_BLOCK_MAX, -- .max_raw_write = I2C_SMBUS_BLOCK_MAX, -+ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1, -+ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1, - }; - - static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data, -@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg, - static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = { - .write = regmap_i2c_smbus_i2c_write_reg16, - .read = regmap_i2c_smbus_i2c_read_reg16, -- .max_raw_read = I2C_SMBUS_BLOCK_MAX, -- .max_raw_write = I2C_SMBUS_BLOCK_MAX, -+ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2, -+ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2, - }; - - static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, -diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c -index d2656581a6085..3aac960ae30ab 100644 ---- a/drivers/base/regmap/regmap-irq.c -+++ b/drivers/base/regmap/regmap-irq.c -@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) - ret = regmap_write(map, reg, d->mask_buf[i]); - if (d->chip->clear_ack) { - if (d->chip->ack_invert && !ret) -- ret = regmap_write(map, reg, -- d->mask_buf[i]); -+ ret = regmap_write(map, reg, UINT_MAX); - else if (!ret) -- ret = regmap_write(map, reg, -- ~d->mask_buf[i]); -+ ret = regmap_write(map, reg, 0); - } - if (ret != 0) - dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", -@@ -254,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data) - struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); - struct regmap *map = d->map; - const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); -+ unsigned int reg = irq_data->reg_offset / map->reg_stride; - unsigned int mask, type; - - type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; -@@ -270,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data) - * at the corresponding offset in regmap_irq_set_type(). 
- */ - if (d->chip->type_in_mask && type) -- mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; -+ mask = d->type_buf[reg] & irq_data->mask; - else - mask = irq_data->mask; - - if (d->chip->clear_on_unmask) - d->clear_status = true; - -- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; -+ d->mask_buf[reg] &= ~mask; - } - - static void regmap_irq_disable(struct irq_data *data) -@@ -388,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, - subreg = &chip->sub_reg_offsets[b]; - for (i = 0; i < subreg->num_regs; i++) { - unsigned int offset = subreg->offset[i]; -+ unsigned int index = offset / map->reg_stride; - - if (chip->not_fixed_stride) - ret = regmap_read(map, -@@ -396,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, - else - ret = regmap_read(map, - chip->status_base + offset, -- &data->status_buf[offset]); -+ &data->status_buf[index]); - - if (ret) - break; -@@ -556,11 +556,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) - data->status_buf[i]); - if (chip->clear_ack) { - if (chip->ack_invert && !ret) -- ret = regmap_write(map, reg, -- data->status_buf[i]); -+ ret = regmap_write(map, reg, UINT_MAX); - else if (!ret) -- ret = regmap_write(map, reg, -- ~data->status_buf[i]); -+ ret = regmap_write(map, reg, 0); - } - if (ret != 0) - dev_err(map->dev, "Failed to ack 0x%x: %d\n", -@@ -817,13 +815,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, - d->status_buf[i] & d->mask_buf[i]); - if (chip->clear_ack) { - if (chip->ack_invert && !ret) -- ret = regmap_write(map, reg, -- (d->status_buf[i] & -- d->mask_buf[i])); -+ ret = regmap_write(map, reg, UINT_MAX); - else if (!ret) -- ret = regmap_write(map, reg, -- ~(d->status_buf[i] & -- d->mask_buf[i])); -+ ret = regmap_write(map, reg, 0); - } - if (ret != 0) { - dev_err(map->dev, "Failed to ack 0x%x: %d\n", -diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c -index 21a0c2562ec06..f7811641ed5ae 100644 ---- a/drivers/base/regmap/regmap.c -+++ b/drivers/base/regmap/regmap.c -@@ -647,6 +647,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, - if (ret) - return ret; - -+ regmap_debugfs_exit(map); - regmap_debugfs_init(map); - - /* Add a devres resource for dev_get_regmap() */ -diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c -index c46f6a8e14d23..3ba1232ce8451 100644 ---- a/drivers/base/swnode.c -+++ b/drivers/base/swnode.c -@@ -535,7 +535,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, - return -ENOENT; - - if (nargs_prop) { -- error = property_entry_read_int_array(swnode->node->properties, -+ error = property_entry_read_int_array(ref->node->properties, - nargs_prop, sizeof(u32), - &nargs_prop_val, 1); - if (error) -diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c -index 3bb7beb127a96..88336f093decd 100644 ---- a/drivers/base/test/test_async_driver_probe.c -+++ b/drivers/base/test/test_async_driver_probe.c -@@ -84,7 +84,7 @@ test_platform_device_register_node(char *name, int id, int nid) - - pdev = platform_device_alloc(name, id); - if (!pdev) -- return NULL; -+ return ERR_PTR(-ENOMEM); - - if (nid != NUMA_NO_NODE) - set_dev_node(&pdev->dev, nid); -@@ -146,7 +146,7 @@ static int __init test_async_probe_init(void) - calltime = ktime_get(); - for_each_online_cpu(cpu) { - nid = cpu_to_node(cpu); -- pdev = &sync_dev[sync_id]; -+ pdev = &async_dev[async_id]; - - *pdev = 
test_platform_device_register_node("test_async_driver", - async_id, -diff --git a/drivers/base/topology.c b/drivers/base/topology.c -index 43c0940643f5d..5df6d861bc21b 100644 ---- a/drivers/base/topology.c -+++ b/drivers/base/topology.c -@@ -52,39 +52,39 @@ define_id_show_func(core_id); - static DEVICE_ATTR_RO(core_id); - - define_siblings_read_func(thread_siblings, sibling_cpumask); --static BIN_ATTR_RO(thread_siblings, 0); --static BIN_ATTR_RO(thread_siblings_list, 0); -+static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES); - - define_siblings_read_func(core_cpus, sibling_cpumask); --static BIN_ATTR_RO(core_cpus, 0); --static BIN_ATTR_RO(core_cpus_list, 0); -+static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES); - - define_siblings_read_func(core_siblings, core_cpumask); --static BIN_ATTR_RO(core_siblings, 0); --static BIN_ATTR_RO(core_siblings_list, 0); -+static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES); - - define_siblings_read_func(die_cpus, die_cpumask); --static BIN_ATTR_RO(die_cpus, 0); --static BIN_ATTR_RO(die_cpus_list, 0); -+static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES); - - define_siblings_read_func(package_cpus, core_cpumask); --static BIN_ATTR_RO(package_cpus, 0); --static BIN_ATTR_RO(package_cpus_list, 0); -+static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES); - - #ifdef CONFIG_SCHED_BOOK - define_id_show_func(book_id); - static DEVICE_ATTR_RO(book_id); - define_siblings_read_func(book_siblings, book_cpumask); --static BIN_ATTR_RO(book_siblings, 0); --static BIN_ATTR_RO(book_siblings_list, 0); -+static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES); - #endif - - #ifdef CONFIG_SCHED_DRAWER - define_id_show_func(drawer_id); - static DEVICE_ATTR_RO(drawer_id); - define_siblings_read_func(drawer_siblings, drawer_cpumask); --static BIN_ATTR_RO(drawer_siblings, 0); --static BIN_ATTR_RO(drawer_siblings_list, 0); -+static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES); -+static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES); - #endif - - static struct bin_attribute *bin_attrs[] = { -diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c -index ccc86206e5087..09ee2a1e35bbd 100644 ---- a/drivers/base/transport_class.c -+++ b/drivers/base/transport_class.c -@@ -155,12 +155,27 @@ static int transport_add_class_device(struct attribute_container *cont, - struct device *dev, - struct device *classdev) - { -+ struct transport_class *tclass = class_to_transport_class(cont->class); - int error = attribute_container_add_class_device(classdev); - struct transport_container *tcont = - attribute_container_to_transport_container(cont); - -- if (!error && tcont->statistics) -+ if (error) -+ goto err_remove; -+ -+ if (tcont->statistics) { - error = sysfs_create_group(&classdev->kobj, tcont->statistics); -+ if (error) -+ goto err_del; -+ } -+ -+ return 0; -+ -+err_del: -+ attribute_container_class_device_del(classdev); -+err_remove: -+ if (tclass->remove) -+ tclass->remove(tcont, dev, classdev); - - return error; - } -diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig -index ab3e37aa1830c..fb12311b87ff9 100644 ---- a/drivers/block/Kconfig -+++ 
b/drivers/block/Kconfig -@@ -33,6 +33,22 @@ config BLK_DEV_FD - To compile this driver as a module, choose M here: the - module will be called floppy. - -+config BLK_DEV_FD_RAWCMD -+ bool "Support for raw floppy disk commands (DEPRECATED)" -+ depends on BLK_DEV_FD -+ help -+ If you want to use actual physical floppies and expect to do -+ special low-level hardware accesses to them (access and use -+ non-standard formats, for example), then enable this. -+ -+ Note that the code enabled by this option is rarely used and -+ might be unstable or insecure, and distros should not enable it. -+ -+ Note: FDRAWCMD is deprecated and will be removed from the kernel -+ in the near future. -+ -+ If unsure, say N. -+ - config AMIGA_FLOPPY - tristate "Amiga floppy support" - depends on AMIGA -@@ -255,15 +271,6 @@ config BLK_DEV_NBD - - If unsure, say N. - --config BLK_DEV_SX8 -- tristate "Promise SATA SX8 support" -- depends on PCI -- help -- Saying Y or M here will enable support for the -- Promise SATA SX8 controllers. -- -- Use devices /dev/sx8/$N and /dev/sx8/$Np$M. -- - config BLK_DEV_RAM - tristate "RAM block device support" - help -@@ -394,6 +401,7 @@ config XEN_BLKDEV_BACKEND - config VIRTIO_BLK - tristate "Virtio block driver" - depends on VIRTIO -+ select SG_POOL - help - This is the virtual block driver for virtio. It can be used with - QEMU based VMMs (like KVM or Xen). Say Y or M. -diff --git a/drivers/block/Makefile b/drivers/block/Makefile -index bc68817ef4966..91220b251b467 100644 ---- a/drivers/block/Makefile -+++ b/drivers/block/Makefile -@@ -27,8 +27,6 @@ obj-$(CONFIG_BLK_DEV_NBD) += nbd.o - obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o - obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o - --obj-$(CONFIG_BLK_DEV_SX8) += sx8.o -- - obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o - obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/ - obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ -diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c -index 8b1714021498c..1ed557cb5ed23 100644 ---- a/drivers/block/amiflop.c -+++ b/drivers/block/amiflop.c -@@ -61,6 +61,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c -index a093644ac39fb..82faaa4581579 100644 ---- a/drivers/block/ataflop.c -+++ b/drivers/block/ataflop.c -@@ -68,6 +68,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -298,6 +299,7 @@ static struct atari_floppy_struct { - disk change detection) */ - int flags; /* flags */ - struct gendisk *disk[NUM_DISK_MINORS]; -+ bool registered[NUM_DISK_MINORS]; - int ref; - int type; - struct blk_mq_tag_set tag_set; -@@ -456,10 +458,20 @@ static DEFINE_TIMER(fd_timer, check_change); - - static void fd_end_request_cur(blk_status_t err) - { -+ DPRINT(("fd_end_request_cur(), bytes %d of %d\n", -+ blk_rq_cur_bytes(fd_request), -+ blk_rq_bytes(fd_request))); -+ - if (!blk_update_request(fd_request, err, - blk_rq_cur_bytes(fd_request))) { -+ DPRINT(("calling __blk_mq_end_request()\n")); - __blk_mq_end_request(fd_request, err); - fd_request = NULL; -+ } else { -+ /* requeue rest of request */ -+ DPRINT(("calling blk_mq_requeue_request()\n")); -+ blk_mq_requeue_request(fd_request, true); -+ fd_request = NULL; - } - } - -@@ -653,9 +665,6 @@ static inline void copy_buffer(void *from, void *to) - *p2++ = *p1++; - } - -- -- -- - /* General Interrupt Handling */ - - static void (*FloppyIRQHandler)( int status ) = NULL; -@@ -700,12 +709,21 @@ static void fd_error( void ) - if 
(fd_request->error_count >= MAX_ERRORS) { - printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); - fd_end_request_cur(BLK_STS_IOERR); -+ finish_fdc(); -+ return; - } - else if (fd_request->error_count == RECALIBRATE_ERRORS) { - printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); - if (SelectedDrive != -1) - SUD.track = -1; - } -+ /* need to re-run request to recalibrate */ -+ atari_disable_irq( IRQ_MFP_FDC ); -+ -+ setup_req_params( SelectedDrive ); -+ do_fd_action( SelectedDrive ); -+ -+ atari_enable_irq( IRQ_MFP_FDC ); - } - - -@@ -732,8 +750,10 @@ static int do_format(int drive, int type, struct atari_format_descr *desc) - if (type) { - type--; - if (type >= NUM_DISK_MINORS || -- minor2disktype[type].drive_types > DriveType) -+ minor2disktype[type].drive_types > DriveType) { -+ finish_fdc(); - return -EINVAL; -+ } - } - - q = unit[drive].disk[type]->queue; -@@ -751,6 +771,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc) - } - - if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) { -+ finish_fdc(); - ret = -EINVAL; - goto out; - } -@@ -791,6 +812,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc) - - wait_for_completion(&format_wait); - -+ finish_fdc(); - ret = FormatError ? -EIO : 0; - out: - blk_mq_unquiesce_queue(q); -@@ -825,6 +847,7 @@ static void do_fd_action( int drive ) - else { - /* all sectors finished */ - fd_end_request_cur(BLK_STS_OK); -+ finish_fdc(); - return; - } - } -@@ -1229,6 +1252,7 @@ static void fd_rwsec_done1(int status) - else { - /* all sectors finished */ - fd_end_request_cur(BLK_STS_OK); -+ finish_fdc(); - } - return; - -@@ -1350,7 +1374,7 @@ static void fd_times_out(struct timer_list *unused) - - static void finish_fdc( void ) - { -- if (!NeedSeek) { -+ if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) { - finish_fdc_done( 0 ); - } - else { -@@ -1385,7 +1409,8 @@ static void finish_fdc_done( int dummy ) - start_motor_off_timer(); - - local_irq_save(flags); -- stdma_release(); -+ if (stdma_is_locked_by(floppy_irq)) -+ stdma_release(); - local_irq_restore(flags); - - DPRINT(("finish_fdc() finished\n")); -@@ -1475,15 +1500,6 @@ static void setup_req_params( int drive ) - ReqTrack, ReqSector, (unsigned long)ReqData )); - } - --static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx) --{ -- spin_lock_irq(&ataflop_lock); -- atari_disable_irq(IRQ_MFP_FDC); -- finish_fdc(); -- atari_enable_irq(IRQ_MFP_FDC); -- spin_unlock_irq(&ataflop_lock); --} -- - static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) - { -@@ -1491,6 +1507,10 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, - int drive = floppy - unit; - int type = floppy->type; - -+ DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n", -+ drive, type, blk_rq_cur_sectors(bd->rq), -+ blk_rq_sectors(bd->rq), bd->last)); -+ - spin_lock_irq(&ataflop_lock); - if (fd_request) { - spin_unlock_irq(&ataflop_lock); -@@ -1511,6 +1531,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, - /* drive not connected */ - printk(KERN_ERR "Unknown Device: fd%d\n", drive ); - fd_end_request_cur(BLK_STS_IOERR); -+ stdma_release(); - goto out; - } - -@@ -1527,11 +1548,13 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, - if (--type >= NUM_DISK_MINORS) { - printk(KERN_WARNING "fd%d: invalid disk format", drive ); - fd_end_request_cur(BLK_STS_IOERR); -+ stdma_release(); - goto out; - } - if (minor2disktype[type].drive_types > 
DriveType) { - printk(KERN_WARNING "fd%d: unsupported disk format", drive ); - fd_end_request_cur(BLK_STS_IOERR); -+ stdma_release(); - goto out; - } - type = minor2disktype[type].index; -@@ -1550,8 +1573,6 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx, - setup_req_params( drive ); - do_fd_action( drive ); - -- if (bd->last) -- finish_fdc(); - atari_enable_irq( IRQ_MFP_FDC ); - - out: -@@ -1634,6 +1655,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, - /* what if type > 0 here? Overwrite specified entry ? */ - if (type) { - /* refuse to re-set a predefined type for now */ -+ finish_fdc(); - return -EINVAL; - } - -@@ -1701,8 +1723,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, - - /* sanity check */ - if (setprm.track != dtp->blocks/dtp->spt/2 || -- setprm.head != 2) -+ setprm.head != 2) { -+ finish_fdc(); - return -EINVAL; -+ } - - UDT = dtp; - set_capacity(disk, UDT->blocks); -@@ -1962,7 +1986,6 @@ static const struct block_device_operations floppy_fops = { - - static const struct blk_mq_ops ataflop_mq_ops = { - .queue_rq = ataflop_queue_rq, -- .commit_rqs = ataflop_commit_rqs, - }; - - static int ataflop_alloc_disk(unsigned int drive, unsigned int type) -@@ -1986,8 +2009,6 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type) - return 0; - } - --static DEFINE_MUTEX(ataflop_probe_lock); -- - static void ataflop_probe(dev_t dev) - { - int drive = MINOR(dev) & 3; -@@ -1998,12 +2019,46 @@ static void ataflop_probe(dev_t dev) - - if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS) - return; -- mutex_lock(&ataflop_probe_lock); - if (!unit[drive].disk[type]) { -- if (ataflop_alloc_disk(drive, type) == 0) -+ if (ataflop_alloc_disk(drive, type) == 0) { - add_disk(unit[drive].disk[type]); -+ unit[drive].registered[type] = true; -+ } -+ } -+} -+ -+static void atari_floppy_cleanup(void) -+{ -+ int i; -+ int type; -+ -+ for (i = 0; i < FD_MAX_UNITS; i++) { -+ for (type = 0; type < NUM_DISK_MINORS; type++) { -+ if (!unit[i].disk[type]) -+ continue; -+ del_gendisk(unit[i].disk[type]); -+ blk_cleanup_queue(unit[i].disk[type]->queue); -+ put_disk(unit[i].disk[type]); -+ } -+ blk_mq_free_tag_set(&unit[i].tag_set); -+ } -+ -+ del_timer_sync(&fd_timer); -+ atari_stram_free(DMABuffer); -+} -+ -+static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs) -+{ -+ int type; -+ -+ for (type = 0; type < NUM_DISK_MINORS; type++) { -+ if (!fs->disk[type]) -+ continue; -+ if (fs->registered[type]) -+ del_gendisk(fs->disk[type]); -+ blk_cleanup_disk(fs->disk[type]); - } -- mutex_unlock(&ataflop_probe_lock); -+ blk_mq_free_tag_set(&fs->tag_set); - } - - static int __init atari_floppy_init (void) -@@ -2015,11 +2070,6 @@ static int __init atari_floppy_init (void) - /* Amiga, Mac, ... don't have Atari-compatible floppy :-) */ - return -ENODEV; - -- mutex_lock(&ataflop_probe_lock); -- ret = __register_blkdev(FLOPPY_MAJOR, "fd", ataflop_probe); -- if (ret) -- goto out_unlock; -- - for (i = 0; i < FD_MAX_UNITS; i++) { - memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set)); - unit[i].tag_set.ops = &ataflop_mq_ops; -@@ -2065,6 +2115,7 @@ static int __init atari_floppy_init (void) - unit[i].track = -1; - unit[i].flags = 0; - add_disk(unit[i].disk[0]); -+ unit[i].registered[0] = true; - } - - printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n", -@@ -2072,18 +2123,17 @@ static int __init atari_floppy_init (void) - UseTrackbuffer ? 
"" : "no "); - config_types(); - -- return 0; -+ ret = __register_blkdev(FLOPPY_MAJOR, "fd", ataflop_probe); -+ if (ret) { -+ printk(KERN_ERR "atari_floppy_init: cannot register block device\n"); -+ atari_floppy_cleanup(); -+ } -+ return ret; - - err: -- while (--i >= 0) { -- blk_cleanup_queue(unit[i].disk[0]->queue); -- put_disk(unit[i].disk[0]); -- blk_mq_free_tag_set(&unit[i].tag_set); -- } -+ while (--i >= 0) -+ atari_cleanup_floppy_disk(&unit[i]); - -- unregister_blkdev(FLOPPY_MAJOR, "fd"); --out_unlock: -- mutex_unlock(&ataflop_probe_lock); - return ret; - } - -@@ -2128,22 +2178,8 @@ __setup("floppy=", atari_floppy_setup); - - static void __exit atari_floppy_exit(void) - { -- int i, type; -- -- for (i = 0; i < FD_MAX_UNITS; i++) { -- for (type = 0; type < NUM_DISK_MINORS; type++) { -- if (!unit[i].disk[type]) -- continue; -- del_gendisk(unit[i].disk[type]); -- blk_cleanup_queue(unit[i].disk[type]->queue); -- put_disk(unit[i].disk[type]); -- } -- blk_mq_free_tag_set(&unit[i].tag_set); -- } - unregister_blkdev(FLOPPY_MAJOR, "fd"); -- -- del_timer_sync(&fd_timer); -- atari_stram_free( DMABuffer ); -+ atari_floppy_cleanup(); - } - - module_init(atari_floppy_init) -diff --git a/drivers/block/brd.c b/drivers/block/brd.c -index 530b312402031..76ce6f766d55e 100644 ---- a/drivers/block/brd.c -+++ b/drivers/block/brd.c -@@ -78,11 +78,9 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) - } - - /* -- * Look up and return a brd's page for a given sector. -- * If one does not exist, allocate an empty page, and insert that. Then -- * return it. -+ * Insert a new page for a given sector, if one does not already exist. - */ --static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) -+static int brd_insert_page(struct brd_device *brd, sector_t sector) - { - pgoff_t idx; - struct page *page; -@@ -90,7 +88,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) - - page = brd_lookup_page(brd, sector); - if (page) -- return page; -+ return 0; - - /* - * Must use NOIO because we don't want to recurse back into the -@@ -99,11 +97,11 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) - gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM; - page = alloc_page(gfp_flags); - if (!page) -- return NULL; -+ return -ENOMEM; - - if (radix_tree_preload(GFP_NOIO)) { - __free_page(page); -- return NULL; -+ return -ENOMEM; - } - - spin_lock(&brd->brd_lock); -@@ -120,8 +118,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) - spin_unlock(&brd->brd_lock); - - radix_tree_preload_end(); -- -- return page; -+ return 0; - } - - /* -@@ -174,16 +171,17 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) - { - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; - size_t copy; -+ int ret; - - copy = min_t(size_t, n, PAGE_SIZE - offset); -- if (!brd_insert_page(brd, sector)) -- return -ENOSPC; -+ ret = brd_insert_page(brd, sector); -+ if (ret) -+ return ret; - if (copy < n) { - sector += copy >> SECTOR_SHIFT; -- if (!brd_insert_page(brd, sector)) -- return -ENOSPC; -+ ret = brd_insert_page(brd, sector); - } -- return 0; -+ return ret; - } - - /* -@@ -372,6 +370,7 @@ static int brd_alloc(int i) - struct brd_device *brd; - struct gendisk *disk; - char buf[DISK_NAME_LEN]; -+ int err = -ENOMEM; - - mutex_lock(&brd_devices_mutex); - list_for_each_entry(brd, &brd_devices, brd_list) { -@@ -422,16 +421,21 @@ static int brd_alloc(int i) - /* Tell the block layer 
that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); -- add_disk(disk); -+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue); -+ err = add_disk(disk); -+ if (err) -+ goto out_cleanup_disk; - - return 0; - -+out_cleanup_disk: -+ blk_cleanup_disk(disk); - out_free_dev: - mutex_lock(&brd_devices_mutex); - list_del(&brd->brd_list); - mutex_unlock(&brd_devices_mutex); - kfree(brd); -- return -ENOMEM; -+ return err; - } - - static void brd_probe(dev_t dev) -diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h -index 5d9181382ce19..0a5766a2f1618 100644 ---- a/drivers/block/drbd/drbd_int.h -+++ b/drivers/block/drbd/drbd_int.h -@@ -1642,22 +1642,22 @@ struct sib_info { - }; - void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); - --extern void notify_resource_state(struct sk_buff *, -+extern int notify_resource_state(struct sk_buff *, - unsigned int, - struct drbd_resource *, - struct resource_info *, - enum drbd_notification_type); --extern void notify_device_state(struct sk_buff *, -+extern int notify_device_state(struct sk_buff *, - unsigned int, - struct drbd_device *, - struct device_info *, - enum drbd_notification_type); --extern void notify_connection_state(struct sk_buff *, -+extern int notify_connection_state(struct sk_buff *, - unsigned int, - struct drbd_connection *, - struct connection_info *, - enum drbd_notification_type); --extern void notify_peer_device_state(struct sk_buff *, -+extern int notify_peer_device_state(struct sk_buff *, - unsigned int, - struct drbd_peer_device *, - struct peer_device_info *, -diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c -index 55234a558e98b..eaf20a3324018 100644 ---- a/drivers/block/drbd/drbd_main.c -+++ b/drivers/block/drbd/drbd_main.c -@@ -171,7 +171,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, - unsigned int set_size) - { - struct drbd_request *r; -- struct drbd_request *req = NULL; -+ struct drbd_request *req = NULL, *tmp = NULL; - int expect_epoch = 0; - int expect_size = 0; - -@@ -225,8 +225,11 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr, - * to catch requests being barrier-acked "unexpectedly". - * It usually should find the same req again, or some READ preceding it. 
*/ - list_for_each_entry(req, &connection->transfer_log, tl_requests) -- if (req->epoch == expect_epoch) -+ if (req->epoch == expect_epoch) { -+ tmp = req; - break; -+ } -+ req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests); - list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) { - if (req->epoch != expect_epoch) - break; -@@ -2241,7 +2244,8 @@ void drbd_destroy_device(struct kref *kref) - kref_put(&peer_device->connection->kref, drbd_destroy_connection); - kfree(peer_device); - } -- memset(device, 0xfd, sizeof(*device)); -+ if (device->submit.wq) -+ destroy_workqueue(device->submit.wq); - kfree(device); - kref_put(&resource->kref, drbd_destroy_resource); - } -@@ -2333,7 +2337,6 @@ void drbd_destroy_resource(struct kref *kref) - idr_destroy(&resource->devices); - free_cpumask_var(resource->cpu_mask); - kfree(resource->name); -- memset(resource, 0xf2, sizeof(*resource)); - kfree(resource); - } - -@@ -2674,7 +2677,6 @@ void drbd_destroy_connection(struct kref *kref) - drbd_free_socket(&connection->data); - kfree(connection->int_dig_in); - kfree(connection->int_dig_vv); -- memset(connection, 0xfc, sizeof(*connection)); - kfree(connection); - kref_put(&resource->kref, drbd_destroy_resource); - } -@@ -2696,7 +2698,7 @@ static int init_submitter(struct drbd_device *device) - enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) - { - struct drbd_resource *resource = adm_ctx->resource; -- struct drbd_connection *connection; -+ struct drbd_connection *connection, *n; - struct drbd_device *device; - struct drbd_peer_device *peer_device, *tmp_peer_device; - struct gendisk *disk; -@@ -2737,6 +2739,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig - sprintf(disk->disk_name, "drbd%d", minor); - disk->private_data = device; - -+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); - blk_queue_write_cache(disk->queue, true, true); - /* Setting the max_hw_sectors to an odd value of 8kibyte here - This triggers a max_bio_size message upon first attach or connect */ -@@ -2791,10 +2794,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig - - if (init_submitter(device)) { - err = ERR_NOMEM; -- goto out_idr_remove_vol; -+ goto out_idr_remove_from_resource; - } - -- add_disk(disk); -+ err = add_disk(disk); -+ if (err) -+ goto out_destroy_workqueue; - - /* inherit the connection state */ - device->state.conn = first_connection(resource)->cstate; -@@ -2808,10 +2813,10 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig - drbd_debugfs_device_add(device); - return NO_ERROR; - --out_idr_remove_vol: -- idr_remove(&connection->peer_devices, vnr); -+out_destroy_workqueue: -+ destroy_workqueue(device->submit.wq); - out_idr_remove_from_resource: -- for_each_connection(connection, resource) { -+ for_each_connection_safe(connection, n, resource) { - peer_device = idr_remove(&connection->peer_devices, vnr); - if (peer_device) - kref_put(&connection->kref, drbd_destroy_connection); -@@ -3603,9 +3608,8 @@ const char *cmdname(enum drbd_packet cmd) - * when we want to support more than - * one PRO_VERSION */ - static const char *cmdnames[] = { -+ - [P_DATA] = "Data", -- [P_WSAME] = "WriteSame", -- [P_TRIM] = "Trim", - [P_DATA_REPLY] = "DataReply", - [P_RS_DATA_REPLY] = "RSDataReply", - [P_BARRIER] = "Barrier", -@@ -3616,7 +3620,6 @@ const char *cmdname(enum drbd_packet cmd) - [P_DATA_REQUEST] = "DataRequest", - [P_RS_DATA_REQUEST] = 
"RSDataRequest", - [P_SYNC_PARAM] = "SyncParam", -- [P_SYNC_PARAM89] = "SyncParam89", - [P_PROTOCOL] = "ReportProtocol", - [P_UUIDS] = "ReportUUIDs", - [P_SIZES] = "ReportSizes", -@@ -3624,6 +3627,7 @@ const char *cmdname(enum drbd_packet cmd) - [P_SYNC_UUID] = "ReportSyncUUID", - [P_AUTH_CHALLENGE] = "AuthChallenge", - [P_AUTH_RESPONSE] = "AuthResponse", -+ [P_STATE_CHG_REQ] = "StateChgRequest", - [P_PING] = "Ping", - [P_PING_ACK] = "PingAck", - [P_RECV_ACK] = "RecvAck", -@@ -3634,23 +3638,25 @@ const char *cmdname(enum drbd_packet cmd) - [P_NEG_DREPLY] = "NegDReply", - [P_NEG_RS_DREPLY] = "NegRSDReply", - [P_BARRIER_ACK] = "BarrierAck", -- [P_STATE_CHG_REQ] = "StateChgRequest", - [P_STATE_CHG_REPLY] = "StateChgReply", - [P_OV_REQUEST] = "OVRequest", - [P_OV_REPLY] = "OVReply", - [P_OV_RESULT] = "OVResult", - [P_CSUM_RS_REQUEST] = "CsumRSRequest", - [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", -+ [P_SYNC_PARAM89] = "SyncParam89", - [P_COMPRESSED_BITMAP] = "CBitmap", - [P_DELAY_PROBE] = "DelayProbe", - [P_OUT_OF_SYNC] = "OutOfSync", -- [P_RETRY_WRITE] = "RetryWrite", - [P_RS_CANCEL] = "RSCancel", - [P_CONN_ST_CHG_REQ] = "conn_st_chg_req", - [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply", - [P_PROTOCOL_UPDATE] = "protocol_update", -+ [P_TRIM] = "Trim", - [P_RS_THIN_REQ] = "rs_thin_req", - [P_RS_DEALLOCATED] = "rs_deallocated", -+ [P_WSAME] = "WriteSame", -+ [P_ZEROES] = "Zeroes", - - /* enum drbd_packet, but not commands - obsoleted flags: - * P_MAY_IGNORE -diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c -index 44ccf8b4f4b29..69184cf17b6ad 100644 ---- a/drivers/block/drbd/drbd_nl.c -+++ b/drivers/block/drbd/drbd_nl.c -@@ -4617,7 +4617,7 @@ static int nla_put_notification_header(struct sk_buff *msg, - return drbd_notification_header_to_skb(msg, &nh, true); - } - --void notify_resource_state(struct sk_buff *skb, -+int notify_resource_state(struct sk_buff *skb, - unsigned int seq, - struct drbd_resource *resource, - struct resource_info *resource_info, -@@ -4659,16 +4659,17 @@ void notify_resource_state(struct sk_buff *skb, - if (err && err != -ESRCH) - goto failed; - } -- return; -+ return 0; - - nla_put_failure: - nlmsg_free(skb); - failed: - drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", - err, seq); -+ return err; - } - --void notify_device_state(struct sk_buff *skb, -+int notify_device_state(struct sk_buff *skb, - unsigned int seq, - struct drbd_device *device, - struct device_info *device_info, -@@ -4708,16 +4709,17 @@ void notify_device_state(struct sk_buff *skb, - if (err && err != -ESRCH) - goto failed; - } -- return; -+ return 0; - - nla_put_failure: - nlmsg_free(skb); - failed: - drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", - err, seq); -+ return err; - } - --void notify_connection_state(struct sk_buff *skb, -+int notify_connection_state(struct sk_buff *skb, - unsigned int seq, - struct drbd_connection *connection, - struct connection_info *connection_info, -@@ -4757,16 +4759,17 @@ void notify_connection_state(struct sk_buff *skb, - if (err && err != -ESRCH) - goto failed; - } -- return; -+ return 0; - - nla_put_failure: - nlmsg_free(skb); - failed: - drbd_err(connection, "Error %d while broadcasting event. 
Event seq:%u\n", - err, seq); -+ return err; - } - --void notify_peer_device_state(struct sk_buff *skb, -+int notify_peer_device_state(struct sk_buff *skb, - unsigned int seq, - struct drbd_peer_device *peer_device, - struct peer_device_info *peer_device_info, -@@ -4807,13 +4810,14 @@ void notify_peer_device_state(struct sk_buff *skb, - if (err && err != -ESRCH) - goto failed; - } -- return; -+ return 0; - - nla_put_failure: - nlmsg_free(skb); - failed: - drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n", - err, seq); -+ return err; - } - - void notify_helper(enum drbd_notification_type type, -@@ -4864,7 +4868,7 @@ fail: - err, seq); - } - --static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) -+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq) - { - struct drbd_genlmsghdr *dh; - int err; -@@ -4878,11 +4882,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) - if (nla_put_notification_header(skb, NOTIFY_EXISTS)) - goto nla_put_failure; - genlmsg_end(skb, dh); -- return; -+ return 0; - - nla_put_failure: - nlmsg_free(skb); - pr_err("Error %d sending event. Event seq:%u\n", err, seq); -+ return err; - } - - static void free_state_changes(struct list_head *list) -@@ -4909,6 +4914,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) - unsigned int seq = cb->args[2]; - unsigned int n; - enum drbd_notification_type flags = 0; -+ int err = 0; - - /* There is no need for taking notification_mutex here: it doesn't - matter if the initial state events mix with later state chage -@@ -4917,32 +4923,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) - - cb->args[5]--; - if (cb->args[5] == 1) { -- notify_initial_state_done(skb, seq); -+ err = notify_initial_state_done(skb, seq); - goto out; - } - n = cb->args[4]++; - if (cb->args[4] < cb->args[3]) - flags |= NOTIFY_CONTINUES; - if (n < 1) { -- notify_resource_state_change(skb, seq, state_change->resource, -+ err = notify_resource_state_change(skb, seq, state_change->resource, - NOTIFY_EXISTS | flags); - goto next; - } - n--; - if (n < state_change->n_connections) { -- notify_connection_state_change(skb, seq, &state_change->connections[n], -+ err = notify_connection_state_change(skb, seq, &state_change->connections[n], - NOTIFY_EXISTS | flags); - goto next; - } - n -= state_change->n_connections; - if (n < state_change->n_devices) { -- notify_device_state_change(skb, seq, &state_change->devices[n], -+ err = notify_device_state_change(skb, seq, &state_change->devices[n], - NOTIFY_EXISTS | flags); - goto next; - } - n -= state_change->n_devices; - if (n < state_change->n_devices * state_change->n_connections) { -- notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], -+ err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], - NOTIFY_EXISTS | flags); - goto next; - } -@@ -4957,7 +4963,10 @@ next: - cb->args[4] = 0; - } - out: -- return skb->len; -+ if (err) -+ return err; -+ else -+ return skb->len; - } - - int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) -diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c -index 1f740e42e4571..0104e101b0d71 100644 ---- a/drivers/block/drbd/drbd_receiver.c -+++ b/drivers/block/drbd/drbd_receiver.c -@@ -1301,7 +1301,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont - bio_set_dev(bio, device->ldev->backing_bdev); - 
bio->bi_private = octx; - bio->bi_end_io = one_flush_endio; -- bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; -+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - - device->flush_jif = jiffies; - set_bit(FLUSH_PENDING, &device->flags); -diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c -index 5ca233644d705..4281dc847bc22 100644 ---- a/drivers/block/drbd/drbd_req.c -+++ b/drivers/block/drbd/drbd_req.c -@@ -180,7 +180,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) - void complete_master_bio(struct drbd_device *device, - struct bio_and_error *m) - { -- m->bio->bi_status = errno_to_blk_status(m->error); -+ if (unlikely(m->error)) -+ m->bio->bi_status = errno_to_blk_status(m->error); - bio_endio(m->bio); - dec_ap_bio(device); - } -@@ -1601,6 +1602,8 @@ blk_qc_t drbd_submit_bio(struct bio *bio) - struct drbd_device *device = bio->bi_bdev->bd_disk->private_data; - - blk_queue_split(&bio); -+ if (!bio) -+ return BLK_QC_T_NONE; - - /* - * what we "blindly" assume: -diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c -index b8a27818ab3f8..4ee11aef6672b 100644 ---- a/drivers/block/drbd/drbd_state.c -+++ b/drivers/block/drbd/drbd_state.c -@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, - return rv; - } - --void notify_resource_state_change(struct sk_buff *skb, -+int notify_resource_state_change(struct sk_buff *skb, - unsigned int seq, - struct drbd_resource_state_change *resource_state_change, - enum drbd_notification_type type) -@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb, - .res_susp_fen = resource_state_change->susp_fen[NEW], - }; - -- notify_resource_state(skb, seq, resource, &resource_info, type); -+ return notify_resource_state(skb, seq, resource, &resource_info, type); - } - --void notify_connection_state_change(struct sk_buff *skb, -+int notify_connection_state_change(struct sk_buff *skb, - unsigned int seq, - struct drbd_connection_state_change *connection_state_change, - enum drbd_notification_type type) -@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb, - .conn_role = connection_state_change->peer_role[NEW], - }; - -- notify_connection_state(skb, seq, connection, &connection_info, type); -+ return notify_connection_state(skb, seq, connection, &connection_info, type); - } - --void notify_device_state_change(struct sk_buff *skb, -+int notify_device_state_change(struct sk_buff *skb, - unsigned int seq, - struct drbd_device_state_change *device_state_change, - enum drbd_notification_type type) -@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb, - .dev_disk_state = device_state_change->disk_state[NEW], - }; - -- notify_device_state(skb, seq, device, &device_info, type); -+ return notify_device_state(skb, seq, device, &device_info, type); - } - --void notify_peer_device_state_change(struct sk_buff *skb, -+int notify_peer_device_state_change(struct sk_buff *skb, - unsigned int seq, - struct drbd_peer_device_state_change *p, - enum drbd_notification_type type) -@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb, - .peer_resync_susp_dependency = p->resync_susp_dependency[NEW], - }; - -- notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); -+ return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); - } - - static void broadcast_state_change(struct drbd_state_change *state_change) -@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct 
drbd_state_change *state_change) - struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; - bool resource_state_has_changed; - unsigned int n_device, n_connection, n_peer_device, n_peer_devices; -- void (*last_func)(struct sk_buff *, unsigned int, void *, -+ int (*last_func)(struct sk_buff *, unsigned int, void *, - enum drbd_notification_type) = NULL; - void *last_arg = NULL; - -diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h -index ba80f612d6abb..d5b0479bc9a66 100644 ---- a/drivers/block/drbd/drbd_state_change.h -+++ b/drivers/block/drbd/drbd_state_change.h -@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_ - extern void copy_old_to_new_state_change(struct drbd_state_change *); - extern void forget_state_change(struct drbd_state_change *); - --extern void notify_resource_state_change(struct sk_buff *, -+extern int notify_resource_state_change(struct sk_buff *, - unsigned int, - struct drbd_resource_state_change *, - enum drbd_notification_type type); --extern void notify_connection_state_change(struct sk_buff *, -+extern int notify_connection_state_change(struct sk_buff *, - unsigned int, - struct drbd_connection_state_change *, - enum drbd_notification_type type); --extern void notify_device_state_change(struct sk_buff *, -+extern int notify_device_state_change(struct sk_buff *, - unsigned int, - struct drbd_device_state_change *, - enum drbd_notification_type type); --extern void notify_peer_device_state_change(struct sk_buff *, -+extern int notify_peer_device_state_change(struct sk_buff *, - unsigned int, - struct drbd_peer_device_state_change *, - enum drbd_notification_type type); -diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c -index fef79ea52e3ed..4dc25a123d946 100644 ---- a/drivers/block/floppy.c -+++ b/drivers/block/floppy.c -@@ -184,6 +184,7 @@ static int print_unex = 1; - #include - #include - #include -+#include - #include - #include - #include -@@ -508,8 +509,8 @@ static unsigned long fdc_busy; - static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); - static DECLARE_WAIT_QUEUE_HEAD(command_done); - --/* Errors during formatting are counted here. */ --static int format_errors; -+/* errors encountered on the current (or last) request */ -+static int floppy_errors; - - /* Format request descriptor. 
*/ - static struct format_descr format_req; -@@ -529,7 +530,6 @@ static struct format_descr format_req; - static char *floppy_track_buffer; - static int max_buffer_sectors; - --static int *errors; - typedef void (*done_f)(int); - static const struct cont_t { - void (*interrupt)(void); -@@ -1014,7 +1014,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn); - static void cancel_activity(void) - { - do_floppy = NULL; -- cancel_delayed_work_sync(&fd_timer); -+ cancel_delayed_work(&fd_timer); - cancel_work_sync(&floppy_work); - } - -@@ -1454,7 +1454,7 @@ static int interpret_errors(void) - if (drive_params[current_drive].flags & FTD_MSG) - DPRINT("Over/Underrun - retrying\n"); - bad = 0; -- } else if (*errors >= drive_params[current_drive].max_errors.reporting) { -+ } else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) { - print_errors(); - } - if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC) -@@ -2094,7 +2094,7 @@ static void bad_flp_intr(void) - if (!next_valid_format(current_drive)) - return; - } -- err_count = ++(*errors); -+ err_count = ++floppy_errors; - INFBOUND(write_errors[current_drive].badness, err_count); - if (err_count > drive_params[current_drive].max_errors.abort) - cont->done(0); -@@ -2240,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req) - return -EINVAL; - } - format_req = *tmp_format_req; -- format_errors = 0; - cont = &format_cont; -- errors = &format_errors; -+ floppy_errors = 0; - ret = wait_til_done(redo_format, true); - if (ret == -EINTR) - return -EINTR; -@@ -2760,10 +2759,11 @@ static int set_next_request(void) - current_req = list_first_entry_or_null(&floppy_reqs, struct request, - queuelist); - if (current_req) { -- current_req->error_count = 0; -+ floppy_errors = 0; - list_del_init(¤t_req->queuelist); -+ return 1; - } -- return current_req != NULL; -+ return 0; - } - - /* Starts or continues processing request. 
Will automatically unlock the -@@ -2822,7 +2822,6 @@ do_request: - _floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format]; - } else - probing = 0; -- errors = &(current_req->error_count); - tmp = make_raw_rw_request(); - if (tmp < 2) { - request_done(tmp); -@@ -2983,6 +2982,8 @@ static const char *drive_name(int type, int drive) - return "(null)"; - } - -+#ifdef CONFIG_BLK_DEV_FD_RAWCMD -+ - /* raw commands */ - static void raw_cmd_done(int flag) - { -@@ -3080,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) - } - } - -+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT) -+ - static int raw_cmd_copyin(int cmd, void __user *param, - struct floppy_raw_cmd **rcmd) - { -@@ -3107,7 +3110,7 @@ loop: - ptr->resultcode = 0; - - if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { -- if (ptr->length <= 0) -+ if (ptr->length <= 0 || ptr->length >= MAX_LEN) - return -EINVAL; - ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length); - fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); -@@ -3180,6 +3183,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param) - return ret; - } - -+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, -+ void __user *param) -+{ -+ int ret; -+ -+ pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n"); -+ -+ if (type) -+ return -EINVAL; -+ if (lock_fdc(drive)) -+ return -EINTR; -+ set_floppy(drive); -+ ret = raw_cmd_ioctl(cmd, param); -+ if (ret == -EINTR) -+ return -EINTR; -+ process_fd_request(); -+ return ret; -+} -+ -+#else /* CONFIG_BLK_DEV_FD_RAWCMD */ -+ -+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, -+ void __user *param) -+{ -+ return -EOPNOTSUPP; -+} -+ -+#endif -+ - static int invalidate_drive(struct block_device *bdev) - { - /* invalidate the buffer track to force a reread */ -@@ -3368,7 +3400,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int - { - int drive = (long)bdev->bd_disk->private_data; - int type = ITYPE(drive_state[drive].fd_device); -- int i; - int ret; - int size; - union inparam { -@@ -3519,16 +3550,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int - outparam = &write_errors[drive]; - break; - case FDRAWCMD: -- if (type) -- return -EINVAL; -- if (lock_fdc(drive)) -- return -EINTR; -- set_floppy(drive); -- i = raw_cmd_ioctl(cmd, (void __user *)param); -- if (i == -EINTR) -- return -EINTR; -- process_fd_request(); -- return i; -+ return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param); - case FDTWADDLE: - if (lock_fdc(drive)) - return -EINTR; -@@ -4478,6 +4500,7 @@ static const struct blk_mq_ops floppy_mq_ops = { - }; - - static struct platform_device floppy_device[N_DRIVE]; -+static bool registered[N_DRIVE]; - - static bool floppy_available(int drive) - { -@@ -4564,8 +4587,10 @@ static int __init do_floppy_init(void) - goto out_put_disk; - - err = floppy_alloc_disk(drive, 0); -- if (err) -+ if (err) { -+ blk_mq_free_tag_set(&tag_sets[drive]); - goto out_put_disk; -+ } - - timer_setup(&motor_off_timer[drive], motor_off_callback, 0); - } -@@ -4693,6 +4718,8 @@ static int __init do_floppy_init(void) - if (err) - goto out_remove_drives; - -+ registered[drive] = true; -+ - device_add_disk(&floppy_device[drive].dev, disks[drive][0], - NULL); - } -@@ -4703,7 +4730,8 @@ out_remove_drives: - while (drive--) { - if (floppy_available(drive)) { - del_gendisk(disks[drive][0]); -- platform_device_unregister(&floppy_device[drive]); -+ if 
(registered[drive]) -+ platform_device_unregister(&floppy_device[drive]); - } - } - out_release_dma: -@@ -4946,7 +4974,8 @@ static void __exit floppy_module_exit(void) - if (disks[drive][i]) - del_gendisk(disks[drive][i]); - } -- platform_device_unregister(&floppy_device[drive]); -+ if (registered[drive]) -+ platform_device_unregister(&floppy_device[drive]); - } - for (i = 0; i < ARRAY_SIZE(floppy_type); i++) { - if (disks[drive][i]) -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 7bf4686af774e..1d60d5ac0db80 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -79,6 +79,7 @@ - #include - #include - #include -+#include - - #include "loop.h" - -@@ -272,19 +273,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) - blk_mq_unfreeze_queue(lo->lo_queue); - } - --/** -- * loop_validate_block_size() - validates the passed in block size -- * @bsize: size to validate -- */ --static int --loop_validate_block_size(unsigned short bsize) --{ -- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) -- return -EINVAL; -- -- return 0; --} -- - /** - * loop_set_size() - sets device size and notifies userspace - * @lo: struct loop_device to set the size for -@@ -748,6 +736,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, - - if (!file) - return -EBADF; -+ -+ /* suppress uevents while reconfiguring the device */ -+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); -+ - is_loop = is_loop_device(file); - error = loop_global_lock_killable(lo, is_loop); - if (error) -@@ -802,13 +794,18 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, - fput(old_file); - if (partscan) - loop_reread_partitions(lo); -- return 0; -+ -+ error = 0; -+done: -+ /* enable and uncork uevent now that we are done */ -+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); -+ return error; - - out_err: - loop_global_unlock(lo, is_loop); - out_putf: - fput(file); -- return error; -+ goto done; - } - - /* loop sysfs attributes */ -@@ -856,33 +853,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) - - static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) - { -- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); -+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); - } - - static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) - { -- return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); -+ return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); - } - - static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) - { - int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); - -- return sprintf(buf, "%s\n", autoclear ? "1" : "0"); -+ return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); - } - - static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) - { - int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); - -- return sprintf(buf, "%s\n", partscan ? "1" : "0"); -+ return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); - } - - static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) - { - int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); - -- return sprintf(buf, "%s\n", dio ? "1" : "0"); -+ return sysfs_emit(buf, "%s\n", dio ? 
"1" : "0"); - } - - LOOP_ATTR_RO(backing_file); -@@ -952,8 +949,13 @@ static void loop_config_discard(struct loop_device *lo) - granularity = 0; - - } else { -+ struct kstatfs sbuf; -+ - max_discard_sectors = UINT_MAX >> 9; -- granularity = inode->i_sb->s_blocksize; -+ if (!vfs_statfs(&file->f_path, &sbuf)) -+ granularity = sbuf.f_bsize; -+ else -+ max_discard_sectors = 0; - } - - if (max_discard_sectors) { -@@ -1159,8 +1161,13 @@ loop_set_status_from_info(struct loop_device *lo, - if (err) - return err; - -+ /* Avoid assigning overflow values */ -+ if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) -+ return -EOVERFLOW; -+ - lo->lo_offset = info->lo_offset; - lo->lo_sizelimit = info->lo_sizelimit; -+ - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); - memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); - lo->lo_file_name[LO_NAME_SIZE-1] = 0; -@@ -1236,7 +1243,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, - } - - if (config->block_size) { -- error = loop_validate_block_size(config->block_size); -+ error = blk_validate_block_size(config->block_size); - if (error) - goto out_unlock; - } -@@ -1258,6 +1265,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, - goto out_unlock; - } - -+ /* suppress uevents while reconfiguring the device */ -+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); -+ - disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); - set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); - -@@ -1304,13 +1314,18 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, - lo->lo_flags |= LO_FLAGS_PARTSCAN; - partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; - if (partscan) -- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; -+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART; -+ -+ /* enable and uncork uevent now that we are done */ -+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); - - loop_global_unlock(lo, is_loop); - if (partscan) - loop_reread_partitions(lo); -+ - if (!(mode & FMODE_EXCL)) - bd_abort_claiming(bdev, loop_configure); -+ - return 0; - - out_unlock: -@@ -1448,7 +1463,7 @@ out_unlock: - mutex_lock(&lo->lo_mutex); - lo->lo_flags = 0; - if (!part_shift) -- lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; -+ lo->lo_disk->flags |= GENHD_FL_NO_PART; - lo->lo_state = Lo_unbound; - mutex_unlock(&lo->lo_mutex); - -@@ -1565,7 +1580,7 @@ out_unfreeze: - - if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && - !(prev_lo_flags & LO_FLAGS_PARTSCAN)) { -- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; -+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART; - partscan = true; - } - out_unlock: -@@ -1759,7 +1774,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) - if (lo->lo_state != Lo_bound) - return -ENXIO; - -- err = loop_validate_block_size(arg); -+ err = blk_validate_block_size(arg); - if (err) - return err; - -@@ -2093,7 +2108,16 @@ static const struct block_device_operations lo_fops = { - /* - * And now the modules code and kernel interface. - */ --static int max_loop; -+ -+/* -+ * If max_loop is specified, create that many devices upfront. -+ * This also becomes a hard limit. If max_loop is not specified, -+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module -+ * init time. Loop devices can be requested on-demand with the -+ * /dev/loop-control interface, or be instantiated by accessing -+ * a 'dead' device node. 
-+ */ -+static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; - module_param(max_loop, int, 0444); - MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); - module_param(max_part, int, 0444); -@@ -2181,35 +2205,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, - - static void loop_handle_cmd(struct loop_cmd *cmd) - { -+ struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; -+ struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; - struct request *rq = blk_mq_rq_from_pdu(cmd); - const bool write = op_is_write(req_op(rq)); - struct loop_device *lo = rq->q->queuedata; - int ret = 0; - struct mem_cgroup *old_memcg = NULL; -+ const bool use_aio = cmd->use_aio; - - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { - ret = -EIO; - goto failed; - } - -- if (cmd->blkcg_css) -- kthread_associate_blkcg(cmd->blkcg_css); -- if (cmd->memcg_css) -+ if (cmd_blkcg_css) -+ kthread_associate_blkcg(cmd_blkcg_css); -+ if (cmd_memcg_css) - old_memcg = set_active_memcg( -- mem_cgroup_from_css(cmd->memcg_css)); -+ mem_cgroup_from_css(cmd_memcg_css)); - -+ /* -+ * do_req_filebacked() may call blk_mq_complete_request() synchronously -+ * or asynchronously if using aio. Hence, do not touch 'cmd' after -+ * do_req_filebacked() has returned unless we are sure that 'cmd' has -+ * not yet been completed. -+ */ - ret = do_req_filebacked(lo, rq); - -- if (cmd->blkcg_css) -+ if (cmd_blkcg_css) - kthread_associate_blkcg(NULL); - -- if (cmd->memcg_css) { -+ if (cmd_memcg_css) { - set_active_memcg(old_memcg); -- css_put(cmd->memcg_css); -+ css_put(cmd_memcg_css); - } - failed: - /* complete non-aio request */ -- if (!cmd->use_aio || ret) { -+ if (!use_aio || ret) { - if (ret == -EOPNOTSUPP) - cmd->ret = ret; - else -@@ -2377,7 +2410,7 @@ static int loop_add(int i) - * userspace tools. Parameters like this in general should be avoided. - */ - if (!part_shift) -- disk->flags |= GENHD_FL_NO_PART_SCAN; -+ disk->flags |= GENHD_FL_NO_PART; - disk->flags |= GENHD_FL_EXT_DEVT; - atomic_set(&lo->lo_refcnt, 0); - mutex_init(&lo->lo_mutex); -@@ -2442,7 +2475,7 @@ static int loop_control_remove(int idx) - int ret; - - if (idx < 0) { -- pr_warn("deleting an unspecified loop device is not supported.\n"); -+ pr_warn_once("deleting an unspecified loop device is not supported.\n"); - return -EINVAL; - } - -@@ -2538,7 +2571,7 @@ MODULE_ALIAS("devname:loop-control"); - - static int __init loop_init(void) - { -- int i, nr; -+ int i; - int err; - - part_shift = 0; -@@ -2566,19 +2599,6 @@ static int __init loop_init(void) - goto err_out; - } - -- /* -- * If max_loop is specified, create that many devices upfront. -- * This also becomes a hard limit. If max_loop is not specified, -- * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module -- * init time. Loop devices can be requested on-demand with the -- * /dev/loop-control interface, or be instantiated by accessing -- * a 'dead' device node. 
-- */ -- if (max_loop) -- nr = max_loop; -- else -- nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; -- - err = misc_register(&loop_misc); - if (err < 0) - goto err_out; -@@ -2590,7 +2610,7 @@ static int __init loop_init(void) - } - - /* pre-create number of devices given by config or max_loop */ -- for (i = 0; i < nr; i++) -+ for (i = 0; i < max_loop; i++) - loop_add(i); - - printk(KERN_INFO "loop: module loaded\n"); -diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c -index 901855717cb53..ba61e72741eab 100644 ---- a/drivers/block/mtip32xx/mtip32xx.c -+++ b/drivers/block/mtip32xx/mtip32xx.c -@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) - "Completion workers still active!\n"); - } - -- blk_set_queue_dying(dd->queue); -+ blk_mark_disk_dead(dd->disk); - set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); - - /* Clean up the block layer. */ -diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c -index 26798da661bd4..0bda4a468c660 100644 ---- a/drivers/block/n64cart.c -+++ b/drivers/block/n64cart.c -@@ -88,7 +88,7 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio) - { - struct bio_vec bvec; - struct bvec_iter iter; -- struct device *dev = bio->bi_disk->private_data; -+ struct device *dev = bio->bi_bdev->bd_disk->private_data; - u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; - - bio_for_each_segment(bvec, bio, iter) { -@@ -137,7 +137,7 @@ static int __init n64cart_probe(struct platform_device *pdev) - return -ENOMEM; - - disk->first_minor = 0; -- disk->flags = GENHD_FL_NO_PART_SCAN; -+ disk->flags = GENHD_FL_NO_PART; - disk->fops = &n64cart_fops; - disk->private_data = &pdev->dev; - strcpy(disk->disk_name, "n64cart"); -diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c -index 1183f7872b713..e563aa407e888 100644 ---- a/drivers/block/nbd.c -+++ b/drivers/block/nbd.c -@@ -122,10 +122,10 @@ struct nbd_device { - struct work_struct remove_work; - - struct list_head list; -- struct task_struct *task_recv; - struct task_struct *task_setup; - - unsigned long flags; -+ pid_t pid; /* pid of nbd-client, if attached */ - - char *backend; - }; -@@ -217,7 +217,7 @@ static ssize_t pid_show(struct device *dev, - struct gendisk *disk = dev_to_disk(dev); - struct nbd_device *nbd = (struct nbd_device *)disk->private_data; - -- return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); -+ return sprintf(buf, "%d\n", nbd->pid); - } - - static const struct device_attribute pid_attr = { -@@ -254,7 +254,7 @@ static void nbd_dev_remove(struct nbd_device *nbd) - mutex_lock(&nbd_index_mutex); - idr_remove(&nbd_index_idr, nbd->index); - mutex_unlock(&nbd_index_mutex); -- -+ destroy_workqueue(nbd->recv_workq); - kfree(nbd); - } - -@@ -326,10 +326,13 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, - if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize)) - return -EINVAL; - -+ if (bytesize < 0) -+ return -EINVAL; -+ - nbd->config->bytesize = bytesize; - nbd->config->blksize_bits = __ffs(blksize); - -- if (!nbd->task_recv) -+ if (!nbd->pid) - return 0; - - if (nbd->config->flags & NBD_FLAG_SEND_TRIM) { -@@ -896,11 +899,15 @@ static int wait_for_reconnect(struct nbd_device *nbd) - struct nbd_config *config = nbd->config; - if (!config->dead_conn_timeout) - return 0; -- if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) -+ -+ if (!wait_event_timeout(config->conn_wait, -+ test_bit(NBD_RT_DISCONNECTED, -+ &config->runtime_flags) || -+ atomic_read(&config->live_connections) > 0, -+ config->dead_conn_timeout)) - return 0; -- 
return wait_event_timeout(config->conn_wait, -- atomic_read(&config->live_connections) > 0, -- config->dead_conn_timeout) > 0; -+ -+ return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); - } - - static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) -@@ -1044,6 +1051,9 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, - struct nbd_sock *nsock; - int err; - -+ /* Arg will be cast to int, check it to avoid overflow */ -+ if (arg > INT_MAX) -+ return -EINVAL; - sock = nbd_get_socket(nbd, arg, &err); - if (!sock) - return err; -@@ -1163,11 +1173,11 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) - return -ENOSPC; - } - --static void nbd_bdev_reset(struct block_device *bdev) -+static void nbd_bdev_reset(struct nbd_device *nbd) - { -- if (bdev->bd_openers > 1) -+ if (nbd->disk->part0->bd_openers > 1) - return; -- set_capacity(bdev->bd_disk, 0); -+ set_capacity(nbd->disk, 0); - } - - static void nbd_parse_flags(struct nbd_device *nbd) -@@ -1241,7 +1251,7 @@ static void nbd_config_put(struct nbd_device *nbd) - if (test_and_clear_bit(NBD_RT_HAS_PID_FILE, - &config->runtime_flags)) - device_remove_file(disk_to_dev(nbd->disk), &pid_attr); -- nbd->task_recv = NULL; -+ nbd->pid = 0; - if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE, - &config->runtime_flags)) { - device_remove_file(disk_to_dev(nbd->disk), &backend_attr); -@@ -1260,10 +1270,6 @@ static void nbd_config_put(struct nbd_device *nbd) - kfree(nbd->config); - nbd->config = NULL; - -- if (nbd->recv_workq) -- destroy_workqueue(nbd->recv_workq); -- nbd->recv_workq = NULL; -- - nbd->tag_set.timeout = 0; - nbd->disk->queue->limits.discard_granularity = 0; - nbd->disk->queue->limits.discard_alignment = 0; -@@ -1282,7 +1288,7 @@ static int nbd_start_device(struct nbd_device *nbd) - int num_connections = config->num_connections; - int error = 0, i; - -- if (nbd->task_recv) -+ if (nbd->pid) - return -EBUSY; - if (!config->socks) - return -EINVAL; -@@ -1292,16 +1298,8 @@ static int nbd_start_device(struct nbd_device *nbd) - return -EINVAL; - } - -- nbd->recv_workq = alloc_workqueue("knbd%d-recv", -- WQ_MEM_RECLAIM | WQ_HIGHPRI | -- WQ_UNBOUND, 0, nbd->index); -- if (!nbd->recv_workq) { -- dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); -- return -ENOMEM; -- } -- - blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); -- nbd->task_recv = current; -+ nbd->pid = task_pid_nr(current); - - nbd_parse_flags(nbd); - -@@ -1345,7 +1343,7 @@ static int nbd_start_device(struct nbd_device *nbd) - return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); - } - --static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) -+static int nbd_start_device_ioctl(struct nbd_device *nbd) - { - struct nbd_config *config = nbd->config; - int ret; -@@ -1359,12 +1357,14 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b - mutex_unlock(&nbd->config_lock); - ret = wait_event_interruptible(config->recv_wq, - atomic_read(&config->recv_threads) == 0); -- if (ret) -+ if (ret) { - sock_shutdown(nbd); -- flush_workqueue(nbd->recv_workq); -+ nbd_clear_que(nbd); -+ } - -+ flush_workqueue(nbd->recv_workq); - mutex_lock(&nbd->config_lock); -- nbd_bdev_reset(bdev); -+ nbd_bdev_reset(nbd); - /* user requested, ignore socket errors */ - if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags)) - ret = 0; -@@ -1376,9 +1376,9 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b - static 
void nbd_clear_sock_ioctl(struct nbd_device *nbd, - struct block_device *bdev) - { -- sock_shutdown(nbd); -+ nbd_clear_sock(nbd); - __invalidate_device(bdev, true); -- nbd_bdev_reset(bdev); -+ nbd_bdev_reset(nbd); - if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, - &nbd->config->runtime_flags)) - nbd_config_put(nbd); -@@ -1424,7 +1424,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, - config->flags = arg; - return 0; - case NBD_DO_IT: -- return nbd_start_device_ioctl(nbd, bdev); -+ return nbd_start_device_ioctl(nbd); - case NBD_CLEAR_QUE: - /* - * This is for compatibility only. The queue is always cleared -@@ -1475,15 +1475,20 @@ static struct nbd_config *nbd_alloc_config(void) - { - struct nbd_config *config; - -+ if (!try_module_get(THIS_MODULE)) -+ return ERR_PTR(-ENODEV); -+ - config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); -- if (!config) -- return NULL; -+ if (!config) { -+ module_put(THIS_MODULE); -+ return ERR_PTR(-ENOMEM); -+ } -+ - atomic_set(&config->recv_threads, 0); - init_waitqueue_head(&config->recv_wq); - init_waitqueue_head(&config->conn_wait); - config->blksize_bits = NBD_DEF_BLKSIZE_BITS; - atomic_set(&config->live_connections, 0); -- try_module_get(THIS_MODULE); - return config; - } - -@@ -1510,12 +1515,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) - mutex_unlock(&nbd->config_lock); - goto out; - } -- config = nbd->config = nbd_alloc_config(); -- if (!config) { -- ret = -ENOMEM; -+ config = nbd_alloc_config(); -+ if (IS_ERR(config)) { -+ ret = PTR_ERR(config); - mutex_unlock(&nbd->config_lock); - goto out; - } -+ nbd->config = config; - refcount_set(&nbd->config_refs, 1); - refcount_inc(&nbd->refs); - mutex_unlock(&nbd->config_lock); -@@ -1557,8 +1563,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) - { - struct nbd_device *nbd = s->private; - -- if (nbd->task_recv) -- seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); -+ if (nbd->pid) -+ seq_printf(s, "recv: %d\n", nbd->pid); - - return 0; - } -@@ -1599,7 +1605,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) - return -EIO; - - dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); -- if (!dir) { -+ if (IS_ERR(dir)) { - dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", - nbd_name(nbd)); - return -EIO; -@@ -1625,7 +1631,7 @@ static int nbd_dbg_init(void) - struct dentry *dbg_dir; - - dbg_dir = debugfs_create_dir("nbd", NULL); -- if (!dbg_dir) -+ if (IS_ERR(dbg_dir)) - return -EIO; - - nbd_dbg_dir = dbg_dir; -@@ -1725,6 +1731,15 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) - } - nbd->disk = disk; - -+ nbd->recv_workq = alloc_workqueue("nbd%d-recv", -+ WQ_MEM_RECLAIM | WQ_HIGHPRI | -+ WQ_UNBOUND, 0, nbd->index); -+ if (!nbd->recv_workq) { -+ dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); -+ err = -ENOMEM; -+ goto out_err_disk; -+ } -+ - /* - * Tell the block layer that we are not a rotational device - */ -@@ -1747,22 +1762,14 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) - refcount_set(&nbd->refs, 0); - INIT_LIST_HEAD(&nbd->list); - disk->major = NBD_MAJOR; -- -- /* Too big first_minor can cause duplicate creation of -- * sysfs files/links, since first_minor will be truncated to -- * byte in __device_add_disk(). 
-- */ - disk->first_minor = index << part_shift; -- if (disk->first_minor > 0xff) { -- err = -EINVAL; -- goto out_free_idr; -- } -- - disk->minors = 1 << part_shift; - disk->fops = &nbd_fops; - disk->private_data = nbd; - sprintf(disk->disk_name, "nbd%d", index); -- add_disk(disk); -+ err = add_disk(disk); -+ if (err) -+ goto out_free_work; - - /* - * Now publish the device. -@@ -1771,6 +1778,10 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs) - nbd_total_devices++; - return nbd; - -+out_free_work: -+ destroy_workqueue(nbd->recv_workq); -+out_err_disk: -+ blk_cleanup_disk(disk); - out_free_idr: - mutex_lock(&nbd_index_mutex); - idr_remove(&nbd_index_idr, index); -@@ -1856,8 +1867,19 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) - if (!netlink_capable(skb, CAP_SYS_ADMIN)) - return -EPERM; - -- if (info->attrs[NBD_ATTR_INDEX]) -+ if (info->attrs[NBD_ATTR_INDEX]) { - index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); -+ -+ /* -+ * Too big first_minor can cause duplicate creation of -+ * sysfs files/links, since index << part_shift might overflow, or -+ * MKDEV() expect that the max bits of first_minor is 20. -+ */ -+ if (index < 0 || index > MINORMASK >> part_shift) { -+ printk(KERN_ERR "nbd: illegal input index %d\n", index); -+ return -EINVAL; -+ } -+ } - if (!info->attrs[NBD_ATTR_SOCKETS]) { - printk(KERN_ERR "nbd: must specify at least one socket\n"); - return -EINVAL; -@@ -1907,13 +1929,14 @@ again: - nbd_put(nbd); - return -EINVAL; - } -- config = nbd->config = nbd_alloc_config(); -- if (!nbd->config) { -+ config = nbd_alloc_config(); -+ if (IS_ERR(config)) { - mutex_unlock(&nbd->config_lock); - nbd_put(nbd); - printk(KERN_ERR "nbd: couldn't allocate config\n"); -- return -ENOMEM; -+ return PTR_ERR(config); - } -+ nbd->config = config; - refcount_set(&nbd->config_refs, 1); - set_bit(NBD_RT_BOUND, &config->runtime_flags); - -@@ -2023,14 +2046,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) - mutex_lock(&nbd->config_lock); - nbd_disconnect(nbd); - sock_shutdown(nbd); -+ wake_up(&nbd->config->conn_wait); - /* -- * Make sure recv thread has finished, so it does not drop the last -- * config ref and try to destroy the workqueue from inside the work -- * queue. And this also ensure that we can safely call nbd_clear_que() -+ * Make sure recv thread has finished, we can safely call nbd_clear_que() - * to cancel the inflight I/Os. - */ -- if (nbd->recv_workq) -- flush_workqueue(nbd->recv_workq); -+ flush_workqueue(nbd->recv_workq); - nbd_clear_que(nbd); - nbd->task_setup = NULL; - mutex_unlock(&nbd->config_lock); -@@ -2135,7 +2156,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) - mutex_lock(&nbd->config_lock); - config = nbd->config; - if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || -- !nbd->task_recv) { -+ !nbd->pid) { - dev_err(nbd_to_dev(nbd), - "not configured, cannot reconfigure\n"); - ret = -EINVAL; -@@ -2473,6 +2494,12 @@ static void __exit nbd_cleanup(void) - struct nbd_device *nbd; - LIST_HEAD(del_list); - -+ /* -+ * Unregister netlink interface prior to waiting -+ * for the completion of netlink commands. 
-+ */ -+ genl_unregister_family(&nbd_genl_family); -+ - nbd_dbg_close(); - - mutex_lock(&nbd_index_mutex); -@@ -2482,6 +2509,9 @@ static void __exit nbd_cleanup(void) - while (!list_empty(&del_list)) { - nbd = list_first_entry(&del_list, struct nbd_device, list); - list_del_init(&nbd->list); -+ if (refcount_read(&nbd->config_refs)) -+ printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n", -+ refcount_read(&nbd->config_refs)); - if (refcount_read(&nbd->refs) != 1) - printk(KERN_ERR "nbd: possibly leaking a device\n"); - nbd_put(nbd); -@@ -2491,7 +2521,6 @@ static void __exit nbd_cleanup(void) - destroy_workqueue(nbd_del_wq); - - idr_destroy(&nbd_index_idr); -- genl_unregister_family(&nbd_genl_family); - unregister_blkdev(NBD_MAJOR, "nbd"); - } - -diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c -index 187d779c8ca08..686ec6bcdef3d 100644 ---- a/drivers/block/null_blk/main.c -+++ b/drivers/block/null_blk/main.c -@@ -1314,8 +1314,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) - case NULL_IRQ_SOFTIRQ: - switch (cmd->nq->dev->queue_mode) { - case NULL_Q_MQ: -- if (likely(!blk_should_fake_timeout(cmd->rq->q))) -- blk_mq_complete_request(cmd->rq); -+ blk_mq_complete_request(cmd->rq); - break; - case NULL_Q_BIO: - /* -@@ -1491,7 +1490,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, - cmd->rq = bd->rq; - cmd->error = BLK_STS_OK; - cmd->nq = nq; -- cmd->fake_timeout = should_timeout_request(bd->rq); -+ cmd->fake_timeout = should_timeout_request(bd->rq) || -+ blk_should_fake_timeout(bd->rq->q); - - blk_mq_start_request(bd->rq); - -@@ -1744,6 +1744,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) - - static int null_validate_conf(struct nullb_device *dev) - { -+ if (dev->queue_mode == NULL_Q_RQ) { -+ pr_err("legacy IO path is no longer available\n"); -+ return -EINVAL; -+ } -+ - dev->blocksize = round_down(dev->blocksize, 512); - dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); - -@@ -1884,8 +1889,13 @@ static int null_add_dev(struct nullb_device *dev) - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); - - mutex_lock(&lock); -- nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); -- dev->index = nullb->index; -+ rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); -+ if (rv < 0) { -+ mutex_unlock(&lock); -+ goto out_cleanup_zone; -+ } -+ nullb->index = rv; -+ dev->index = rv; - mutex_unlock(&lock); - - blk_queue_logical_block_size(nullb->q, dev->blocksize); -@@ -1905,13 +1915,16 @@ static int null_add_dev(struct nullb_device *dev) - - rv = null_gendisk_register(nullb); - if (rv) -- goto out_cleanup_zone; -+ goto out_ida_free; - - mutex_lock(&lock); - list_add_tail(&nullb->list, &nullb_list); - mutex_unlock(&lock); - - return 0; -+ -+out_ida_free: -+ ida_free(&nullb_indexes, nullb->index); - out_cleanup_zone: - null_free_zoned_dev(dev); - out_cleanup_disk: -diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c -index f9cdd11f02f58..91369084e1274 100644 ---- a/drivers/block/paride/pcd.c -+++ b/drivers/block/paride/pcd.c -@@ -183,8 +183,6 @@ static int pcd_audio_ioctl(struct cdrom_device_info *cdi, - static int pcd_packet(struct cdrom_device_info *cdi, - struct packet_command *cgc); - --static int pcd_detect(void); --static void pcd_probe_capabilities(void); - static void do_pcd_read_drq(void); - static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd); -@@ -302,53 +300,6 @@ static const struct blk_mq_ops 
pcd_mq_ops = { - .queue_rq = pcd_queue_rq, - }; - --static void pcd_init_units(void) --{ -- struct pcd_unit *cd; -- int unit; -- -- pcd_drive_count = 0; -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- struct gendisk *disk; -- -- if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1, -- BLK_MQ_F_SHOULD_MERGE)) -- continue; -- -- disk = blk_mq_alloc_disk(&cd->tag_set, cd); -- if (IS_ERR(disk)) { -- blk_mq_free_tag_set(&cd->tag_set); -- continue; -- } -- -- INIT_LIST_HEAD(&cd->rq_list); -- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH); -- cd->disk = disk; -- cd->pi = &cd->pia; -- cd->present = 0; -- cd->last_sense = 0; -- cd->changed = 1; -- cd->drive = (*drives[unit])[D_SLV]; -- if ((*drives[unit])[D_PRT]) -- pcd_drive_count++; -- -- cd->name = &cd->info.name[0]; -- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit); -- cd->info.ops = &pcd_dops; -- cd->info.handle = cd; -- cd->info.speed = 0; -- cd->info.capacity = 1; -- cd->info.mask = 0; -- disk->major = major; -- disk->first_minor = unit; -- disk->minors = 1; -- strcpy(disk->disk_name, cd->name); /* umm... */ -- disk->fops = &pcd_bdops; -- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; -- disk->events = DISK_EVENT_MEDIA_CHANGE; -- } --} -- - static int pcd_open(struct cdrom_device_info *cdi, int purpose) - { - struct pcd_unit *cd = cdi->handle; -@@ -630,10 +581,11 @@ static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr) - return CDS_DISC_OK; - } - --static int pcd_identify(struct pcd_unit *cd, char *id) -+static int pcd_identify(struct pcd_unit *cd) - { -- int k, s; - char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; -+ char id[18]; -+ int k, s; - - pcd_bufblk = -1; - -@@ -661,108 +613,47 @@ static int pcd_identify(struct pcd_unit *cd, char *id) - } - - /* -- * returns 0, with id set if drive is detected -- * -1, if drive detection failed -+ * returns 0, with id set if drive is detected, otherwise an error code. 
- */ --static int pcd_probe(struct pcd_unit *cd, int ms, char *id) -+static int pcd_probe(struct pcd_unit *cd, int ms) - { - if (ms == -1) { - for (cd->drive = 0; cd->drive <= 1; cd->drive++) -- if (!pcd_reset(cd) && !pcd_identify(cd, id)) -+ if (!pcd_reset(cd) && !pcd_identify(cd)) - return 0; - } else { - cd->drive = ms; -- if (!pcd_reset(cd) && !pcd_identify(cd, id)) -+ if (!pcd_reset(cd) && !pcd_identify(cd)) - return 0; - } -- return -1; -+ return -ENODEV; - } - --static void pcd_probe_capabilities(void) -+static int pcd_probe_capabilities(struct pcd_unit *cd) - { -- int unit, r; -- char buffer[32]; - char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 }; -- struct pcd_unit *cd; -- -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- if (!cd->present) -- continue; -- r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities"); -- if (r) -- continue; -- /* we should now have the cap page */ -- if ((buffer[11] & 1) == 0) -- cd->info.mask |= CDC_CD_R; -- if ((buffer[11] & 2) == 0) -- cd->info.mask |= CDC_CD_RW; -- if ((buffer[12] & 1) == 0) -- cd->info.mask |= CDC_PLAY_AUDIO; -- if ((buffer[14] & 1) == 0) -- cd->info.mask |= CDC_LOCK; -- if ((buffer[14] & 8) == 0) -- cd->info.mask |= CDC_OPEN_TRAY; -- if ((buffer[14] >> 6) == 0) -- cd->info.mask |= CDC_CLOSE_TRAY; -- } --} -- --static int pcd_detect(void) --{ -- char id[18]; -- int k, unit; -- struct pcd_unit *cd; -+ char buffer[32]; -+ int ret; - -- printk("%s: %s version %s, major %d, nice %d\n", -- name, name, PCD_VERSION, major, nice); -+ ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities"); -+ if (ret) -+ return ret; -+ -+ /* we should now have the cap page */ -+ if ((buffer[11] & 1) == 0) -+ cd->info.mask |= CDC_CD_R; -+ if ((buffer[11] & 2) == 0) -+ cd->info.mask |= CDC_CD_RW; -+ if ((buffer[12] & 1) == 0) -+ cd->info.mask |= CDC_PLAY_AUDIO; -+ if ((buffer[14] & 1) == 0) -+ cd->info.mask |= CDC_LOCK; -+ if ((buffer[14] & 8) == 0) -+ cd->info.mask |= CDC_OPEN_TRAY; -+ if ((buffer[14] >> 6) == 0) -+ cd->info.mask |= CDC_CLOSE_TRAY; - -- par_drv = pi_register_driver(name); -- if (!par_drv) { -- pr_err("failed to register %s driver\n", name); -- return -1; -- } -- -- k = 0; -- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ -- cd = pcd; -- if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1, -- pcd_buffer, PI_PCD, verbose, cd->name)) { -- if (!pcd_probe(cd, -1, id)) { -- cd->present = 1; -- k++; -- } else -- pi_release(cd->pi); -- } -- } else { -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- int *conf = *drives[unit]; -- if (!conf[D_PRT]) -- continue; -- if (!cd->disk) -- continue; -- if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD], -- conf[D_UNI], conf[D_PRO], conf[D_DLY], -- pcd_buffer, PI_PCD, verbose, cd->name)) -- continue; -- if (!pcd_probe(cd, conf[D_SLV], id)) { -- cd->present = 1; -- k++; -- } else -- pi_release(cd->pi); -- } -- } -- if (k) -- return 0; -- -- printk("%s: No CD-ROM drive found\n", name); -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- if (!cd->disk) -- continue; -- blk_cleanup_disk(cd->disk); -- blk_mq_free_tag_set(&cd->tag_set); -- } -- pi_unregister_driver(par_drv); -- return -1; -+ return 0; - } - - /* I/O request processing */ -@@ -999,43 +890,124 @@ static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) - return 0; - } - -+static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port, -+ int mode, int unit, int protocol, int delay, int ms) -+{ -+ struct gendisk *disk; -+ int ret; 
-+ -+ ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1, -+ BLK_MQ_F_SHOULD_MERGE); -+ if (ret) -+ return ret; -+ -+ disk = blk_mq_alloc_disk(&cd->tag_set, cd); -+ if (IS_ERR(disk)) { -+ ret = PTR_ERR(disk); -+ goto out_free_tag_set; -+ } -+ -+ INIT_LIST_HEAD(&cd->rq_list); -+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH); -+ cd->disk = disk; -+ cd->pi = &cd->pia; -+ cd->present = 0; -+ cd->last_sense = 0; -+ cd->changed = 1; -+ cd->drive = (*drives[cd - pcd])[D_SLV]; -+ -+ cd->name = &cd->info.name[0]; -+ snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit); -+ cd->info.ops = &pcd_dops; -+ cd->info.handle = cd; -+ cd->info.speed = 0; -+ cd->info.capacity = 1; -+ cd->info.mask = 0; -+ disk->major = major; -+ disk->first_minor = unit; -+ disk->minors = 1; -+ strcpy(disk->disk_name, cd->name); /* umm... */ -+ disk->fops = &pcd_bdops; -+ disk->events = DISK_EVENT_MEDIA_CHANGE; -+ disk->event_flags = DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE; -+ -+ if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay, -+ pcd_buffer, PI_PCD, verbose, cd->name)) { -+ ret = -ENODEV; -+ goto out_free_disk; -+ } -+ ret = pcd_probe(cd, ms); -+ if (ret) -+ goto out_pi_release; -+ -+ cd->present = 1; -+ pcd_probe_capabilities(cd); -+ register_cdrom(cd->disk, &cd->info); -+ add_disk(cd->disk); -+ return 0; -+ -+out_pi_release: -+ pi_release(cd->pi); -+out_free_disk: -+ blk_cleanup_disk(cd->disk); -+out_free_tag_set: -+ blk_mq_free_tag_set(&cd->tag_set); -+ return ret; -+} -+ - static int __init pcd_init(void) - { -- struct pcd_unit *cd; -- int unit; -+ int found = 0, unit; - - if (disable) - return -EINVAL; - -- pcd_init_units(); -+ if (register_blkdev(major, name)) -+ return -EBUSY; - -- if (pcd_detect()) -- return -ENODEV; -+ pr_info("%s: %s version %s, major %d, nice %d\n", -+ name, name, PCD_VERSION, major, nice); - -- /* get the atapi capabilities page */ -- pcd_probe_capabilities(); -+ par_drv = pi_register_driver(name); -+ if (!par_drv) { -+ pr_err("failed to register %s driver\n", name); -+ goto out_unregister_blkdev; -+ } - -- if (register_blkdev(major, name)) { -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- if (!cd->disk) -- continue; -+ for (unit = 0; unit < PCD_UNITS; unit++) { -+ if ((*drives[unit])[D_PRT]) -+ pcd_drive_count++; -+ } -+ -+ if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ -+ if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1)) -+ found++; -+ } else { -+ for (unit = 0; unit < PCD_UNITS; unit++) { -+ struct pcd_unit *cd = &pcd[unit]; -+ int *conf = *drives[unit]; - -- blk_cleanup_queue(cd->disk->queue); -- blk_mq_free_tag_set(&cd->tag_set); -- put_disk(cd->disk); -+ if (!conf[D_PRT]) -+ continue; -+ if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD], -+ conf[D_UNI], conf[D_PRO], conf[D_DLY], -+ conf[D_SLV])) -+ found++; - } -- return -EBUSY; - } - -- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- if (cd->present) { -- register_cdrom(cd->disk, &cd->info); -- cd->disk->private_data = cd; -- add_disk(cd->disk); -- } -+ if (!found) { -+ pr_info("%s: No CD-ROM drive found\n", name); -+ goto out_unregister_pi_driver; - } - - return 0; -+ -+out_unregister_pi_driver: -+ pi_unregister_driver(par_drv); -+out_unregister_blkdev: -+ unregister_blkdev(major, name); -+ return -ENODEV; - } - - static void __exit pcd_exit(void) -@@ -1044,20 +1016,18 @@ static void __exit pcd_exit(void) - int unit; - - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { -- if (!cd->disk) -+ if (!cd->present) - continue; - -- if 
(cd->present) { -- del_gendisk(cd->disk); -- pi_release(cd->pi); -- unregister_cdrom(&cd->info); -- } -- blk_cleanup_queue(cd->disk->queue); -+ del_gendisk(cd->disk); -+ pi_release(cd->pi); -+ unregister_cdrom(&cd->info); -+ blk_cleanup_disk(cd->disk); -+ - blk_mq_free_tag_set(&cd->tag_set); -- put_disk(cd->disk); - } -- unregister_blkdev(major, name); - pi_unregister_driver(par_drv); -+ unregister_blkdev(major, name); - } - - MODULE_LICENSE("GPL"); -diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c -index 0f26b2510a756..ca2ab977ef8ef 100644 ---- a/drivers/block/pktcdvd.c -+++ b/drivers/block/pktcdvd.c -@@ -2407,6 +2407,8 @@ static blk_qc_t pkt_submit_bio(struct bio *bio) - struct bio *split; - - blk_queue_split(&bio); -+ if (!bio) -+ return BLK_QC_T_NONE; - - pd = bio->bi_bdev->bd_disk->queue->queuedata; - if (!pd) { -diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c -index c7b19e128b03c..c79aa4d8ccf73 100644 ---- a/drivers/block/ps3vram.c -+++ b/drivers/block/ps3vram.c -@@ -587,6 +587,8 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio) - dev_dbg(&dev->core, "%s\n", __func__); - - blk_queue_split(&bio); -+ if (!bio) -+ return BLK_QC_T_NONE; - - spin_lock_irq(&priv->lock); - busy = !bio_list_empty(&priv->list); -diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c -index e65c9d706f6fb..fe8bdbf4616bc 100644 ---- a/drivers/block/rbd.c -+++ b/drivers/block/rbd.c -@@ -1335,14 +1335,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req) - /* - * Must be called after rbd_obj_calc_img_extents(). - */ --static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) -+static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req) - { -- if (!obj_req->num_img_extents || -- (rbd_obj_is_entire(obj_req) && -- !obj_req->img_request->snapc->num_snaps)) -- return false; -+ rbd_assert(obj_req->img_request->snapc); - -- return true; -+ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) { -+ dout("%s %p objno %llu discard\n", __func__, obj_req, -+ obj_req->ex.oe_objno); -+ return; -+ } -+ -+ if (!obj_req->num_img_extents) { -+ dout("%s %p objno %llu not overlapping\n", __func__, obj_req, -+ obj_req->ex.oe_objno); -+ return; -+ } -+ -+ if (rbd_obj_is_entire(obj_req) && -+ !obj_req->img_request->snapc->num_snaps) { -+ dout("%s %p objno %llu entire\n", __func__, obj_req, -+ obj_req->ex.oe_objno); -+ return; -+ } -+ -+ obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; - } - - static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) -@@ -1443,6 +1459,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, - static struct ceph_osd_request * - rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops) - { -+ rbd_assert(obj_req->img_request->snapc); - return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc, - num_ops); - } -@@ -1579,15 +1596,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request, - mutex_init(&img_request->state_mutex); - } - -+/* -+ * Only snap_id is captured here, for reads. For writes, snapshot -+ * context is captured in rbd_img_object_requests() after exclusive -+ * lock is ensured to be held. 
-+ */ - static void rbd_img_capture_header(struct rbd_img_request *img_req) - { - struct rbd_device *rbd_dev = img_req->rbd_dev; - - lockdep_assert_held(&rbd_dev->header_rwsem); - -- if (rbd_img_is_write(img_req)) -- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); -- else -+ if (!rbd_img_is_write(img_req)) - img_req->snap_id = rbd_dev->spec->snap_id; - - if (rbd_dev_parent_get(rbd_dev)) -@@ -2234,9 +2254,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req) - if (ret) - return ret; - -- if (rbd_obj_copyup_enabled(obj_req)) -- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; -- - obj_req->write_state = RBD_OBJ_WRITE_START; - return 0; - } -@@ -2342,8 +2359,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req) - if (ret) - return ret; - -- if (rbd_obj_copyup_enabled(obj_req)) -- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; - if (!obj_req->num_img_extents) { - obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; - if (rbd_obj_is_entire(obj_req)) -@@ -3288,6 +3303,7 @@ again: - case RBD_OBJ_WRITE_START: - rbd_assert(!*result); - -+ rbd_obj_set_copyup_enabled(obj_req); - if (rbd_obj_write_is_noop(obj_req)) - return true; - -@@ -3474,9 +3490,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) - - static void rbd_img_object_requests(struct rbd_img_request *img_req) - { -+ struct rbd_device *rbd_dev = img_req->rbd_dev; - struct rbd_obj_request *obj_req; - - rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); -+ rbd_assert(!need_exclusive_lock(img_req) || -+ __rbd_is_lock_owner(rbd_dev)); -+ -+ if (rbd_img_is_write(img_req)) { -+ rbd_assert(!img_req->snapc); -+ down_read(&rbd_dev->header_rwsem); -+ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); -+ up_read(&rbd_dev->header_rwsem); -+ } - - for_each_obj_request(img_req, obj_req) { - int result = 0; -@@ -3494,7 +3520,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req) - - static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) - { -- struct rbd_device *rbd_dev = img_req->rbd_dev; - int ret; - - again: -@@ -3515,9 +3540,6 @@ again: - if (*result) - return true; - -- rbd_assert(!need_exclusive_lock(img_req) || -- __rbd_is_lock_owner(rbd_dev)); -- - rbd_img_object_requests(img_req); - if (!img_req->pending.num_pending) { - *result = img_req->pending.result; -@@ -3655,7 +3677,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) - ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, - RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, - RBD_LOCK_TAG, "", 0); -- if (ret) -+ if (ret && ret != -EEXIST) - return ret; - - __rbd_lock(rbd_dev, cookie); -@@ -3829,51 +3851,82 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) - list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); - } - --static int get_lock_owner_info(struct rbd_device *rbd_dev, -- struct ceph_locker **lockers, u32 *num_lockers) -+static bool locker_equal(const struct ceph_locker *lhs, -+ const struct ceph_locker *rhs) -+{ -+ return lhs->id.name.type == rhs->id.name.type && -+ lhs->id.name.num == rhs->id.name.num && -+ !strcmp(lhs->id.cookie, rhs->id.cookie) && -+ ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr); -+} -+ -+static void free_locker(struct ceph_locker *locker) -+{ -+ if (locker) -+ ceph_free_lockers(locker, 1); -+} -+ -+static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) - { - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; -+ struct 
ceph_locker *lockers; -+ u32 num_lockers; - u8 lock_type; - char *lock_tag; -+ u64 handle; - int ret; - -- dout("%s rbd_dev %p\n", __func__, rbd_dev); -- - ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, - &rbd_dev->header_oloc, RBD_LOCK_NAME, -- &lock_type, &lock_tag, lockers, num_lockers); -- if (ret) -- return ret; -+ &lock_type, &lock_tag, &lockers, &num_lockers); -+ if (ret) { -+ rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); -+ return ERR_PTR(ret); -+ } - -- if (*num_lockers == 0) { -+ if (num_lockers == 0) { - dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); -+ lockers = NULL; - goto out; - } - - if (strcmp(lock_tag, RBD_LOCK_TAG)) { - rbd_warn(rbd_dev, "locked by external mechanism, tag %s", - lock_tag); -- ret = -EBUSY; -- goto out; -+ goto err_busy; - } - -- if (lock_type == CEPH_CLS_LOCK_SHARED) { -- rbd_warn(rbd_dev, "shared lock type detected"); -- ret = -EBUSY; -- goto out; -+ if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) { -+ rbd_warn(rbd_dev, "incompatible lock type detected"); -+ goto err_busy; - } - -- if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, -- strlen(RBD_LOCK_COOKIE_PREFIX))) { -+ WARN_ON(num_lockers != 1); -+ ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", -+ &handle); -+ if (ret != 1) { - rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", -- (*lockers)[0].id.cookie); -- ret = -EBUSY; -- goto out; -+ lockers[0].id.cookie); -+ goto err_busy; - } -+ if (ceph_addr_is_blank(&lockers[0].info.addr)) { -+ rbd_warn(rbd_dev, "locker has a blank address"); -+ goto err_busy; -+ } -+ -+ dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n", -+ __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name), -+ &lockers[0].info.addr.in_addr, -+ le32_to_cpu(lockers[0].info.addr.nonce), handle); - - out: - kfree(lock_tag); -- return ret; -+ return lockers; -+ -+err_busy: -+ kfree(lock_tag); -+ ceph_free_lockers(lockers, num_lockers); -+ return ERR_PTR(-EBUSY); - } - - static int find_watcher(struct rbd_device *rbd_dev, -@@ -3889,8 +3942,10 @@ static int find_watcher(struct rbd_device *rbd_dev, - ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, - &rbd_dev->header_oloc, &watchers, - &num_watchers); -- if (ret) -+ if (ret) { -+ rbd_warn(rbd_dev, "failed to get watchers: %d", ret); - return ret; -+ } - - sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); - for (i = 0; i < num_watchers; i++) { -@@ -3927,51 +3982,72 @@ out: - static int rbd_try_lock(struct rbd_device *rbd_dev) - { - struct ceph_client *client = rbd_dev->rbd_client->client; -- struct ceph_locker *lockers; -- u32 num_lockers; -+ struct ceph_locker *locker, *refreshed_locker; - int ret; - - for (;;) { -+ locker = refreshed_locker = NULL; -+ - ret = rbd_lock(rbd_dev); -- if (ret != -EBUSY) -- return ret; -+ if (!ret) -+ goto out; -+ if (ret != -EBUSY) { -+ rbd_warn(rbd_dev, "failed to lock header: %d", ret); -+ goto out; -+ } - - /* determine if the current lock holder is still alive */ -- ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); -- if (ret) -- return ret; -- -- if (num_lockers == 0) -+ locker = get_lock_owner_info(rbd_dev); -+ if (IS_ERR(locker)) { -+ ret = PTR_ERR(locker); -+ locker = NULL; -+ goto out; -+ } -+ if (!locker) - goto again; - -- ret = find_watcher(rbd_dev, lockers); -+ ret = find_watcher(rbd_dev, locker); - if (ret) - goto out; /* request lock or error */ - -+ refreshed_locker = get_lock_owner_info(rbd_dev); -+ if (IS_ERR(refreshed_locker)) { -+ ret = PTR_ERR(refreshed_locker); -+ 
refreshed_locker = NULL; -+ goto out; -+ } -+ if (!refreshed_locker || -+ !locker_equal(locker, refreshed_locker)) -+ goto again; -+ - rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", -- ENTITY_NAME(lockers[0].id.name)); -+ ENTITY_NAME(locker->id.name)); - - ret = ceph_monc_blocklist_add(&client->monc, -- &lockers[0].info.addr); -+ &locker->info.addr); - if (ret) { -- rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", -- ENTITY_NAME(lockers[0].id.name), ret); -+ rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d", -+ ENTITY_NAME(locker->id.name), ret); - goto out; - } - - ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, - &rbd_dev->header_oloc, RBD_LOCK_NAME, -- lockers[0].id.cookie, -- &lockers[0].id.name); -- if (ret && ret != -ENOENT) -+ locker->id.cookie, &locker->id.name); -+ if (ret && ret != -ENOENT) { -+ rbd_warn(rbd_dev, "failed to break header lock: %d", -+ ret); - goto out; -+ } - - again: -- ceph_free_lockers(lockers, num_lockers); -+ free_locker(refreshed_locker); -+ free_locker(locker); - } - - out: -- ceph_free_lockers(lockers, num_lockers); -+ free_locker(refreshed_locker); -+ free_locker(locker); - return ret; - } - -@@ -3979,6 +4055,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev) - { - int ret; - -+ ret = rbd_dev_refresh(rbd_dev); -+ if (ret) -+ return ret; -+ - if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { - ret = rbd_object_map_open(rbd_dev); - if (ret) -@@ -4017,11 +4097,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) - - ret = rbd_try_lock(rbd_dev); - if (ret < 0) { -- rbd_warn(rbd_dev, "failed to lock header: %d", ret); -- if (ret == -EBLOCKLISTED) -- goto out; -- -- ret = 1; /* request lock anyway */ -+ rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); -+ goto out; - } - if (ret > 0) { - up_write(&rbd_dev->lock_rwsem); -@@ -5296,8 +5373,7 @@ static void rbd_dev_release(struct device *dev) - module_put(THIS_MODULE); - } - --static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, -- struct rbd_spec *spec) -+static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) - { - struct rbd_device *rbd_dev; - -@@ -5342,9 +5418,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, - rbd_dev->dev.parent = &rbd_root_dev; - device_initialize(&rbd_dev->dev); - -- rbd_dev->rbd_client = rbdc; -- rbd_dev->spec = spec; -- - return rbd_dev; - } - -@@ -5357,12 +5430,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, - { - struct rbd_device *rbd_dev; - -- rbd_dev = __rbd_dev_create(rbdc, spec); -+ rbd_dev = __rbd_dev_create(spec); - if (!rbd_dev) - return NULL; - -- rbd_dev->opts = opts; -- - /* get an id and fill in device name */ - rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, - minor_to_rbd_dev_id(1 << MINORBITS), -@@ -5379,6 +5450,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, - /* we have a ref from do_rbd_add() */ - __module_get(THIS_MODULE); - -+ rbd_dev->rbd_client = rbdc; -+ rbd_dev->spec = spec; -+ rbd_dev->opts = opts; -+ - dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); - return rbd_dev; - -@@ -6559,12 +6634,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) - cancel_delayed_work_sync(&rbd_dev->lock_dwork); - if (!ret) - ret = -ETIMEDOUT; -- } - -- if (ret) { -- rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); -- return ret; -+ rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); - } -+ if (ret) -+ return ret; - - /* - * The lock may have been released by 
now, unless automatic lock -@@ -6739,7 +6813,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) - goto out_err; - } - -- parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); -+ parent = __rbd_dev_create(rbd_dev->parent_spec); - if (!parent) { - ret = -ENOMEM; - goto out_err; -@@ -6749,8 +6823,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) - * Images related by parent/child relationships always share - * rbd_client and spec/parent_spec, so bump their refcounts. - */ -- __rbd_get_client(rbd_dev->rbd_client); -- rbd_spec_get(rbd_dev->parent_spec); -+ parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client); -+ parent->spec = rbd_spec_get(rbd_dev->parent_spec); - - __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags); - -@@ -7182,7 +7256,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, - * IO to complete/fail. - */ - blk_mq_freeze_queue(rbd_dev->disk->queue); -- blk_set_queue_dying(rbd_dev->disk->queue); -+ blk_mark_disk_dead(rbd_dev->disk); - } - - del_gendisk(rbd_dev->disk); -diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h -index c1bc5c0fef71d..e0fe0a9aa3708 100644 ---- a/drivers/block/rnbd/rnbd-proto.h -+++ b/drivers/block/rnbd/rnbd-proto.h -@@ -241,7 +241,7 @@ static inline u32 rnbd_to_bio_flags(u32 rnbd_opf) - bio_opf = REQ_OP_WRITE; - break; - case RNBD_OP_FLUSH: -- bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH; -+ bio_opf = REQ_OP_WRITE | REQ_PREFLUSH; - break; - case RNBD_OP_DISCARD: - bio_opf = REQ_OP_DISCARD; -diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c -index aafecfe970558..1896cde8135e4 100644 ---- a/drivers/block/rnbd/rnbd-srv.c -+++ b/drivers/block/rnbd/rnbd-srv.c -@@ -266,12 +266,12 @@ out: - static int create_sess(struct rtrs_srv *rtrs) - { - struct rnbd_srv_session *srv_sess; -- char sessname[NAME_MAX]; -+ char pathname[NAME_MAX]; - int err; - -- err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname)); -+ err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname)); - if (err) { -- pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err); -+ pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err); - - return err; - } -@@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs) - offsetof(struct rnbd_dev_blk_io, bio), - BIOSET_NEED_BVECS); - if (err) { -- pr_err("Allocating srv_session for session %s failed\n", -- sessname); -+ pr_err("Allocating srv_session for path %s failed\n", -+ pathname); - kfree(srv_sess); - return err; - } -@@ -298,7 +298,7 @@ static int create_sess(struct rtrs_srv *rtrs) - mutex_unlock(&sess_lock); - - srv_sess->rtrs = rtrs; -- strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname)); -+ strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname)); - - rtrs_srv_set_sess_priv(rtrs, srv_sess); - -@@ -333,10 +333,11 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev, - { - struct rnbd_srv_session *sess = sess_dev->sess; - -- sess_dev->keep_id = true; - /* It is already started to close by client's close message. 
*/ - if (!mutex_trylock(&sess->lock)) - return; -+ -+ sess_dev->keep_id = true; - /* first remove sysfs itself to avoid deadlock */ - sysfs_remove_file_self(&sess_dev->kobj, &attr->attr); - rnbd_srv_destroy_dev_session_sysfs(sess_dev); -diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c -index 1cc40b0ea7619..6b253d99bc48d 100644 ---- a/drivers/block/rsxx/dev.c -+++ b/drivers/block/rsxx/dev.c -@@ -127,6 +127,8 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio) - blk_status_t st = BLK_STS_IOERR; - - blk_queue_split(&bio); -+ if (!bio) -+ return BLK_QC_T_NONE; - - might_sleep(); - -diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c -index 4d4bb810c2aea..656d99faf40a2 100644 ---- a/drivers/block/sunvdc.c -+++ b/drivers/block/sunvdc.c -@@ -964,6 +964,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) - print_version(); - - hp = mdesc_grab(); -+ if (!hp) -+ return -ENODEV; - - err = -ENODEV; - if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { -diff --git a/drivers/block/swim.c b/drivers/block/swim.c -index 7ccc8d2a41bc6..3911d0833e1b9 100644 ---- a/drivers/block/swim.c -+++ b/drivers/block/swim.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c -deleted file mode 100644 -index 420cd952ddc4b..0000000000000 ---- a/drivers/block/sx8.c -+++ /dev/null -@@ -1,1575 +0,0 @@ --/* -- * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware -- * -- * Copyright 2004-2005 Red Hat, Inc. -- * -- * Author/maintainer: Jeff Garzik -- * -- * This file is subject to the terms and conditions of the GNU General Public -- * License. See the file "COPYING" in the main directory of this archive -- * for more details. -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#if 0 --#define CARM_DEBUG --#define CARM_VERBOSE_DEBUG --#else --#undef CARM_DEBUG --#undef CARM_VERBOSE_DEBUG --#endif --#undef CARM_NDEBUG -- --#define DRV_NAME "sx8" --#define DRV_VERSION "1.0" --#define PFX DRV_NAME ": " -- --MODULE_AUTHOR("Jeff Garzik"); --MODULE_LICENSE("GPL"); --MODULE_DESCRIPTION("Promise SATA SX8 block driver"); --MODULE_VERSION(DRV_VERSION); -- --/* -- * SX8 hardware has a single message queue for all ATA ports. -- * When this driver was written, the hardware (firmware?) would -- * corrupt data eventually, if more than one request was outstanding. -- * As one can imagine, having 8 ports bottlenecking on a single -- * command hurts performance. -- * -- * Based on user reports, later versions of the hardware (firmware?) -- * seem to be able to survive with more than one command queued. -- * -- * Therefore, we default to the safe option -- 1 command -- but -- * allow the user to increase this. -- * -- * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ), -- * but problems seem to occur when you exceed ~30, even on newer hardware. -- */ --static int max_queue = 1; --module_param(max_queue, int, 0444); --MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. 
(min==1, max==30, safe==1)"); -- -- --#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) -- --/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ --#define TAG_ENCODE(tag) (((tag) << 16) | 0xf) --#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f) --#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32)) -- --/* note: prints function name for you */ --#ifdef CARM_DEBUG --#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) --#ifdef CARM_VERBOSE_DEBUG --#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) --#else --#define VPRINTK(fmt, args...) --#endif /* CARM_VERBOSE_DEBUG */ --#else --#define DPRINTK(fmt, args...) --#define VPRINTK(fmt, args...) --#endif /* CARM_DEBUG */ -- --#ifdef CARM_NDEBUG --#define assert(expr) --#else --#define assert(expr) \ -- if(unlikely(!(expr))) { \ -- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ -- #expr, __FILE__, __func__, __LINE__); \ -- } --#endif -- --/* defines only for the constants which don't work well as enums */ --struct carm_host; -- --enum { -- /* adapter-wide limits */ -- CARM_MAX_PORTS = 8, -- CARM_SHM_SIZE = (4096 << 7), -- CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS, -- CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1, -- -- /* command message queue limits */ -- CARM_MAX_REQ = 64, /* max command msgs per host */ -- CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ -- -- /* S/G limits, host-wide and per-request */ -- CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ -- CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ -- CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ -- -- /* hardware registers */ -- CARM_IHQP = 0x1c, -- CARM_INT_STAT = 0x10, /* interrupt status */ -- CARM_INT_MASK = 0x14, /* interrupt mask */ -- CARM_HMUC = 0x18, /* host message unit control */ -- RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */ -- RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */ -- RBUF_BYTE_SZ = 0x28, -- CARM_RESP_IDX = 0x2c, -- CARM_CMS0 = 0x30, /* command message size reg 0 */ -- CARM_LMUC = 0x48, -- CARM_HMPHA = 0x6c, -- CARM_INITC = 0xb5, -- -- /* bits in CARM_INT_{STAT,MASK} */ -- INT_RESERVED = 0xfffffff0, -- INT_WATCHDOG = (1 << 3), /* watchdog timer */ -- INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */ -- INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */ -- INT_RESPONSE = (1 << 0), /* response msg available */ -- INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW, -- INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW | -- INT_RESPONSE, -- -- /* command messages, and related register bits */ -- CARM_HAVE_RESP = 0x01, -- CARM_MSG_READ = 1, -- CARM_MSG_WRITE = 2, -- CARM_MSG_VERIFY = 3, -- CARM_MSG_GET_CAPACITY = 4, -- CARM_MSG_FLUSH = 5, -- CARM_MSG_IOCTL = 6, -- CARM_MSG_ARRAY = 8, -- CARM_MSG_MISC = 9, -- CARM_CME = (1 << 2), -- CARM_RME = (1 << 1), -- CARM_WZBC = (1 << 0), -- CARM_RMI = (1 << 0), -- CARM_Q_FULL = (1 << 3), -- CARM_MSG_SIZE = 288, -- CARM_Q_LEN = 48, -- -- /* CARM_MSG_IOCTL messages */ -- CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */ -- CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */ -- CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */ -- -- IOC_SCAN_CHAN_NODEV = 0x1f, -- IOC_SCAN_CHAN_OFFSET = 0x40, -- -- /* CARM_MSG_ARRAY messages */ -- CARM_ARRAY_INFO = 0, -- -- ARRAY_NO_EXIST = (1 << 31), -- -- /* response messages */ -- RMSG_SZ = 8, /* sizeof(struct carm_response) */ -- RMSG_Q_LEN = 48, /* resp. 
msg list length */ -- RMSG_OK = 1, /* bit indicating msg was successful */ -- /* length of entire resp. msg buffer */ -- RBUF_LEN = RMSG_SZ * RMSG_Q_LEN, -- -- PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */ -- -- /* CARM_MSG_MISC messages */ -- MISC_GET_FW_VER = 2, -- MISC_ALLOC_MEM = 3, -- MISC_SET_TIME = 5, -- -- /* MISC_GET_FW_VER feature bits */ -- FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */ -- FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */ -- FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */ -- -- /* carm_host flags */ -- FL_NON_RAID = FW_VER_NON_RAID, -- FL_4PORT = FW_VER_4PORT, -- FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT), -- FL_DYN_MAJOR = (1 << 17), --}; -- --enum { -- CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ --}; -- --enum scatter_gather_types { -- SGT_32BIT = 0, -- SGT_64BIT = 1, --}; -- --enum host_states { -- HST_INVALID, /* invalid state; never used */ -- HST_ALLOC_BUF, /* setting up master SHM area */ -- HST_ERROR, /* we never leave here */ -- HST_PORT_SCAN, /* start dev scan */ -- HST_DEV_SCAN_START, /* start per-device probe */ -- HST_DEV_SCAN, /* continue per-device probe */ -- HST_DEV_ACTIVATE, /* activate devices we found */ -- HST_PROBE_FINISHED, /* probe is complete */ -- HST_PROBE_START, /* initiate probe */ -- HST_SYNC_TIME, /* tell firmware what time it is */ -- HST_GET_FW_VER, /* get firmware version, adapter port cnt */ --}; -- --#ifdef CARM_DEBUG --static const char *state_name[] = { -- "HST_INVALID", -- "HST_ALLOC_BUF", -- "HST_ERROR", -- "HST_PORT_SCAN", -- "HST_DEV_SCAN_START", -- "HST_DEV_SCAN", -- "HST_DEV_ACTIVATE", -- "HST_PROBE_FINISHED", -- "HST_PROBE_START", -- "HST_SYNC_TIME", -- "HST_GET_FW_VER", --}; --#endif -- --struct carm_port { -- unsigned int port_no; -- struct gendisk *disk; -- struct carm_host *host; -- -- /* attached device characteristics */ -- u64 capacity; -- char name[41]; -- u16 dev_geom_head; -- u16 dev_geom_sect; -- u16 dev_geom_cyl; --}; -- --struct carm_request { -- int n_elem; -- unsigned int msg_type; -- unsigned int msg_subtype; -- unsigned int msg_bucket; -- struct scatterlist sg[CARM_MAX_REQ_SG]; --}; -- --struct carm_host { -- unsigned long flags; -- void __iomem *mmio; -- void *shm; -- dma_addr_t shm_dma; -- -- int major; -- int id; -- char name[32]; -- -- spinlock_t lock; -- struct pci_dev *pdev; -- unsigned int state; -- u32 fw_ver; -- -- struct blk_mq_tag_set tag_set; -- struct request_queue *oob_q; -- unsigned int n_oob; -- -- unsigned int hw_sg_used; -- -- unsigned int resp_idx; -- -- unsigned int wait_q_prod; -- unsigned int wait_q_cons; -- struct request_queue *wait_q[CARM_MAX_WAIT_Q]; -- -- void *msg_base; -- dma_addr_t msg_dma; -- -- int cur_scan_dev; -- unsigned long dev_active; -- unsigned long dev_present; -- struct carm_port port[CARM_MAX_PORTS]; -- -- struct work_struct fsm_task; -- -- struct completion probe_comp; --}; -- --struct carm_response { -- __le32 ret_handle; -- __le32 status; --} __attribute__((packed)); -- --struct carm_msg_sg { -- __le32 start; -- __le32 len; --} __attribute__((packed)); -- --struct carm_msg_rw { -- u8 type; -- u8 id; -- u8 sg_count; -- u8 sg_type; -- __le32 handle; -- __le32 lba; -- __le16 lba_count; -- __le16 lba_high; -- struct carm_msg_sg sg[32]; --} __attribute__((packed)); -- --struct carm_msg_allocbuf { -- u8 type; -- u8 subtype; -- u8 n_sg; -- u8 sg_type; -- __le32 handle; -- __le32 addr; -- __le32 len; -- __le32 evt_pool; -- __le32 n_evt; -- __le32 rbuf_pool; -- __le32 n_rbuf; -- __le32 msg_pool; 
-- __le32 n_msg; -- struct carm_msg_sg sg[8]; --} __attribute__((packed)); -- --struct carm_msg_ioctl { -- u8 type; -- u8 subtype; -- u8 array_id; -- u8 reserved1; -- __le32 handle; -- __le32 data_addr; -- u32 reserved2; --} __attribute__((packed)); -- --struct carm_msg_sync_time { -- u8 type; -- u8 subtype; -- u16 reserved1; -- __le32 handle; -- u32 reserved2; -- __le32 timestamp; --} __attribute__((packed)); -- --struct carm_msg_get_fw_ver { -- u8 type; -- u8 subtype; -- u16 reserved1; -- __le32 handle; -- __le32 data_addr; -- u32 reserved2; --} __attribute__((packed)); -- --struct carm_fw_ver { -- __le32 version; -- u8 features; -- u8 reserved1; -- u16 reserved2; --} __attribute__((packed)); -- --struct carm_array_info { -- __le32 size; -- -- __le16 size_hi; -- __le16 stripe_size; -- -- __le32 mode; -- -- __le16 stripe_blk_sz; -- __le16 reserved1; -- -- __le16 cyl; -- __le16 head; -- -- __le16 sect; -- u8 array_id; -- u8 reserved2; -- -- char name[40]; -- -- __le32 array_status; -- -- /* device list continues beyond this point? */ --} __attribute__((packed)); -- --static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); --static void carm_remove_one (struct pci_dev *pdev); --static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); -- --static const struct pci_device_id carm_pci_tbl[] = { -- { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, -- { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, -- { } /* terminate list */ --}; --MODULE_DEVICE_TABLE(pci, carm_pci_tbl); -- --static struct pci_driver carm_driver = { -- .name = DRV_NAME, -- .id_table = carm_pci_tbl, -- .probe = carm_init_one, -- .remove = carm_remove_one, --}; -- --static const struct block_device_operations carm_bd_ops = { -- .owner = THIS_MODULE, -- .getgeo = carm_bdev_getgeo, --}; -- --static unsigned int carm_host_id; --static unsigned long carm_major_alloc; -- -- -- --static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) --{ -- struct carm_port *port = bdev->bd_disk->private_data; -- -- geo->heads = (u8) port->dev_geom_head; -- geo->sectors = (u8) port->dev_geom_sect; -- geo->cylinders = port->dev_geom_cyl; -- return 0; --} -- --static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE }; -- --static inline int carm_lookup_bucket(u32 msg_size) --{ -- int i; -- -- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) -- if (msg_size <= msg_sizes[i]) -- return i; -- -- return -ENOENT; --} -- --static void carm_init_buckets(void __iomem *mmio) --{ -- unsigned int i; -- -- for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) -- writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i)); --} -- --static inline void *carm_ref_msg(struct carm_host *host, -- unsigned int msg_idx) --{ -- return host->msg_base + (msg_idx * CARM_MSG_SIZE); --} -- --static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host, -- unsigned int msg_idx) --{ -- return host->msg_dma + (msg_idx * CARM_MSG_SIZE); --} -- --static int carm_send_msg(struct carm_host *host, -- struct carm_request *crq, unsigned tag) --{ -- void __iomem *mmio = host->mmio; -- u32 msg = (u32) carm_ref_msg_dma(host, tag); -- u32 cm_bucket = crq->msg_bucket; -- u32 tmp; -- int rc = 0; -- -- VPRINTK("ENTER\n"); -- -- tmp = readl(mmio + CARM_HMUC); -- if (tmp & CARM_Q_FULL) { --#if 0 -- tmp = readl(mmio + CARM_INT_MASK); -- tmp |= INT_Q_AVAILABLE; -- writel(tmp, mmio + CARM_INT_MASK); -- readl(mmio + CARM_INT_MASK); /* flush */ --#endif -- DPRINTK("host msg queue full\n"); -- rc = -EBUSY; -- } else { 
-- writel(msg | (cm_bucket << 1), mmio + CARM_IHQP); -- readl(mmio + CARM_IHQP); /* flush */ -- } -- -- return rc; --} -- --static int carm_array_info (struct carm_host *host, unsigned int array_idx) --{ -- struct carm_msg_ioctl *ioc; -- u32 msg_data; -- dma_addr_t msg_dma; -- struct carm_request *crq; -- struct request *rq; -- int rc; -- -- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0); -- if (IS_ERR(rq)) { -- rc = -ENOMEM; -- goto err_out; -- } -- crq = blk_mq_rq_to_pdu(rq); -- -- ioc = carm_ref_msg(host, rq->tag); -- msg_dma = carm_ref_msg_dma(host, rq->tag); -- msg_data = (u32) (msg_dma + sizeof(struct carm_array_info)); -- -- crq->msg_type = CARM_MSG_ARRAY; -- crq->msg_subtype = CARM_ARRAY_INFO; -- rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) + -- sizeof(struct carm_array_info)); -- BUG_ON(rc < 0); -- crq->msg_bucket = (u32) rc; -- -- memset(ioc, 0, sizeof(*ioc)); -- ioc->type = CARM_MSG_ARRAY; -- ioc->subtype = CARM_ARRAY_INFO; -- ioc->array_id = (u8) array_idx; -- ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag)); -- ioc->data_addr = cpu_to_le32(msg_data); -- -- spin_lock_irq(&host->lock); -- assert(host->state == HST_DEV_SCAN_START || -- host->state == HST_DEV_SCAN); -- spin_unlock_irq(&host->lock); -- -- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); -- blk_execute_rq_nowait(NULL, rq, true, NULL); -- -- return 0; -- --err_out: -- spin_lock_irq(&host->lock); -- host->state = HST_ERROR; -- spin_unlock_irq(&host->lock); -- return rc; --} -- --typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *); -- --static int carm_send_special (struct carm_host *host, carm_sspc_t func) --{ -- struct request *rq; -- struct carm_request *crq; -- struct carm_msg_ioctl *ioc; -- void *mem; -- unsigned int msg_size; -- int rc; -- -- rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0); -- if (IS_ERR(rq)) -- return -ENOMEM; -- crq = blk_mq_rq_to_pdu(rq); -- -- mem = carm_ref_msg(host, rq->tag); -- -- msg_size = func(host, rq->tag, mem); -- -- ioc = mem; -- crq->msg_type = ioc->type; -- crq->msg_subtype = ioc->subtype; -- rc = carm_lookup_bucket(msg_size); -- BUG_ON(rc < 0); -- crq->msg_bucket = (u32) rc; -- -- DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag); -- blk_execute_rq_nowait(NULL, rq, true, NULL); -- -- return 0; --} -- --static unsigned int carm_fill_sync_time(struct carm_host *host, -- unsigned int idx, void *mem) --{ -- struct carm_msg_sync_time *st = mem; -- -- time64_t tv = ktime_get_real_seconds(); -- -- memset(st, 0, sizeof(*st)); -- st->type = CARM_MSG_MISC; -- st->subtype = MISC_SET_TIME; -- st->handle = cpu_to_le32(TAG_ENCODE(idx)); -- st->timestamp = cpu_to_le32(tv); -- -- return sizeof(struct carm_msg_sync_time); --} -- --static unsigned int carm_fill_alloc_buf(struct carm_host *host, -- unsigned int idx, void *mem) --{ -- struct carm_msg_allocbuf *ab = mem; -- -- memset(ab, 0, sizeof(*ab)); -- ab->type = CARM_MSG_MISC; -- ab->subtype = MISC_ALLOC_MEM; -- ab->handle = cpu_to_le32(TAG_ENCODE(idx)); -- ab->n_sg = 1; -- ab->sg_type = SGT_32BIT; -- ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); -- ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1); -- ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024)); -- ab->n_evt = cpu_to_le32(1024); -- ab->rbuf_pool = cpu_to_le32(host->shm_dma); -- ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN); -- ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN); -- ab->n_msg = cpu_to_le32(CARM_Q_LEN); -- ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); -- ab->sg[0].len = 
cpu_to_le32(65536); -- -- return sizeof(struct carm_msg_allocbuf); --} -- --static unsigned int carm_fill_scan_channels(struct carm_host *host, -- unsigned int idx, void *mem) --{ -- struct carm_msg_ioctl *ioc = mem; -- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + -- IOC_SCAN_CHAN_OFFSET); -- -- memset(ioc, 0, sizeof(*ioc)); -- ioc->type = CARM_MSG_IOCTL; -- ioc->subtype = CARM_IOC_SCAN_CHAN; -- ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); -- ioc->data_addr = cpu_to_le32(msg_data); -- -- /* fill output data area with "no device" default values */ -- mem += IOC_SCAN_CHAN_OFFSET; -- memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS); -- -- return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS; --} -- --static unsigned int carm_fill_get_fw_ver(struct carm_host *host, -- unsigned int idx, void *mem) --{ -- struct carm_msg_get_fw_ver *ioc = mem; -- u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc)); -- -- memset(ioc, 0, sizeof(*ioc)); -- ioc->type = CARM_MSG_MISC; -- ioc->subtype = MISC_GET_FW_VER; -- ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); -- ioc->data_addr = cpu_to_le32(msg_data); -- -- return sizeof(struct carm_msg_get_fw_ver) + -- sizeof(struct carm_fw_ver); --} -- --static inline void carm_push_q (struct carm_host *host, struct request_queue *q) --{ -- unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; -- -- blk_mq_stop_hw_queues(q); -- VPRINTK("STOPPED QUEUE %p\n", q); -- -- host->wait_q[idx] = q; -- host->wait_q_prod++; -- BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ --} -- --static inline struct request_queue *carm_pop_q(struct carm_host *host) --{ -- unsigned int idx; -- -- if (host->wait_q_prod == host->wait_q_cons) -- return NULL; -- -- idx = host->wait_q_cons % CARM_MAX_WAIT_Q; -- host->wait_q_cons++; -- -- return host->wait_q[idx]; --} -- --static inline void carm_round_robin(struct carm_host *host) --{ -- struct request_queue *q = carm_pop_q(host); -- if (q) { -- blk_mq_start_hw_queues(q); -- VPRINTK("STARTED QUEUE %p\n", q); -- } --} -- --static inline enum dma_data_direction carm_rq_dir(struct request *rq) --{ -- return op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; --} -- --static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx, -- const struct blk_mq_queue_data *bd) --{ -- struct request_queue *q = hctx->queue; -- struct request *rq = bd->rq; -- struct carm_port *port = q->queuedata; -- struct carm_host *host = port->host; -- struct carm_request *crq = blk_mq_rq_to_pdu(rq); -- struct carm_msg_rw *msg; -- struct scatterlist *sg; -- int i, n_elem = 0, rc; -- unsigned int msg_size; -- u32 tmp; -- -- crq->n_elem = 0; -- sg_init_table(crq->sg, CARM_MAX_REQ_SG); -- -- blk_mq_start_request(rq); -- -- spin_lock_irq(&host->lock); -- if (req_op(rq) == REQ_OP_DRV_OUT) -- goto send_msg; -- -- /* get scatterlist from block layer */ -- sg = &crq->sg[0]; -- n_elem = blk_rq_map_sg(q, rq, sg); -- if (n_elem <= 0) -- goto out_ioerr; -- -- /* map scatterlist to PCI bus addresses */ -- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq)); -- if (n_elem <= 0) -- goto out_ioerr; -- -- /* obey global hardware limit on S/G entries */ -- if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem) -- goto out_resource; -- -- crq->n_elem = n_elem; -- host->hw_sg_used += n_elem; -- -- /* -- * build read/write message -- */ -- -- VPRINTK("build msg\n"); -- msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag); -- -- if (rq_data_dir(rq) == WRITE) { -- msg->type = CARM_MSG_WRITE; -- crq->msg_type = CARM_MSG_WRITE; -- } else { -- msg->type = CARM_MSG_READ; -- crq->msg_type = CARM_MSG_READ; -- } -- -- msg->id = port->port_no; -- msg->sg_count = n_elem; -- msg->sg_type = SGT_32BIT; -- msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag)); -- msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff); -- tmp = (blk_rq_pos(rq) >> 16) >> 16; -- msg->lba_high = cpu_to_le16( (u16) tmp ); -- msg->lba_count = cpu_to_le16(blk_rq_sectors(rq)); -- -- msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); -- for (i = 0; i < n_elem; i++) { -- struct carm_msg_sg *carm_sg = &msg->sg[i]; -- carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i])); -- carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i])); -- msg_size += sizeof(struct carm_msg_sg); -- } -- -- rc = carm_lookup_bucket(msg_size); -- BUG_ON(rc < 0); -- crq->msg_bucket = (u32) rc; --send_msg: -- /* -- * queue read/write message to hardware -- */ -- VPRINTK("send msg, tag == %u\n", rq->tag); -- rc = carm_send_msg(host, crq, rq->tag); -- if (rc) { -- host->hw_sg_used -= n_elem; -- goto out_resource; -- } -- -- spin_unlock_irq(&host->lock); -- return BLK_STS_OK; --out_resource: -- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq)); -- carm_push_q(host, q); -- spin_unlock_irq(&host->lock); -- return BLK_STS_DEV_RESOURCE; --out_ioerr: -- carm_round_robin(host); -- spin_unlock_irq(&host->lock); -- return BLK_STS_IOERR; --} -- --static void carm_handle_array_info(struct carm_host *host, -- struct carm_request *crq, u8 *mem, -- blk_status_t error) --{ -- struct carm_port *port; -- u8 *msg_data = mem + sizeof(struct carm_array_info); -- struct carm_array_info *desc = (struct carm_array_info *) msg_data; -- u64 lo, hi; -- int cur_port; -- size_t slen; -- -- DPRINTK("ENTER\n"); -- -- if (error) -- goto out; -- if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST) -- goto out; -- -- cur_port = host->cur_scan_dev; -- -- /* should never occur */ -- if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) { -- printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n", -- cur_port, (int) desc->array_id); -- goto out; -- } -- -- port = &host->port[cur_port]; -- -- lo = (u64) le32_to_cpu(desc->size); -- hi 
= (u64) le16_to_cpu(desc->size_hi); -- -- port->capacity = lo | (hi << 32); -- port->dev_geom_head = le16_to_cpu(desc->head); -- port->dev_geom_sect = le16_to_cpu(desc->sect); -- port->dev_geom_cyl = le16_to_cpu(desc->cyl); -- -- host->dev_active |= (1 << cur_port); -- -- strncpy(port->name, desc->name, sizeof(port->name)); -- port->name[sizeof(port->name) - 1] = 0; -- slen = strlen(port->name); -- while (slen && (port->name[slen - 1] == ' ')) { -- port->name[slen - 1] = 0; -- slen--; -- } -- -- printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n", -- pci_name(host->pdev), port->port_no, -- (unsigned long long) port->capacity); -- printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n", -- pci_name(host->pdev), port->port_no, port->name); -- --out: -- assert(host->state == HST_DEV_SCAN); -- schedule_work(&host->fsm_task); --} -- --static void carm_handle_scan_chan(struct carm_host *host, -- struct carm_request *crq, u8 *mem, -- blk_status_t error) --{ -- u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; -- unsigned int i, dev_count = 0; -- int new_state = HST_DEV_SCAN_START; -- -- DPRINTK("ENTER\n"); -- -- if (error) { -- new_state = HST_ERROR; -- goto out; -- } -- -- /* TODO: scan and support non-disk devices */ -- for (i = 0; i < 8; i++) -- if (msg_data[i] == 0) { /* direct-access device (disk) */ -- host->dev_present |= (1 << i); -- dev_count++; -- } -- -- printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n", -- pci_name(host->pdev), dev_count); -- --out: -- assert(host->state == HST_PORT_SCAN); -- host->state = new_state; -- schedule_work(&host->fsm_task); --} -- --static void carm_handle_generic(struct carm_host *host, -- struct carm_request *crq, blk_status_t error, -- int cur_state, int next_state) --{ -- DPRINTK("ENTER\n"); -- -- assert(host->state == cur_state); -- if (error) -- host->state = HST_ERROR; -- else -- host->state = next_state; -- schedule_work(&host->fsm_task); --} -- --static inline void carm_handle_resp(struct carm_host *host, -- __le32 ret_handle_le, u32 status) --{ -- u32 handle = le32_to_cpu(ret_handle_le); -- unsigned int msg_idx; -- struct request *rq; -- struct carm_request *crq; -- blk_status_t error = (status == RMSG_OK) ? 
0 : BLK_STS_IOERR; -- u8 *mem; -- -- VPRINTK("ENTER, handle == 0x%x\n", handle); -- -- if (unlikely(!TAG_VALID(handle))) { -- printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n", -- pci_name(host->pdev), handle); -- return; -- } -- -- msg_idx = TAG_DECODE(handle); -- VPRINTK("tag == %u\n", msg_idx); -- -- rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx); -- crq = blk_mq_rq_to_pdu(rq); -- -- /* fast path */ -- if (likely(crq->msg_type == CARM_MSG_READ || -- crq->msg_type == CARM_MSG_WRITE)) { -- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, -- carm_rq_dir(rq)); -- goto done; -- } -- -- mem = carm_ref_msg(host, msg_idx); -- -- switch (crq->msg_type) { -- case CARM_MSG_IOCTL: { -- switch (crq->msg_subtype) { -- case CARM_IOC_SCAN_CHAN: -- carm_handle_scan_chan(host, crq, mem, error); -- goto done; -- default: -- /* unknown / invalid response */ -- goto err_out; -- } -- break; -- } -- -- case CARM_MSG_MISC: { -- switch (crq->msg_subtype) { -- case MISC_ALLOC_MEM: -- carm_handle_generic(host, crq, error, -- HST_ALLOC_BUF, HST_SYNC_TIME); -- goto done; -- case MISC_SET_TIME: -- carm_handle_generic(host, crq, error, -- HST_SYNC_TIME, HST_GET_FW_VER); -- goto done; -- case MISC_GET_FW_VER: { -- struct carm_fw_ver *ver = (struct carm_fw_ver *) -- (mem + sizeof(struct carm_msg_get_fw_ver)); -- if (!error) { -- host->fw_ver = le32_to_cpu(ver->version); -- host->flags |= (ver->features & FL_FW_VER_MASK); -- } -- carm_handle_generic(host, crq, error, -- HST_GET_FW_VER, HST_PORT_SCAN); -- goto done; -- } -- default: -- /* unknown / invalid response */ -- goto err_out; -- } -- break; -- } -- -- case CARM_MSG_ARRAY: { -- switch (crq->msg_subtype) { -- case CARM_ARRAY_INFO: -- carm_handle_array_info(host, crq, mem, error); -- break; -- default: -- /* unknown / invalid response */ -- goto err_out; -- } -- break; -- } -- -- default: -- /* unknown / invalid response */ -- goto err_out; -- } -- -- return; -- --err_out: -- printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", -- pci_name(host->pdev), crq->msg_type, crq->msg_subtype); -- error = BLK_STS_IOERR; --done: -- host->hw_sg_used -= crq->n_elem; -- blk_mq_end_request(blk_mq_rq_from_pdu(crq), error); -- -- if (host->hw_sg_used <= CARM_SG_LOW_WATER) -- carm_round_robin(host); --} -- --static inline void carm_handle_responses(struct carm_host *host) --{ -- void __iomem *mmio = host->mmio; -- struct carm_response *resp = (struct carm_response *) host->shm; -- unsigned int work = 0; -- unsigned int idx = host->resp_idx % RMSG_Q_LEN; -- -- while (1) { -- u32 status = le32_to_cpu(resp[idx].status); -- -- if (status == 0xffffffff) { -- VPRINTK("ending response on index %u\n", idx); -- writel(idx << 3, mmio + CARM_RESP_IDX); -- break; -- } -- -- /* response to a message we sent */ -- else if ((status & (1 << 31)) == 0) { -- VPRINTK("handling msg response on index %u\n", idx); -- carm_handle_resp(host, resp[idx].ret_handle, status); -- resp[idx].status = cpu_to_le32(0xffffffff); -- } -- -- /* asynchronous events the hardware throws our way */ -- else if ((status & 0xff000000) == (1 << 31)) { -- u8 *evt_type_ptr = (u8 *) &resp[idx]; -- u8 evt_type = *evt_type_ptr; -- printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n", -- pci_name(host->pdev), (int) evt_type); -- resp[idx].status = cpu_to_le32(0xffffffff); -- } -- -- idx = NEXT_RESP(idx); -- work++; -- } -- -- VPRINTK("EXIT, work==%u\n", work); -- host->resp_idx += work; --} -- --static irqreturn_t carm_interrupt(int irq, void *__host) --{ -- struct carm_host 
*host = __host; -- void __iomem *mmio; -- u32 mask; -- int handled = 0; -- unsigned long flags; -- -- if (!host) { -- VPRINTK("no host\n"); -- return IRQ_NONE; -- } -- -- spin_lock_irqsave(&host->lock, flags); -- -- mmio = host->mmio; -- -- /* reading should also clear interrupts */ -- mask = readl(mmio + CARM_INT_STAT); -- -- if (mask == 0 || mask == 0xffffffff) { -- VPRINTK("no work, mask == 0x%x\n", mask); -- goto out; -- } -- -- if (mask & INT_ACK_MASK) -- writel(mask, mmio + CARM_INT_STAT); -- -- if (unlikely(host->state == HST_INVALID)) { -- VPRINTK("not initialized yet, mask = 0x%x\n", mask); -- goto out; -- } -- -- if (mask & CARM_HAVE_RESP) { -- handled = 1; -- carm_handle_responses(host); -- } -- --out: -- spin_unlock_irqrestore(&host->lock, flags); -- VPRINTK("EXIT\n"); -- return IRQ_RETVAL(handled); --} -- --static void carm_fsm_task (struct work_struct *work) --{ -- struct carm_host *host = -- container_of(work, struct carm_host, fsm_task); -- unsigned long flags; -- unsigned int state; -- int rc, i, next_dev; -- int reschedule = 0; -- int new_state = HST_INVALID; -- -- spin_lock_irqsave(&host->lock, flags); -- state = host->state; -- spin_unlock_irqrestore(&host->lock, flags); -- -- DPRINTK("ENTER, state == %s\n", state_name[state]); -- -- switch (state) { -- case HST_PROBE_START: -- new_state = HST_ALLOC_BUF; -- reschedule = 1; -- break; -- -- case HST_ALLOC_BUF: -- rc = carm_send_special(host, carm_fill_alloc_buf); -- if (rc) { -- new_state = HST_ERROR; -- reschedule = 1; -- } -- break; -- -- case HST_SYNC_TIME: -- rc = carm_send_special(host, carm_fill_sync_time); -- if (rc) { -- new_state = HST_ERROR; -- reschedule = 1; -- } -- break; -- -- case HST_GET_FW_VER: -- rc = carm_send_special(host, carm_fill_get_fw_ver); -- if (rc) { -- new_state = HST_ERROR; -- reschedule = 1; -- } -- break; -- -- case HST_PORT_SCAN: -- rc = carm_send_special(host, carm_fill_scan_channels); -- if (rc) { -- new_state = HST_ERROR; -- reschedule = 1; -- } -- break; -- -- case HST_DEV_SCAN_START: -- host->cur_scan_dev = -1; -- new_state = HST_DEV_SCAN; -- reschedule = 1; -- break; -- -- case HST_DEV_SCAN: -- next_dev = -1; -- for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++) -- if (host->dev_present & (1 << i)) { -- next_dev = i; -- break; -- } -- -- if (next_dev >= 0) { -- host->cur_scan_dev = next_dev; -- rc = carm_array_info(host, next_dev); -- if (rc) { -- new_state = HST_ERROR; -- reschedule = 1; -- } -- } else { -- new_state = HST_DEV_ACTIVATE; -- reschedule = 1; -- } -- break; -- -- case HST_DEV_ACTIVATE: { -- int activated = 0; -- for (i = 0; i < CARM_MAX_PORTS; i++) -- if (host->dev_active & (1 << i)) { -- struct carm_port *port = &host->port[i]; -- struct gendisk *disk = port->disk; -- -- set_capacity(disk, port->capacity); -- add_disk(disk); -- activated++; -- } -- -- printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n", -- pci_name(host->pdev), activated); -- -- new_state = HST_PROBE_FINISHED; -- reschedule = 1; -- break; -- } -- -- case HST_PROBE_FINISHED: -- complete(&host->probe_comp); -- break; -- -- case HST_ERROR: -- /* FIXME: TODO */ -- break; -- -- default: -- /* should never occur */ -- printk(KERN_ERR PFX "BUG: unknown state %d\n", state); -- assert(0); -- break; -- } -- -- if (new_state != HST_INVALID) { -- spin_lock_irqsave(&host->lock, flags); -- host->state = new_state; -- spin_unlock_irqrestore(&host->lock, flags); -- } -- if (reschedule) -- schedule_work(&host->fsm_task); --} -- --static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit) 
--{ -- unsigned int i; -- -- for (i = 0; i < 50000; i++) { -- u32 tmp = readl(mmio + CARM_LMUC); -- udelay(100); -- -- if (test_bit) { -- if ((tmp & bits) == bits) -- return 0; -- } else { -- if ((tmp & bits) == 0) -- return 0; -- } -- -- cond_resched(); -- } -- -- printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n", -- bits, test_bit ? "yes" : "no"); -- return -EBUSY; --} -- --static void carm_init_responses(struct carm_host *host) --{ -- void __iomem *mmio = host->mmio; -- unsigned int i; -- struct carm_response *resp = (struct carm_response *) host->shm; -- -- for (i = 0; i < RMSG_Q_LEN; i++) -- resp[i].status = cpu_to_le32(0xffffffff); -- -- writel(0, mmio + CARM_RESP_IDX); --} -- --static int carm_init_host(struct carm_host *host) --{ -- void __iomem *mmio = host->mmio; -- u32 tmp; -- u8 tmp8; -- int rc; -- -- DPRINTK("ENTER\n"); -- -- writel(0, mmio + CARM_INT_MASK); -- -- tmp8 = readb(mmio + CARM_INITC); -- if (tmp8 & 0x01) { -- tmp8 &= ~0x01; -- writeb(tmp8, mmio + CARM_INITC); -- readb(mmio + CARM_INITC); /* flush */ -- -- DPRINTK("snooze...\n"); -- msleep(5000); -- } -- -- tmp = readl(mmio + CARM_HMUC); -- if (tmp & CARM_CME) { -- DPRINTK("CME bit present, waiting\n"); -- rc = carm_init_wait(mmio, CARM_CME, 1); -- if (rc) { -- DPRINTK("EXIT, carm_init_wait 1 failed\n"); -- return rc; -- } -- } -- if (tmp & CARM_RME) { -- DPRINTK("RME bit present, waiting\n"); -- rc = carm_init_wait(mmio, CARM_RME, 1); -- if (rc) { -- DPRINTK("EXIT, carm_init_wait 2 failed\n"); -- return rc; -- } -- } -- -- tmp &= ~(CARM_RME | CARM_CME); -- writel(tmp, mmio + CARM_HMUC); -- readl(mmio + CARM_HMUC); /* flush */ -- -- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0); -- if (rc) { -- DPRINTK("EXIT, carm_init_wait 3 failed\n"); -- return rc; -- } -- -- carm_init_buckets(mmio); -- -- writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO); -- writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI); -- writel(RBUF_LEN, mmio + RBUF_BYTE_SZ); -- -- tmp = readl(mmio + CARM_HMUC); -- tmp |= (CARM_RME | CARM_CME | CARM_WZBC); -- writel(tmp, mmio + CARM_HMUC); -- readl(mmio + CARM_HMUC); /* flush */ -- -- rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1); -- if (rc) { -- DPRINTK("EXIT, carm_init_wait 4 failed\n"); -- return rc; -- } -- -- writel(0, mmio + CARM_HMPHA); -- writel(INT_DEF_MASK, mmio + CARM_INT_MASK); -- -- carm_init_responses(host); -- -- /* start initialization, probing state machine */ -- spin_lock_irq(&host->lock); -- assert(host->state == HST_INVALID); -- host->state = HST_PROBE_START; -- spin_unlock_irq(&host->lock); -- schedule_work(&host->fsm_task); -- -- DPRINTK("EXIT\n"); -- return 0; --} -- --static const struct blk_mq_ops carm_mq_ops = { -- .queue_rq = carm_queue_rq, --}; -- --static int carm_init_disk(struct carm_host *host, unsigned int port_no) --{ -- struct carm_port *port = &host->port[port_no]; -- struct gendisk *disk; -- -- port->host = host; -- port->port_no = port_no; -- -- disk = blk_mq_alloc_disk(&host->tag_set, port); -- if (IS_ERR(disk)) -- return PTR_ERR(disk); -- -- port->disk = disk; -- sprintf(disk->disk_name, DRV_NAME "/%u", -- (unsigned int)host->id * CARM_MAX_PORTS + port_no); -- disk->major = host->major; -- disk->first_minor = port_no * CARM_MINORS_PER_MAJOR; -- disk->minors = CARM_MINORS_PER_MAJOR; -- disk->fops = &carm_bd_ops; -- disk->private_data = port; -- -- blk_queue_max_segments(disk->queue, CARM_MAX_REQ_SG); -- blk_queue_segment_boundary(disk->queue, CARM_SG_BOUNDARY); -- return 0; --} -- --static void carm_free_disk(struct 
carm_host *host, unsigned int port_no) --{ -- struct carm_port *port = &host->port[port_no]; -- struct gendisk *disk = port->disk; -- -- if (!disk) -- return; -- -- if (host->state > HST_DEV_ACTIVATE) -- del_gendisk(disk); -- blk_cleanup_disk(disk); --} -- --static int carm_init_shm(struct carm_host *host) --{ -- host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE, -- &host->shm_dma, GFP_KERNEL); -- if (!host->shm) -- return -ENOMEM; -- -- host->msg_base = host->shm + RBUF_LEN; -- host->msg_dma = host->shm_dma + RBUF_LEN; -- -- memset(host->shm, 0xff, RBUF_LEN); -- memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN); -- -- return 0; --} -- --static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) --{ -- struct carm_host *host; -- int rc; -- struct request_queue *q; -- unsigned int i; -- -- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); -- -- rc = pci_enable_device(pdev); -- if (rc) -- return rc; -- -- rc = pci_request_regions(pdev, DRV_NAME); -- if (rc) -- goto err_out; -- -- rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); -- if (rc) { -- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", -- pci_name(pdev)); -- goto err_out_regions; -- } -- -- host = kzalloc(sizeof(*host), GFP_KERNEL); -- if (!host) { -- rc = -ENOMEM; -- goto err_out_regions; -- } -- -- host->pdev = pdev; -- spin_lock_init(&host->lock); -- INIT_WORK(&host->fsm_task, carm_fsm_task); -- init_completion(&host->probe_comp); -- -- host->mmio = ioremap(pci_resource_start(pdev, 0), -- pci_resource_len(pdev, 0)); -- if (!host->mmio) { -- printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n", -- pci_name(pdev)); -- rc = -ENOMEM; -- goto err_out_kfree; -- } -- -- rc = carm_init_shm(host); -- if (rc) { -- printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n", -- pci_name(pdev)); -- goto err_out_iounmap; -- } -- -- memset(&host->tag_set, 0, sizeof(host->tag_set)); -- host->tag_set.ops = &carm_mq_ops; -- host->tag_set.cmd_size = sizeof(struct carm_request); -- host->tag_set.nr_hw_queues = 1; -- host->tag_set.nr_maps = 1; -- host->tag_set.queue_depth = max_queue; -- host->tag_set.numa_node = NUMA_NO_NODE; -- host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; -- -- rc = blk_mq_alloc_tag_set(&host->tag_set); -- if (rc) -- goto err_out_dma_free; -- -- q = blk_mq_init_queue(&host->tag_set); -- if (IS_ERR(q)) { -- rc = PTR_ERR(q); -- blk_mq_free_tag_set(&host->tag_set); -- goto err_out_dma_free; -- } -- -- host->oob_q = q; -- q->queuedata = host; -- -- /* -- * Figure out which major to use: 160, 161, or dynamic -- */ -- if (!test_and_set_bit(0, &carm_major_alloc)) -- host->major = 160; -- else if (!test_and_set_bit(1, &carm_major_alloc)) -- host->major = 161; -- else -- host->flags |= FL_DYN_MAJOR; -- -- host->id = carm_host_id; -- sprintf(host->name, DRV_NAME "%d", carm_host_id); -- -- rc = register_blkdev(host->major, host->name); -- if (rc < 0) -- goto err_out_free_majors; -- if (host->flags & FL_DYN_MAJOR) -- host->major = rc; -- -- for (i = 0; i < CARM_MAX_PORTS; i++) { -- rc = carm_init_disk(host, i); -- if (rc) -- goto err_out_blkdev_disks; -- } -- -- pci_set_master(pdev); -- -- rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host); -- if (rc) { -- printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n", -- pci_name(pdev)); -- goto err_out_blkdev_disks; -- } -- -- rc = carm_init_host(host); -- if (rc) -- goto err_out_free_irq; -- -- DPRINTK("waiting for probe_comp\n"); -- wait_for_completion(&host->probe_comp); -- -- printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq 
%u, major %d\n", -- host->name, pci_name(pdev), (int) CARM_MAX_PORTS, -- (unsigned long long)pci_resource_start(pdev, 0), -- pdev->irq, host->major); -- -- carm_host_id++; -- pci_set_drvdata(pdev, host); -- return 0; -- --err_out_free_irq: -- free_irq(pdev->irq, host); --err_out_blkdev_disks: -- for (i = 0; i < CARM_MAX_PORTS; i++) -- carm_free_disk(host, i); -- unregister_blkdev(host->major, host->name); --err_out_free_majors: -- if (host->major == 160) -- clear_bit(0, &carm_major_alloc); -- else if (host->major == 161) -- clear_bit(1, &carm_major_alloc); -- blk_cleanup_queue(host->oob_q); -- blk_mq_free_tag_set(&host->tag_set); --err_out_dma_free: -- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); --err_out_iounmap: -- iounmap(host->mmio); --err_out_kfree: -- kfree(host); --err_out_regions: -- pci_release_regions(pdev); --err_out: -- pci_disable_device(pdev); -- return rc; --} -- --static void carm_remove_one (struct pci_dev *pdev) --{ -- struct carm_host *host = pci_get_drvdata(pdev); -- unsigned int i; -- -- if (!host) { -- printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n", -- pci_name(pdev)); -- return; -- } -- -- free_irq(pdev->irq, host); -- for (i = 0; i < CARM_MAX_PORTS; i++) -- carm_free_disk(host, i); -- unregister_blkdev(host->major, host->name); -- if (host->major == 160) -- clear_bit(0, &carm_major_alloc); -- else if (host->major == 161) -- clear_bit(1, &carm_major_alloc); -- blk_cleanup_queue(host->oob_q); -- blk_mq_free_tag_set(&host->tag_set); -- dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); -- iounmap(host->mmio); -- kfree(host); -- pci_release_regions(pdev); -- pci_disable_device(pdev); --} -- --module_pci_driver(carm_driver); -diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c -index 303caf2d17d0c..d2ba849bb8d19 100644 ---- a/drivers/block/virtio_blk.c -+++ b/drivers/block/virtio_blk.c -@@ -24,6 +24,12 @@ - /* The maximum number of sg elements that fit into a virtqueue */ - #define VIRTIO_BLK_MAX_SG_ELEMS 32768 - -+#ifdef CONFIG_ARCH_NO_SG_CHAIN -+#define VIRTIO_BLK_INLINE_SG_CNT 0 -+#else -+#define VIRTIO_BLK_INLINE_SG_CNT 2 -+#endif -+ - static int major; - static DEFINE_IDA(vd_index_ida); - -@@ -77,6 +83,7 @@ struct virtio_blk { - struct virtblk_req { - struct virtio_blk_outhdr out_hdr; - u8 status; -+ struct sg_table sg_table; - struct scatterlist sg[]; - }; - -@@ -162,12 +169,92 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) - return 0; - } - --static inline void virtblk_request_done(struct request *req) -+static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr) - { -- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); -+ if (blk_rq_nr_phys_segments(req)) -+ sg_free_table_chained(&vbr->sg_table, -+ VIRTIO_BLK_INLINE_SG_CNT); -+} - -+static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req, -+ struct virtblk_req *vbr) -+{ -+ int err; -+ -+ if (!blk_rq_nr_phys_segments(req)) -+ return 0; -+ -+ vbr->sg_table.sgl = vbr->sg; -+ err = sg_alloc_table_chained(&vbr->sg_table, -+ blk_rq_nr_phys_segments(req), -+ vbr->sg_table.sgl, -+ VIRTIO_BLK_INLINE_SG_CNT); -+ if (unlikely(err)) -+ return -ENOMEM; -+ -+ return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); -+} -+ -+static void virtblk_cleanup_cmd(struct request *req) -+{ - if (req->rq_flags & RQF_SPECIAL_PAYLOAD) - kfree(bvec_virt(&req->special_vec)); -+} -+ -+static int virtblk_setup_cmd(struct virtio_device *vdev, struct request *req, -+ struct virtblk_req *vbr) -+{ -+ bool 
unmap = false; -+ u32 type; -+ -+ vbr->out_hdr.sector = 0; -+ -+ switch (req_op(req)) { -+ case REQ_OP_READ: -+ type = VIRTIO_BLK_T_IN; -+ vbr->out_hdr.sector = cpu_to_virtio64(vdev, -+ blk_rq_pos(req)); -+ break; -+ case REQ_OP_WRITE: -+ type = VIRTIO_BLK_T_OUT; -+ vbr->out_hdr.sector = cpu_to_virtio64(vdev, -+ blk_rq_pos(req)); -+ break; -+ case REQ_OP_FLUSH: -+ type = VIRTIO_BLK_T_FLUSH; -+ break; -+ case REQ_OP_DISCARD: -+ type = VIRTIO_BLK_T_DISCARD; -+ break; -+ case REQ_OP_WRITE_ZEROES: -+ type = VIRTIO_BLK_T_WRITE_ZEROES; -+ unmap = !(req->cmd_flags & REQ_NOUNMAP); -+ break; -+ case REQ_OP_DRV_IN: -+ type = VIRTIO_BLK_T_GET_ID; -+ break; -+ default: -+ WARN_ON_ONCE(1); -+ return BLK_STS_IOERR; -+ } -+ -+ vbr->out_hdr.type = cpu_to_virtio32(vdev, type); -+ vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req)); -+ -+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) { -+ if (virtblk_setup_discard_write_zeroes(req, unmap)) -+ return BLK_STS_RESOURCE; -+ } -+ -+ return 0; -+} -+ -+static inline void virtblk_request_done(struct request *req) -+{ -+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); -+ -+ virtblk_unmap_data(req, vbr); -+ virtblk_cleanup_cmd(req); - blk_mq_end_request(req, virtblk_result(vbr)); - } - -@@ -221,61 +308,27 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, - struct request *req = bd->rq; - struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); - unsigned long flags; -- unsigned int num; -+ int num; - int qid = hctx->queue_num; - int err; - bool notify = false; -- bool unmap = false; -- u32 type; - - BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); - -- switch (req_op(req)) { -- case REQ_OP_READ: -- case REQ_OP_WRITE: -- type = 0; -- break; -- case REQ_OP_FLUSH: -- type = VIRTIO_BLK_T_FLUSH; -- break; -- case REQ_OP_DISCARD: -- type = VIRTIO_BLK_T_DISCARD; -- break; -- case REQ_OP_WRITE_ZEROES: -- type = VIRTIO_BLK_T_WRITE_ZEROES; -- unmap = !(req->cmd_flags & REQ_NOUNMAP); -- break; -- case REQ_OP_DRV_IN: -- type = VIRTIO_BLK_T_GET_ID; -- break; -- default: -- WARN_ON_ONCE(1); -- return BLK_STS_IOERR; -- } -- -- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type); -- vbr->out_hdr.sector = type ? 
-- 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req)); -- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); -+ err = virtblk_setup_cmd(vblk->vdev, req, vbr); -+ if (unlikely(err)) -+ return err; - - blk_mq_start_request(req); - -- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) { -- err = virtblk_setup_discard_write_zeroes(req, unmap); -- if (err) -- return BLK_STS_RESOURCE; -- } -- -- num = blk_rq_map_sg(hctx->queue, req, vbr->sg); -- if (num) { -- if (rq_data_dir(req) == WRITE) -- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT); -- else -- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN); -+ num = virtblk_map_data(hctx, req, vbr); -+ if (unlikely(num < 0)) { -+ virtblk_cleanup_cmd(req); -+ return BLK_STS_RESOURCE; - } - - spin_lock_irqsave(&vblk->vqs[qid].lock, flags); -- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); -+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num); - if (err) { - virtqueue_kick(vblk->vqs[qid].vq); - /* Don't stop the queue if -ENOMEM: we may have failed to -@@ -284,6 +337,8 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, - if (err == -ENOSPC) - blk_mq_stop_hw_queue(hctx); - spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); -+ virtblk_unmap_data(req, vbr); -+ virtblk_cleanup_cmd(req); - switch (err) { - case -ENOSPC: - return BLK_STS_DEV_RESOURCE; -@@ -660,16 +715,6 @@ static const struct attribute_group *virtblk_attr_groups[] = { - NULL, - }; - --static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq, -- unsigned int hctx_idx, unsigned int numa_node) --{ -- struct virtio_blk *vblk = set->driver_data; -- struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); -- -- sg_init_table(vbr->sg, vblk->sg_elems); -- return 0; --} -- - static int virtblk_map_queues(struct blk_mq_tag_set *set) - { - struct virtio_blk *vblk = set->driver_data; -@@ -682,7 +727,6 @@ static const struct blk_mq_ops virtio_mq_ops = { - .queue_rq = virtio_queue_rq, - .commit_rqs = virtio_commit_rqs, - .complete = virtblk_request_done, -- .init_request = virtblk_init_request, - .map_queues = virtblk_map_queues, - }; - -@@ -762,7 +806,7 @@ static int virtblk_probe(struct virtio_device *vdev) - vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; - vblk->tag_set.cmd_size = - sizeof(struct virtblk_req) + -- sizeof(struct scatterlist) * sg_elems; -+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT; - vblk->tag_set.driver_data = vblk; - vblk->tag_set.nr_hw_queues = vblk->num_vqs; - -@@ -815,9 +859,17 @@ static int virtblk_probe(struct virtio_device *vdev) - err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, - struct virtio_blk_config, blk_size, - &blk_size); -- if (!err) -+ if (!err) { -+ err = blk_validate_block_size(blk_size); -+ if (err) { -+ dev_err(&vdev->dev, -+ "virtio_blk: invalid block size: 0x%x\n", -+ blk_size); -+ goto out_cleanup_disk; -+ } -+ - blk_queue_logical_block_size(q, blk_size); -- else -+ } else - blk_size = queue_logical_block_size(q); - - /* Use topology information if available */ -@@ -847,11 +899,12 @@ static int virtblk_probe(struct virtio_device *vdev) - blk_queue_io_opt(q, blk_size * opt_io_size); - - if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) { -- q->limits.discard_granularity = blk_size; -- - virtio_cread(vdev, struct virtio_blk_config, - discard_sector_alignment, &v); -- q->limits.discard_alignment = v ? 
v << SECTOR_SHIFT : 0; -+ if (v) -+ q->limits.discard_granularity = v << SECTOR_SHIFT; -+ else -+ q->limits.discard_granularity = blk_size; - - virtio_cread(vdev, struct virtio_blk_config, - max_discard_sectors, &v); -@@ -859,9 +912,15 @@ static int virtblk_probe(struct virtio_device *vdev) - - virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, - &v); -+ -+ /* -+ * max_discard_seg == 0 is out of spec but we always -+ * handled it. -+ */ -+ if (!v) -+ v = sg_elems - 2; - blk_queue_max_discard_segments(q, -- min_not_zero(v, -- MAX_DISCARD_SEGMENTS)); -+ min(v, MAX_DISCARD_SEGMENTS)); - - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - } -diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h -index bda5c815e4415..a28473470e662 100644 ---- a/drivers/block/xen-blkback/common.h -+++ b/drivers/block/xen-blkback/common.h -@@ -226,6 +226,9 @@ struct xen_vbd { - sector_t size; - unsigned int flush_support:1; - unsigned int discard_secure:1; -+ /* Connect-time cached feature_persistent parameter value */ -+ unsigned int feature_gnt_persistent_parm:1; -+ /* Persistent grants feature negotiation result */ - unsigned int feature_gnt_persistent:1; - unsigned int overflow_max_grants:1; - }; -diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c -index 33eba3df4dd9a..1525e28c5d703 100644 ---- a/drivers/block/xen-blkback/xenbus.c -+++ b/drivers/block/xen-blkback/xenbus.c -@@ -156,6 +156,11 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) - return 0; - } - -+/* Enable the persistent grants feature. */ -+static bool feature_persistent = true; -+module_param(feature_persistent, bool, 0644); -+MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature"); -+ - static struct xen_blkif *xen_blkif_alloc(domid_t domid) - { - struct xen_blkif *blkif; -@@ -471,12 +476,6 @@ static void xen_vbd_free(struct xen_vbd *vbd) - vbd->bdev = NULL; - } - --/* Enable the persistent grants feature. 
*/ --static bool feature_persistent = true; --module_param(feature_persistent, bool, 0644); --MODULE_PARM_DESC(feature_persistent, -- "Enables the persistent grants feature"); -- - static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, - unsigned major, unsigned minor, int readonly, - int cdrom) -@@ -522,8 +521,6 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, - if (q && blk_queue_secure_erase(q)) - vbd->discard_secure = true; - -- vbd->feature_gnt_persistent = feature_persistent; -- - pr_debug("Successful creation of handle=%04x (dom=%u)\n", - handle, blkif->domid); - return 0; -@@ -913,7 +910,7 @@ again: - xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support); - - err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", -- be->blkif->vbd.feature_gnt_persistent); -+ be->blkif->vbd.feature_gnt_persistent_parm); - if (err) { - xenbus_dev_fatal(dev, err, "writing %s/feature-persistent", - dev->nodename); -@@ -1090,10 +1087,11 @@ static int connect_ring(struct backend_info *be) - xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); - return -ENOSYS; - } -- if (blkif->vbd.feature_gnt_persistent) -- blkif->vbd.feature_gnt_persistent = -- xenbus_read_unsigned(dev->otherend, -- "feature-persistent", 0); -+ -+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent; -+ blkif->vbd.feature_gnt_persistent = -+ blkif->vbd.feature_gnt_persistent_parm && -+ xenbus_read_unsigned(dev->otherend, "feature-persistent", 0); - - blkif->vbd.overflow_max_grants = 0; - -diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c -index 72902104f1112..831747ba8113c 100644 ---- a/drivers/block/xen-blkfront.c -+++ b/drivers/block/xen-blkfront.c -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -151,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order; - module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); - MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring"); - -+static bool __read_mostly xen_blkif_trusted = true; -+module_param_named(trusted, xen_blkif_trusted, bool, 0644); -+MODULE_PARM_DESC(trusted, "Is the backend trusted"); -+ - #define BLK_RING_SIZE(info) \ - __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages) - -@@ -207,7 +212,11 @@ struct blkfront_info - unsigned int feature_fua:1; - unsigned int feature_discard:1; - unsigned int feature_secdiscard:1; -+ /* Connect-time cached feature_persistent parameter */ -+ unsigned int feature_persistent_parm:1; -+ /* Persistent grants feature negotiation result */ - unsigned int feature_persistent:1; -+ unsigned int bounce:1; - unsigned int discard_granularity; - unsigned int discard_alignment; - /* Number of 4KB segments handled */ -@@ -310,8 +319,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) - if (!gnt_list_entry) - goto out_of_memory; - -- if (info->feature_persistent) { -- granted_page = alloc_page(GFP_NOIO); -+ if (info->bounce) { -+ granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); - if (!granted_page) { - kfree(gnt_list_entry); - goto out_of_memory; -@@ -330,7 +339,7 @@ out_of_memory: - list_for_each_entry_safe(gnt_list_entry, n, - &rinfo->grants, node) { - list_del(&gnt_list_entry->node); -- if (info->feature_persistent) -+ if (info->bounce) - __free_page(gnt_list_entry->page); - kfree(gnt_list_entry); - i--; -@@ -376,7 +385,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, - /* Assign a gref to this 
page */ - gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); - BUG_ON(gnt_list_entry->gref == -ENOSPC); -- if (info->feature_persistent) -+ if (info->bounce) - grant_foreign_access(gnt_list_entry, info); - else { - /* Grant access to the GFN passed by the caller */ -@@ -400,7 +409,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, - /* Assign a gref to this page */ - gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); - BUG_ON(gnt_list_entry->gref == -ENOSPC); -- if (!info->feature_persistent) { -+ if (!info->bounce) { - struct page *indirect_page; - - /* Fetch a pre-allocated page to use for indirect grefs */ -@@ -702,7 +711,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri - .grant_idx = 0, - .segments = NULL, - .rinfo = rinfo, -- .need_copy = rq_data_dir(req) && info->feature_persistent, -+ .need_copy = rq_data_dir(req) && info->bounce, - }; - - /* -@@ -771,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri - ring_req->u.rw.handle = info->handle; - ring_req->operation = rq_data_dir(req) ? - BLKIF_OP_WRITE : BLKIF_OP_READ; -- if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) { -+ if (req_op(req) == REQ_OP_FLUSH || -+ (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) { - /* - * Ideally we can do an unordered flush-to-disk. - * In case the backend onlysupports barriers, use that. -@@ -980,11 +990,12 @@ static void xlvbd_flush(struct blkfront_info *info) - { - blk_queue_write_cache(info->rq, info->feature_flush ? true : false, - info->feature_fua ? true : false); -- pr_info("blkfront: %s: %s %s %s %s %s\n", -+ pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", - info->gd->disk_name, flush_info(info), - "persistent grants:", info->feature_persistent ? - "enabled;" : "disabled;", "indirect descriptors:", -- info->max_indirect_segments ? "enabled;" : "disabled;"); -+ info->max_indirect_segments ? "enabled;" : "disabled;", -+ "bounce buffer:", info->bounce ? 
"enabled" : "disabled;"); - } - - static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) -@@ -1211,7 +1222,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) - if (!list_empty(&rinfo->indirect_pages)) { - struct page *indirect_page, *n; - -- BUG_ON(info->feature_persistent); -+ BUG_ON(info->bounce); - list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { - list_del(&indirect_page->lru); - __free_page(indirect_page); -@@ -1228,7 +1239,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) - 0, 0UL); - rinfo->persistent_gnts_c--; - } -- if (info->feature_persistent) -+ if (info->bounce) - __free_page(persistent_gnt->page); - kfree(persistent_gnt); - } -@@ -1249,7 +1260,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) - for (j = 0; j < segs; j++) { - persistent_gnt = rinfo->shadow[i].grants_used[j]; - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); -- if (info->feature_persistent) -+ if (info->bounce) - __free_page(persistent_gnt->page); - kfree(persistent_gnt); - } -@@ -1290,7 +1301,8 @@ free_shadow: - rinfo->ring_ref[i] = GRANT_INVALID_REF; - } - } -- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); -+ free_pages_exact(rinfo->ring.sring, -+ info->nr_ring_pages * XEN_PAGE_SIZE); - rinfo->ring.sring = NULL; - - if (rinfo->irq) -@@ -1374,9 +1386,15 @@ static int blkif_get_final_status(enum blk_req_status s1, - return BLKIF_RSP_OKAY; - } - --static bool blkif_completion(unsigned long *id, -- struct blkfront_ring_info *rinfo, -- struct blkif_response *bret) -+/* -+ * Return values: -+ * 1 response processed. -+ * 0 missing further responses. -+ * -1 error while processing. -+ */ -+static int blkif_completion(unsigned long *id, -+ struct blkfront_ring_info *rinfo, -+ struct blkif_response *bret) - { - int i = 0; - struct scatterlist *sg; -@@ -1399,7 +1417,7 @@ static bool blkif_completion(unsigned long *id, - - /* Wait the second response if not yet here. */ - if (s2->status < REQ_DONE) -- return false; -+ return 0; - - bret->status = blkif_get_final_status(s->status, - s2->status); -@@ -1432,7 +1450,7 @@ static bool blkif_completion(unsigned long *id, - data.s = s; - num_sg = s->num_sg; - -- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { -+ if (bret->operation == BLKIF_OP_READ && info->bounce) { - for_each_sg(s->sg, sg, num_sg, i) { - BUG_ON(sg->offset + sg->length > PAGE_SIZE); - -@@ -1450,47 +1468,48 @@ static bool blkif_completion(unsigned long *id, - } - /* Add the persistent grant into the list of free grants */ - for (i = 0; i < num_grant; i++) { -- if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { -+ if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) { - /* - * If the grant is still mapped by the backend (the - * backend has chosen to make this grant persistent) - * we add it at the head of the list, so it will be - * reused first. - */ -- if (!info->feature_persistent) -- pr_alert_ratelimited("backed has not unmapped grant: %u\n", -- s->grants_used[i]->gref); -+ if (!info->feature_persistent) { -+ pr_alert("backed has not unmapped grant: %u\n", -+ s->grants_used[i]->gref); -+ return -1; -+ } - list_add(&s->grants_used[i]->node, &rinfo->grants); - rinfo->persistent_gnts_c++; - } else { - /* -- * If the grant is not mapped by the backend we end the -- * foreign access and add it to the tail of the list, -- * so it will not be picked again unless we run out of -- * persistent grants. 
-+ * If the grant is not mapped by the backend we add it -+ * to the tail of the list, so it will not be picked -+ * again unless we run out of persistent grants. - */ -- gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); - s->grants_used[i]->gref = GRANT_INVALID_REF; - list_add_tail(&s->grants_used[i]->node, &rinfo->grants); - } - } - if (s->req.operation == BLKIF_OP_INDIRECT) { - for (i = 0; i < INDIRECT_GREFS(num_grant); i++) { -- if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { -- if (!info->feature_persistent) -- pr_alert_ratelimited("backed has not unmapped grant: %u\n", -- s->indirect_grants[i]->gref); -+ if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) { -+ if (!info->feature_persistent) { -+ pr_alert("backed has not unmapped grant: %u\n", -+ s->indirect_grants[i]->gref); -+ return -1; -+ } - list_add(&s->indirect_grants[i]->node, &rinfo->grants); - rinfo->persistent_gnts_c++; - } else { - struct page *indirect_page; - -- gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); - /* - * Add the used indirect page back to the list of - * available pages for indirect grefs. - */ -- if (!info->feature_persistent) { -+ if (!info->bounce) { - indirect_page = s->indirect_grants[i]->page; - list_add(&indirect_page->lru, &rinfo->indirect_pages); - } -@@ -1500,7 +1519,7 @@ static bool blkif_completion(unsigned long *id, - } - } - -- return true; -+ return 1; - } - - static irqreturn_t blkif_interrupt(int irq, void *dev_id) -@@ -1511,9 +1530,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) - unsigned long flags; - struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id; - struct blkfront_info *info = rinfo->dev_info; -+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; - -- if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) -+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { -+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); - return IRQ_HANDLED; -+ } - - spin_lock_irqsave(&rinfo->ring_lock, flags); - again: -@@ -1529,6 +1551,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) - unsigned long id; - unsigned int op; - -+ eoiflag = 0; -+ - RING_COPY_RESPONSE(&rinfo->ring, i, &bret); - id = bret.id; - -@@ -1561,12 +1585,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) - } - - if (bret.operation != BLKIF_OP_DISCARD) { -+ int ret; -+ - /* - * We may need to wait for an extra response if the - * I/O request is split in 2 - */ -- if (!blkif_completion(&id, rinfo, &bret)) -+ ret = blkif_completion(&id, rinfo, &bret); -+ if (!ret) - continue; -+ if (unlikely(ret < 0)) -+ goto err; - } - - if (add_id_to_freelist(rinfo, id)) { -@@ -1645,6 +1674,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) - - spin_unlock_irqrestore(&rinfo->ring_lock, flags); - -+ xen_irq_lateeoi(irq, eoiflag); -+ - return IRQ_HANDLED; - - err: -@@ -1652,6 +1683,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) - - spin_unlock_irqrestore(&rinfo->ring_lock, flags); - -+ /* No EOI in order to avoid further interrupts. 
*/ -+ - pr_alert("%s disabled for further use\n", info->gd->disk_name); - return IRQ_HANDLED; - } -@@ -1669,8 +1702,7 @@ static int setup_blkring(struct xenbus_device *dev, - for (i = 0; i < info->nr_ring_pages; i++) - rinfo->ring_ref[i] = GRANT_INVALID_REF; - -- sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH, -- get_order(ring_size)); -+ sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO); - if (!sring) { - xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); - return -ENOMEM; -@@ -1680,7 +1712,7 @@ static int setup_blkring(struct xenbus_device *dev, - - err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); - if (err < 0) { -- free_pages((unsigned long)sring, get_order(ring_size)); -+ free_pages_exact(sring, ring_size); - rinfo->ring.sring = NULL; - goto fail; - } -@@ -1691,8 +1723,8 @@ static int setup_blkring(struct xenbus_device *dev, - if (err) - goto fail; - -- err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0, -- "blkif", rinfo); -+ err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt, -+ 0, "blkif", rinfo); - if (err <= 0) { - xenbus_dev_fatal(dev, err, - "bind_evtchn_to_irqhandler failed"); -@@ -1754,6 +1786,12 @@ abort_transaction: - return err; - } - -+/* Enable the persistent grants feature. */ -+static bool feature_persistent = true; -+module_param(feature_persistent, bool, 0644); -+MODULE_PARM_DESC(feature_persistent, -+ "Enables the persistent grants feature"); -+ - /* Common code used when first setting up, and when resuming. */ - static int talk_to_blkback(struct xenbus_device *dev, - struct blkfront_info *info) -@@ -1768,6 +1806,10 @@ static int talk_to_blkback(struct xenbus_device *dev, - if (!info) - return -ENODEV; - -+ /* Check if backend is trusted. */ -+ info->bounce = !xen_blkif_trusted || -+ !xenbus_read_unsigned(dev->nodename, "trusted", 1); -+ - max_page_order = xenbus_read_unsigned(info->xbdev->otherend, - "max-ring-page-order", 0); - ring_page_order = min(xen_blkif_max_ring_order, max_page_order); -@@ -1841,8 +1883,9 @@ again: - message = "writing protocol"; - goto abort_transaction; - } -+ info->feature_persistent_parm = feature_persistent; - err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", -- info->feature_persistent); -+ info->feature_persistent_parm); - if (err) - dev_warn(&dev->dev, - "writing persistent grants feature to xenbus"); -@@ -1910,12 +1953,6 @@ static int negotiate_mq(struct blkfront_info *info) - return 0; - } - --/* Enable the persistent grants feature. */ --static bool feature_persistent = true; --module_param(feature_persistent, bool, 0644); --MODULE_PARM_DESC(feature_persistent, -- "Enables the persistent grants feature"); -- - /* - * Entry point to this code when a new device is created. Allocate the basic - * structures and the ring buffer for communication with the backend, and -@@ -1982,8 +2019,6 @@ static int blkfront_probe(struct xenbus_device *dev, - info->vdevice = vdevice; - info->connected = BLKIF_STATE_DISCONNECTED; - -- info->feature_persistent = feature_persistent; -- - /* Front end dir is a number, which is used as the id. */ - info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); - dev_set_drvdata(&dev->dev, info); -@@ -2118,9 +2153,11 @@ static void blkfront_closing(struct blkfront_info *info) - return; - - /* No more blkif_request(). 
*/ -- blk_mq_stop_hw_queues(info->rq); -- blk_set_queue_dying(info->rq); -- set_capacity(info->gd, 0); -+ if (info->rq && info->gd) { -+ blk_mq_stop_hw_queues(info->rq); -+ blk_mark_disk_dead(info->gd); -+ set_capacity(info->gd, 0); -+ } - - for_each_rinfo(info, rinfo, i) { - /* No more gnttab callback work. */ -@@ -2175,17 +2212,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) - if (err) - goto out_of_memory; - -- if (!info->feature_persistent && info->max_indirect_segments) { -+ if (!info->bounce && info->max_indirect_segments) { - /* -- * We are using indirect descriptors but not persistent -- * grants, we need to allocate a set of pages that can be -+ * We are using indirect descriptors but don't have a bounce -+ * buffer, we need to allocate a set of pages that can be - * used for mapping indirect grefs - */ - int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info); - - BUG_ON(!list_empty(&rinfo->indirect_pages)); - for (i = 0; i < num; i++) { -- struct page *indirect_page = alloc_page(GFP_KERNEL); -+ struct page *indirect_page = alloc_page(GFP_KERNEL | -+ __GFP_ZERO); - if (!indirect_page) - goto out_of_memory; - list_add(&indirect_page->lru, &rinfo->indirect_pages); -@@ -2274,10 +2312,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) - if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0)) - blkfront_setup_discard(info); - -- if (info->feature_persistent) -+ if (info->feature_persistent_parm) - info->feature_persistent = - !!xenbus_read_unsigned(info->xbdev->otherend, - "feature-persistent", 0); -+ if (info->feature_persistent) -+ info->bounce = true; - - indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, - "feature-max-indirect-segments", 0); -@@ -2456,16 +2496,19 @@ static int blkfront_remove(struct xenbus_device *xbdev) - - dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - -- del_gendisk(info->gd); -+ if (info->gd) -+ del_gendisk(info->gd); - - mutex_lock(&blkfront_mutex); - list_del(&info->info_list); - mutex_unlock(&blkfront_mutex); - - blkif_free(info, 0); -- xlbd_release_minors(info->gd->first_minor, info->gd->minors); -- blk_cleanup_disk(info->gd); -- blk_mq_free_tag_set(&info->tag_set); -+ if (info->gd) { -+ xlbd_release_minors(info->gd->first_minor, info->gd->minors); -+ blk_cleanup_disk(info->gd); -+ blk_mq_free_tag_set(&info->tag_set); -+ } - - kfree(info); - return 0; -@@ -2520,11 +2563,10 @@ static void purge_persistent_grants(struct blkfront_info *info) - list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, - node) { - if (gnt_list_entry->gref == GRANT_INVALID_REF || -- gnttab_query_foreign_access(gnt_list_entry->gref)) -+ !gnttab_try_end_foreign_access(gnt_list_entry->gref)) - continue; - - list_del(&gnt_list_entry->node); -- gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); - rinfo->persistent_gnts_c--; - gnt_list_entry->gref = GRANT_INVALID_REF; - list_add_tail(&gnt_list_entry->node, &rinfo->grants); -@@ -2539,6 +2581,13 @@ static void blkfront_delay_work(struct work_struct *work) - struct blkfront_info *info; - bool need_schedule_work = false; - -+ /* -+ * Note that when using bounce buffers but not persistent grants -+ * there's no need to run blkfront_delay_work because grants are -+ * revoked in blkif_completion or else an error is reported and the -+ * connection is closed. 
-+ */ -+ - mutex_lock(&blkfront_mutex); - - list_for_each_entry(info, &info_list, info_list) { -diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c -index 052aa3f65514e..0916de952e091 100644 ---- a/drivers/block/zram/zcomp.c -+++ b/drivers/block/zram/zcomp.c -@@ -63,12 +63,6 @@ static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp) - - bool zcomp_available_algorithm(const char *comp) - { -- int i; -- -- i = sysfs_match_string(backends, comp); -- if (i >= 0) -- return true; -- - /* - * Crypto does not ignore a trailing new line symbol, - * so make sure you don't supply a string containing -@@ -217,6 +211,11 @@ struct zcomp *zcomp_create(const char *compress) - struct zcomp *comp; - int error; - -+ /* -+ * Crypto API will execute /sbin/modprobe if the compression module -+ * is not loaded yet. We must do it here, otherwise we are about to -+ * call /sbin/modprobe under CPU hot-plug lock. -+ */ - if (!zcomp_available_algorithm(compress)) - return ERR_PTR(-EINVAL); - -diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index fcaf2750f68f7..6383c81ac5b37 100644 ---- a/drivers/block/zram/zram_drv.c -+++ b/drivers/block/zram/zram_drv.c -@@ -910,7 +910,7 @@ static ssize_t read_block_state(struct file *file, char __user *buf, - zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.', - zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.'); - -- if (count < copied) { -+ if (count <= copied) { - zram_slot_unlock(zram, index); - break; - } -diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c -index 5a321b4076aab..cab93935cc7f1 100644 ---- a/drivers/bluetooth/bfusb.c -+++ b/drivers/bluetooth/bfusb.c -@@ -628,6 +628,9 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i - data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; - data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); - -+ if (!data->bulk_pkt_size) -+ goto done; -+ - rwlock_init(&data->lock); - - data->reassembly = NULL; -diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c -index e4182acee488c..f228cdbccaee3 100644 ---- a/drivers/bluetooth/btbcm.c -+++ b/drivers/bluetooth/btbcm.c -@@ -6,8 +6,10 @@ - * Copyright (C) 2015 Intel Corporation - */ - -+#include - #include - #include -+#include - #include - - #include -@@ -32,6 +34,43 @@ - /* For kmalloc-ing the fw-name array instead of putting it on the stack */ - typedef char bcm_fw_name[BCM_FW_NAME_LEN]; - -+#ifdef CONFIG_EFI -+static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) -+{ -+ efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, -+ 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); -+ bdaddr_t efi_bdaddr, bdaddr; -+ efi_status_t status; -+ unsigned long len; -+ int ret; -+ -+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) -+ return -EOPNOTSUPP; -+ -+ len = sizeof(efi_bdaddr); -+ status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr); -+ if (status != EFI_SUCCESS) -+ return -ENXIO; -+ -+ if (len != sizeof(efi_bdaddr)) -+ return -EIO; -+ -+ baswap(&bdaddr, &efi_bdaddr); -+ -+ ret = btbcm_set_bdaddr(hdev, &bdaddr); -+ if (ret) -+ return ret; -+ -+ bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr); -+ return 0; -+} -+#else -+static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) -+{ -+ return -EOPNOTSUPP; -+} -+#endif -+ - int btbcm_check_bdaddr(struct hci_dev *hdev) - { - struct hci_rp_read_bd_addr *bda; -@@ -85,9 +124,12 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) - !bacmp(&bda->bdaddr, 
BDADDR_BCM4345C5) || - !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || - !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { -- bt_dev_info(hdev, "BCM: Using default device address (%pMR)", -- &bda->bdaddr); -- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); -+ /* Try falling back to BDADDR EFI variable */ -+ if (btbcm_set_bdaddr_from_efi(hdev) != 0) { -+ bt_dev_info(hdev, "BCM: Using default device address (%pMR)", -+ &bda->bdaddr); -+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); -+ } - } - - kfree_skb(skb); -@@ -343,6 +385,52 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) - return skb; - } - -+static const struct dmi_system_id disable_broken_read_transmit_power[] = { -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,1"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,2"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"), -+ }, -+ }, -+ { } -+}; -+ - static int btbcm_read_info(struct hci_dev *hdev) - { - struct sk_buff *skb; -@@ -363,6 +451,10 @@ static int btbcm_read_info(struct hci_dev *hdev) - bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); - kfree_skb(skb); - -+ /* Read DMI and disable broken Read LE Min/Max Tx Power */ -+ if (dmi_first_match(disable_broken_read_transmit_power)) -+ set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); -+ - return 0; - } - -@@ -402,6 +494,8 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { - { 0x6606, "BCM4345C5" }, /* 003.006.006 */ - { 0x230f, "BCM4356A2" }, /* 001.003.015 */ - { 0x220e, "BCM20702A1" }, /* 001.002.014 */ -+ { 0x420d, "BCM4349B1" }, /* 002.002.013 */ -+ { 0x420e, "BCM4349B1" }, /* 002.002.014 */ - { 0x4217, "BCM4329B1" }, /* 002.002.023 */ - { 0x6106, "BCM4359C0" }, /* 003.001.006 */ - { 0x4106, "BCM4335A0" }, /* 002.001.006 */ -diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c -index f1705b46fc889..2a4cc5d8c2d40 100644 ---- a/drivers/bluetooth/btintel.c -+++ b/drivers/bluetooth/btintel.c -@@ -2193,8 +2193,15 @@ static int btintel_setup_combined(struct hci_dev *hdev) - * As a workaround, send HCI Reset command first which will reset the - * number of completed commands and allow normal command processing - * from now on. -+ * -+ * Regarding the INTEL_BROKEN_SHUTDOWN_LED flag, these devices maybe -+ * in the SW_RFKILL ON state as a workaround of fixing LED issue during -+ * the shutdown() procedure, and once the device is in SW_RFKILL ON -+ * state, the only way to exit out of it is sending the HCI_Reset -+ * command. 
- */ -- if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD)) { -+ if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD) || -+ btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { - skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { -@@ -2256,27 +2263,31 @@ static int btintel_setup_combined(struct hci_dev *hdev) - - /* Apply the device specific HCI quirks - * -- * WBS for SdP - SdP and Stp have a same hw_varaint but -- * different fw_variant -+ * WBS for SdP - For the Legacy ROM products, only SdP -+ * supports the WBS. But the version information is not -+ * enough to use here because the StP2 and SdP have same -+ * hw_variant and fw_variant. So, this flag is set by -+ * the transport driver (btusb) based on the HW info -+ * (idProduct) - */ -- if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) -+ if (!btintel_test_flag(hdev, -+ INTEL_ROM_LEGACY_NO_WBS_SUPPORT)) - set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, - &hdev->quirks); -- -- /* These devices have an issue with LED which doesn't -- * go off immediately during shutdown. Set the flag -- * here to send the LED OFF command during shutdown. -- */ -- btintel_set_flag(hdev, INTEL_BROKEN_LED); -+ if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) -+ set_bit(HCI_QUIRK_VALID_LE_STATES, -+ &hdev->quirks); - - err = btintel_legacy_rom_setup(hdev, &ver); - break; - case 0x0b: /* SfP */ -- case 0x0c: /* WsP */ - case 0x11: /* JfP */ - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* CcP */ -+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); -+ fallthrough; -+ case 0x0c: /* WsP */ - /* Apply the device specific HCI quirks - * - * All Legacy bootloader devices support WBS -@@ -2284,11 +2295,6 @@ static int btintel_setup_combined(struct hci_dev *hdev) - set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, - &hdev->quirks); - -- /* Valid LE States quirk for JfP/ThP familiy */ -- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12) -- set_bit(HCI_QUIRK_VALID_LE_STATES, -- &hdev->quirks); -- - /* Setup MSFT Extension support */ - btintel_set_msft_opcode(hdev, ver.hw_variant); - -@@ -2329,10 +2335,14 @@ static int btintel_setup_combined(struct hci_dev *hdev) - case 0x12: /* ThP */ - case 0x13: /* HrP */ - case 0x14: /* CcP */ -- /* Some legacy bootloader devices from JfP supports both old -- * and TLV based HCI_Intel_Read_Version command. But we don't -- * want to use the TLV based setup routines for those legacy -- * bootloader device. -+ /* Some legacy bootloader devices starting from JfP, -+ * the operational firmware supports both old and TLV based -+ * HCI_Intel_Read_Version command based on the command -+ * parameter. -+ * -+ * For upgrading firmware case, the TLV based version cannot -+ * be used because the firmware filename for legacy bootloader -+ * is based on the old format. - * - * Also, it is not easy to convert TLV based version from the - * legacy version format. 
-@@ -2343,7 +2353,20 @@ static int btintel_setup_combined(struct hci_dev *hdev) - */ - err = btintel_read_version(hdev, &ver); - if (err) -- return err; -+ break; -+ -+ /* Apply the device specific HCI quirks -+ * -+ * All Legacy bootloader devices support WBS -+ */ -+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); -+ -+ /* Set Valid LE States quirk */ -+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); -+ -+ /* Setup MSFT Extension support */ -+ btintel_set_msft_opcode(hdev, ver.hw_variant); -+ - err = btintel_bootloader_setup(hdev, &ver); - break; - case 0x17: -@@ -2358,9 +2381,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) - */ - set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); - -- /* Valid LE States quirk for GfP */ -- if (INTEL_HW_VARIANT(ver_tlv.cnvi_bt) == 0x18) -- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); -+ /* Apply LE States quirk from solar onwards */ -+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); - - /* Setup MSFT Extension support */ - btintel_set_msft_opcode(hdev, -@@ -2371,7 +2393,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) - default: - bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", - INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); -- return -EINVAL; -+ err = -EINVAL; -+ break; - } - - exit_error: -@@ -2399,9 +2422,10 @@ static int btintel_shutdown_combined(struct hci_dev *hdev) - - /* Some platforms have an issue with BT LED when the interface is - * down or BT radio is turned off, which takes 5 seconds to BT LED -- * goes off. This command turns off the BT LED immediately. -+ * goes off. As a workaround, sends HCI_Intel_SW_RFKILL to put the -+ * device in the RFKILL ON state which turns off the BT LED immediately. - */ -- if (btintel_test_flag(hdev, INTEL_BROKEN_LED)) { -+ if (btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { - skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - ret = PTR_ERR(skb); -diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h -index aa64072bbe68d..2b85ebf633211 100644 ---- a/drivers/bluetooth/btintel.h -+++ b/drivers/bluetooth/btintel.h -@@ -145,8 +145,9 @@ enum { - INTEL_FIRMWARE_FAILED, - INTEL_BOOTING, - INTEL_BROKEN_INITIAL_NCMD, -- INTEL_BROKEN_LED, -+ INTEL_BROKEN_SHUTDOWN_LED, - INTEL_ROM_LEGACY, -+ INTEL_ROM_LEGACY_NO_WBS_SUPPORT, - - __INTEL_NUM_FLAGS, - }; -diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c -index 9872ef18f9fea..d66e4df171d20 100644 ---- a/drivers/bluetooth/btmtksdio.c -+++ b/drivers/bluetooth/btmtksdio.c -@@ -331,6 +331,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) - { - struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); - struct hci_event_hdr *hdr = (void *)skb->data; -+ u8 evt = hdr->evt; - int err; - - /* Fix up the vendor event id with 0xff for vendor specific instead -@@ -355,7 +356,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) - if (err < 0) - goto err_free_skb; - -- if (hdr->evt == HCI_EV_VENDOR) { -+ if (evt == HCI_EV_VENDOR) { - if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, - &bdev->tx_state)) { - /* Barrier to sync with other CPUs */ -@@ -981,6 +982,8 @@ static int btmtksdio_probe(struct sdio_func *func, - hdev->manufacturer = 70; - set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); - -+ sdio_set_drvdata(func, bdev); -+ - err = hci_register_dev(hdev); - if (err < 0) { - dev_err(&func->dev, "Can't register HCI device\n"); -@@ -988,8 +991,6 @@ static int btmtksdio_probe(struct sdio_func 
*func, - return err; - } - -- sdio_set_drvdata(func, bdev); -- - /* pm_runtime_enable would be done after the firmware is being - * downloaded because the core layer probably already enables - * runtime PM for this func such as the case host->caps & -@@ -1042,6 +1043,8 @@ static int btmtksdio_runtime_suspend(struct device *dev) - if (!bdev) - return 0; - -+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); -+ - sdio_claim_host(bdev->func); - - sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); -diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c -index e9d91d7c0db48..9ba22b13b4fa0 100644 ---- a/drivers/bluetooth/btmtkuart.c -+++ b/drivers/bluetooth/btmtkuart.c -@@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, - int err; - - hlen = sizeof(*hdr) + wmt_params->dlen; -- if (hlen > 255) -- return -EINVAL; -+ if (hlen > 255) { -+ err = -EINVAL; -+ goto err_free_skb; -+ } - - hdr = (struct mtk_wmt_hdr *)&wc; - hdr->dir = 1; -@@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, - err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); - if (err < 0) { - clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); -- return err; -+ goto err_free_skb; - } - - /* The vendor specific WMT commands are all answered by a vendor -@@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, - if (err == -EINTR) { - bt_dev_err(hdev, "Execution of wmt command interrupted"); - clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); -- return err; -+ goto err_free_skb; - } - - if (err) { - bt_dev_err(hdev, "Execution of wmt command timed out"); - clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); -- return -ETIMEDOUT; -+ err = -ETIMEDOUT; -+ goto err_free_skb; - } - - /* Parse and handle the return WMT event */ -diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c -index 2acb719e596f5..11c7e04bf3947 100644 ---- a/drivers/bluetooth/btqcomsmd.c -+++ b/drivers/bluetooth/btqcomsmd.c -@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev) - return 0; - } - -+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) -+{ -+ int ret; -+ -+ ret = qca_set_bdaddr_rome(hdev, bdaddr); -+ if (ret) -+ return ret; -+ -+ /* The firmware stops responding for a while after setting the bdaddr, -+ * causing timeouts for subsequent commands. Sleep a bit to avoid this. 
-+ */ -+ usleep_range(1000, 10000); -+ return 0; -+} -+ - static int btqcomsmd_probe(struct platform_device *pdev) - { - struct btqcomsmd *btq; -@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) - hdev->close = btqcomsmd_close; - hdev->send = btqcomsmd_send; - hdev->setup = btqcomsmd_setup; -- hdev->set_bdaddr = qca_set_bdaddr_rome; -+ hdev->set_bdaddr = btqcomsmd_set_bdaddr; - - ret = hci_register_dev(hdev); - if (ret < 0) -diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c -index 199e8f7d426d9..2e4ac39dd9751 100644 ---- a/drivers/bluetooth/btsdio.c -+++ b/drivers/bluetooth/btsdio.c -@@ -355,6 +355,7 @@ static void btsdio_remove(struct sdio_func *func) - if (!data) - return; - -+ cancel_work_sync(&data->work); - hdev = data->hdev; - - sdio_set_drvdata(func, NULL); -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 60d2fce59a71d..84a42348b3bcb 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -59,7 +59,9 @@ static struct usb_driver btusb_driver; - #define BTUSB_WIDEBAND_SPEECH 0x400000 - #define BTUSB_VALID_LE_STATES 0x800000 - #define BTUSB_QCA_WCN6855 0x1000000 -+#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED 0x2000000 - #define BTUSB_INTEL_BROKEN_INITIAL_NCMD 0x4000000 -+#define BTUSB_INTEL_NO_WBS_SUPPORT 0x8000000 - - static const struct usb_device_id btusb_table[] = { - /* Generic Bluetooth USB device */ -@@ -295,6 +297,24 @@ static const struct usb_device_id blacklist_table[] = { - { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, - - /* Broadcom BCM2035 */ - { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, -@@ -365,16 +385,25 @@ static const struct usb_device_id blacklist_table[] = { - { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, - { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, - { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | -- BTUSB_INTEL_BROKEN_INITIAL_NCMD }, -- { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED }, -+ BTUSB_INTEL_NO_WBS_SUPPORT | -+ BTUSB_INTEL_BROKEN_INITIAL_NCMD | -+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, -+ { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED | -+ BTUSB_INTEL_NO_WBS_SUPPORT | -+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, - { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED }, -- { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED }, -+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED | -+ BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, - { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_COMBINED }, - - /* Other Intel Bluetooth devices */ - { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), - .driver_info = BTUSB_IGNORE }, - -+ /* 
Realtek 8821CE Bluetooth devices */ -+ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ - /* Realtek 8822CE Bluetooth devices */ - { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, -@@ -382,8 +411,30 @@ static const struct usb_device_id blacklist_table[] = { - BTUSB_WIDEBAND_SPEECH }, - - /* Realtek 8852AE Bluetooth devices */ -+ { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, - { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ -+ /* Realtek 8852CE Bluetooth devices */ -+ { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, - - /* Realtek Bluetooth devices */ - { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), -@@ -410,10 +461,27 @@ static const struct usb_device_id blacklist_table[] = { - { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, - { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | - BTUSB_WIDEBAND_SPEECH | - BTUSB_VALID_LE_STATES }, - -+ /* MediaTek MT7922A Bluetooth devices */ -+ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | -+ BTUSB_WIDEBAND_SPEECH | -+ BTUSB_VALID_LE_STATES }, -+ - /* Additional Realtek 8723AE Bluetooth devices */ - { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, - { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, -@@ -433,9 +501,15 @@ static const struct usb_device_id blacklist_table[] = { - { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, - { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, - -+ /* Additional Realtek 8761B Bluetooth devices */ -+ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, -+ - /* Additional Realtek 8761BU Bluetooth devices */ - { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, -+ { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | -+ BTUSB_WIDEBAND_SPEECH }, - - /* Additional Realtek 8821AE Bluetooth devices */ - { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, -@@ -451,10 +525,6 @@ static const struct usb_device_id blacklist_table[] = { - /* Additional Realtek 8822CE Bluetooth devices */ - { 
USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, -- /* Bluetooth component of Realtek 8852AE device */ -- { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | -- BTUSB_WIDEBAND_SPEECH }, -- - { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | - BTUSB_WIDEBAND_SPEECH }, - { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | -@@ -672,13 +742,13 @@ static inline void btusb_free_frags(struct btusb_data *data) - - spin_lock_irqsave(&data->rxlock, flags); - -- kfree_skb(data->evt_skb); -+ dev_kfree_skb_irq(data->evt_skb); - data->evt_skb = NULL; - -- kfree_skb(data->acl_skb); -+ dev_kfree_skb_irq(data->acl_skb); - data->acl_skb = NULL; - -- kfree_skb(data->sco_skb); -+ dev_kfree_skb_irq(data->sco_skb); - data->sco_skb = NULL; - - spin_unlock_irqrestore(&data->rxlock, flags); -@@ -1686,7 +1756,7 @@ static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts) - * alternate setting. - */ - spin_lock_irqsave(&data->rxlock, flags); -- kfree_skb(data->sco_skb); -+ dev_kfree_skb_irq(data->sco_skb); - data->sco_skb = NULL; - spin_unlock_irqrestore(&data->rxlock, flags); - -@@ -1838,6 +1908,11 @@ static int btusb_setup_csr(struct hci_dev *hdev) - - rp = (struct hci_rp_read_local_version *)skb->data; - -+ bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u", -+ le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev), -+ le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver), -+ le16_to_cpu(rp->manufacturer)); -+ - /* Detect a wide host of Chinese controllers that aren't CSR. - * - * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 -@@ -2217,6 +2292,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) - skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); - if (!skb) { - hdev->stat.err_rx++; -+ kfree(urb->setup_packet); - return; - } - -@@ -2237,6 +2313,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) - data->evt_skb = skb_clone(skb, GFP_ATOMIC); - if (!data->evt_skb) { - kfree_skb(skb); -+ kfree(urb->setup_packet); - return; - } - } -@@ -2245,6 +2322,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) - if (err < 0) { - kfree_skb(data->evt_skb); - data->evt_skb = NULL; -+ kfree(urb->setup_packet); - return; - } - -@@ -2255,6 +2333,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) - wake_up_bit(&data->flags, - BTUSB_TX_WAIT_VND_EVT); - } -+ kfree(urb->setup_packet); - return; - } else if (urb->status == -ENOENT) { - /* Avoid suspend failed when usb_kill_urb */ -@@ -2275,6 +2354,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb) - usb_anchor_urb(urb, &data->ctrl_anchor); - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { -+ kfree(urb->setup_packet); - /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected - */ -@@ -2367,15 +2447,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, - - set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - -+ /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, -+ * it needs constantly polling control pipe until the host received the -+ * WMT event, thus, we should require to specifically acquire PM counter -+ * on the USB to prevent the interface from entering auto suspended -+ * while WMT cmd/event in progress. 
-+ */ -+ err = usb_autopm_get_interface(data->intf); -+ if (err < 0) -+ goto err_free_wc; -+ - err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); - - if (err < 0) { - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); -+ usb_autopm_put_interface(data->intf); - goto err_free_wc; - } - - /* Submit control IN URB on demand to process the WMT event */ - err = btusb_mtk_submit_wmt_recv_urb(hdev); -+ -+ usb_autopm_put_interface(data->intf); -+ - if (err < 0) - goto err_free_wc; - -@@ -2515,6 +2609,7 @@ static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwnam - } else { - bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)", - status); -+ err = -EIO; - goto err_release_fw; - } - } -@@ -2804,11 +2899,16 @@ static int btusb_mtk_setup(struct hci_dev *hdev) - case 0x7668: - fwname = FIRMWARE_MT7668; - break; -+ case 0x7922: - case 0x7961: - snprintf(fw_bin_name, sizeof(fw_bin_name), - "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", - dev_id & 0xffff, (fw_version & 0xff) + 1); - err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name); -+ if (err < 0) { -+ bt_dev_err(hdev, "Failed to set up firmware (%d)", err); -+ return err; -+ } - - /* It's Device EndPoint Reset Option Register */ - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); -@@ -2828,6 +2928,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev) - } - - hci_set_msft_opcode(hdev, 0xFD30); -+ hci_set_aosp_capable(hdev); - goto done; - default: - bt_dev_err(hdev, "Unsupported hardware variant (%08x)", -@@ -3806,8 +3907,14 @@ static int btusb_probe(struct usb_interface *intf, - hdev->send = btusb_send_frame_intel; - hdev->cmd_timeout = btusb_intel_cmd_timeout; - -+ if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT) -+ btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT); -+ - if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD) - btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD); -+ -+ if (id->driver_info & BTUSB_INTEL_BROKEN_SHUTDOWN_LED) -+ btintel_set_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED); - } - - if (id->driver_info & BTUSB_MARVELL) -diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c -index ef54afa293574..cf622e4596055 100644 ---- a/drivers/bluetooth/hci_bcm.c -+++ b/drivers/bluetooth/hci_bcm.c -@@ -1188,7 +1188,12 @@ static int bcm_probe(struct platform_device *pdev) - return -ENOMEM; - - dev->dev = &pdev->dev; -- dev->irq = platform_get_irq(pdev, 0); -+ -+ ret = platform_get_irq(pdev, 0); -+ if (ret < 0) -+ return ret; -+ -+ dev->irq = ret; - - /* Initialize routing field to an unused value */ - dev->pcm_int_params[0] = 0xff; -@@ -1510,8 +1515,10 @@ static const struct of_device_id bcm_bluetooth_of_match[] = { - { .compatible = "brcm,bcm4345c5" }, - { .compatible = "brcm,bcm4330-bt" }, - { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, -+ { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data }, - { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, - { .compatible = "brcm,bcm4335a0" }, -+ { .compatible = "infineon,cyw55572-bt" }, - { }, - }; - MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); -diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c -index cf4a560958173..8055f63603f45 100644 ---- a/drivers/bluetooth/hci_bcsp.c -+++ b/drivers/bluetooth/hci_bcsp.c -@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) - i++; - - __skb_unlink(skb, &bcsp->unack); -- kfree_skb(skb); -+ dev_kfree_skb_irq(skb); - } - - if (skb_queue_empty(&bcsp->unack)) -diff --git a/drivers/bluetooth/hci_h5.c 
b/drivers/bluetooth/hci_h5.c -index 0c0dedece59c5..1363b21c81b73 100644 ---- a/drivers/bluetooth/hci_h5.c -+++ b/drivers/bluetooth/hci_h5.c -@@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5) - break; - - __skb_unlink(skb, &h5->unack); -- kfree_skb(skb); -+ dev_kfree_skb_irq(skb); - } - - if (skb_queue_empty(&h5->unack)) -@@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count) - count -= processed; - } - -- pm_runtime_get(&hu->serdev->dev); -- pm_runtime_mark_last_busy(&hu->serdev->dev); -- pm_runtime_put_autosuspend(&hu->serdev->dev); -+ if (hu->serdev) { -+ pm_runtime_get(&hu->serdev->dev); -+ pm_runtime_mark_last_busy(&hu->serdev->dev); -+ pm_runtime_put_autosuspend(&hu->serdev->dev); -+ } - - return 0; - } -@@ -627,9 +629,11 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) - break; - } - -- pm_runtime_get_sync(&hu->serdev->dev); -- pm_runtime_mark_last_busy(&hu->serdev->dev); -- pm_runtime_put_autosuspend(&hu->serdev->dev); -+ if (hu->serdev) { -+ pm_runtime_get_sync(&hu->serdev->dev); -+ pm_runtime_mark_last_busy(&hu->serdev->dev); -+ pm_runtime_put_autosuspend(&hu->serdev->dev); -+ } - - return 0; - } -@@ -846,6 +850,8 @@ static int h5_serdev_probe(struct serdev_device *serdev) - h5->vnd = data->vnd; - } - -+ if (data->driver_info & H5_INFO_WAKEUP_DISABLE) -+ set_bit(H5_WAKEUP_DISABLE, &h5->flags); - - h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(h5->enable_gpio)) -@@ -860,9 +866,6 @@ static int h5_serdev_probe(struct serdev_device *serdev) - if (err) - return err; - -- if (data->driver_info & H5_INFO_WAKEUP_DISABLE) -- set_bit(H5_WAKEUP_DISABLE, &h5->flags); -- - return 0; - } - -@@ -962,11 +965,13 @@ static void h5_btrtl_open(struct h5 *h5) - serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); - serdev_device_set_baudrate(h5->hu->serdev, 115200); - -- pm_runtime_set_active(&h5->hu->serdev->dev); -- pm_runtime_use_autosuspend(&h5->hu->serdev->dev); -- pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, -- SUSPEND_TIMEOUT_MS); -- pm_runtime_enable(&h5->hu->serdev->dev); -+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { -+ pm_runtime_set_active(&h5->hu->serdev->dev); -+ pm_runtime_use_autosuspend(&h5->hu->serdev->dev); -+ pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, -+ SUSPEND_TIMEOUT_MS); -+ pm_runtime_enable(&h5->hu->serdev->dev); -+ } - - /* The controller needs up to 500ms to wakeup */ - gpiod_set_value_cansleep(h5->enable_gpio, 1); -@@ -976,7 +981,8 @@ static void h5_btrtl_open(struct h5 *h5) - - static void h5_btrtl_close(struct h5 *h5) - { -- pm_runtime_disable(&h5->hu->serdev->dev); -+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) -+ pm_runtime_disable(&h5->hu->serdev->dev); - - gpiod_set_value_cansleep(h5->device_wake_gpio, 0); - gpiod_set_value_cansleep(h5->enable_gpio, 0); -diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c -index 7249b91d9b91a..78afb9a348e70 100644 ---- a/drivers/bluetooth/hci_intel.c -+++ b/drivers/bluetooth/hci_intel.c -@@ -1217,7 +1217,11 @@ static struct platform_driver intel_driver = { - - int __init intel_init(void) - { -- platform_driver_register(&intel_driver); -+ int err; -+ -+ err = platform_driver_register(&intel_driver); -+ if (err) -+ return err; - - return hci_uart_register_proto(&intel_proto); - } -diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c -index 5ed2cfa7da1d9..2d960a5e36793 100644 ---- a/drivers/bluetooth/hci_ldisc.c -+++ b/drivers/bluetooth/hci_ldisc.c -@@ 
-490,6 +490,11 @@ static int hci_uart_tty_open(struct tty_struct *tty) - BT_ERR("Can't allocate control structure"); - return -ENFILE; - } -+ if (percpu_init_rwsem(&hu->proto_lock)) { -+ BT_ERR("Can't allocate semaphore structure"); -+ kfree(hu); -+ return -ENOMEM; -+ } - - tty->disc_data = hu; - hu->tty = tty; -@@ -502,8 +507,6 @@ static int hci_uart_tty_open(struct tty_struct *tty) - INIT_WORK(&hu->init_ready, hci_uart_init_work); - INIT_WORK(&hu->write_work, hci_uart_write_work); - -- percpu_init_rwsem(&hu->proto_lock); -- - /* Flush any pending characters in the driver */ - tty_driver_flush_buffer(tty); - -diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c -index eb1e736efeebb..e4e5b26e2c33b 100644 ---- a/drivers/bluetooth/hci_ll.c -+++ b/drivers/bluetooth/hci_ll.c -@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) - default: - BT_ERR("illegal hcill state: %ld (losing packet)", - ll->hcill_state); -- kfree_skb(skb); -+ dev_kfree_skb_irq(skb); - break; - } - -diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c -index 05f7f6de6863d..97da0b2bfd17e 100644 ---- a/drivers/bluetooth/hci_nokia.c -+++ b/drivers/bluetooth/hci_nokia.c -@@ -734,7 +734,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) - return err; - } - -- clk_prepare_enable(sysclk); -+ err = clk_prepare_enable(sysclk); -+ if (err) { -+ dev_err(dev, "could not enable sysclk: %d", err); -+ return err; -+ } - btdev->sysclk_speed = clk_get_rate(sysclk); - clk_disable_unprepare(sysclk); - -diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c -index 53deea2eb7b4d..56b4b7248483a 100644 ---- a/drivers/bluetooth/hci_qca.c -+++ b/drivers/bluetooth/hci_qca.c -@@ -78,7 +78,8 @@ enum qca_flags { - QCA_HW_ERROR_EVENT, - QCA_SSR_TRIGGERED, - QCA_BT_OFF, -- QCA_ROM_FW -+ QCA_ROM_FW, -+ QCA_DEBUGFS_CREATED, - }; - - enum qca_capabilities { -@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev) - if (!hdev->debugfs) - return; - -+ if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) -+ return; -+ - ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); - - /* read only */ -@@ -696,9 +700,9 @@ static int qca_close(struct hci_uart *hu) - skb_queue_purge(&qca->tx_wait_q); - skb_queue_purge(&qca->txq); - skb_queue_purge(&qca->rx_memdump_q); -- del_timer(&qca->tx_idle_timer); -- del_timer(&qca->wake_retrans_timer); - destroy_workqueue(qca->workqueue); -+ del_timer_sync(&qca->tx_idle_timer); -+ del_timer_sync(&qca->wake_retrans_timer); - qca->hu = NULL; - - kfree_skb(qca->rx_skb); -@@ -912,7 +916,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) - default: - BT_ERR("Illegal tx state: %d (losing packet)", - qca->tx_ibs_state); -- kfree_skb(skb); -+ dev_kfree_skb_irq(skb); - break; - } - -@@ -1582,10 +1586,11 @@ static bool qca_prevent_wake(struct hci_dev *hdev) - struct hci_uart *hu = hci_get_drvdata(hdev); - bool wakeup; - -- /* UART driver handles the interrupt from BT SoC.So we need to use -- * device handle of UART driver to get the status of device may wakeup. -+ /* BT SoC attached through the serial bus is handled by the serdev driver. -+ * So we need to use the device handle of the serdev driver to get the -+ * status of device may wakeup. 
- */ -- wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent); -+ wakeup = device_may_wakeup(&hu->serdev->ctrl->dev); - bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup); - - return !wakeup; -@@ -1927,6 +1932,9 @@ static int qca_power_off(struct hci_dev *hdev) - hu->hdev->hw_error = NULL; - hu->hdev->cmd_timeout = NULL; - -+ del_timer_sync(&qca->wake_retrans_timer); -+ del_timer_sync(&qca->tx_idle_timer); -+ - /* Stop sending shutdown command if soc crashes. */ - if (soc_type != QCA_ROME - && qca->memdump_state == QCA_MEMDUMP_IDLE) { -@@ -2055,14 +2063,14 @@ static int qca_serdev_probe(struct serdev_device *serdev) - - qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", - GPIOD_OUT_LOW); -- if (!qcadev->bt_en && data->soc_type == QCA_WCN6750) { -+ if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) { - dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); - power_ctrl_enabled = false; - } - - qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", - GPIOD_IN); -- if (!qcadev->sw_ctrl && data->soc_type == QCA_WCN6750) -+ if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750) - dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); - - qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); -@@ -2084,7 +2092,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) - - qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", - GPIOD_OUT_LOW); -- if (!qcadev->bt_en) { -+ if (IS_ERR_OR_NULL(qcadev->bt_en)) { - dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); - power_ctrl_enabled = false; - } -@@ -2153,10 +2161,17 @@ static void qca_serdev_shutdown(struct device *dev) - int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); - struct serdev_device *serdev = to_serdev_device(dev); - struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); -+ struct hci_uart *hu = &qcadev->serdev_hu; -+ struct hci_dev *hdev = hu->hdev; -+ struct qca_data *qca = hu->priv; - const u8 ibs_wake_cmd[] = { 0xFD }; - const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; - - if (qcadev->btsoc_type == QCA_QCA6390) { -+ if (test_bit(QCA_BT_OFF, &qca->flags) || -+ !test_bit(HCI_RUNNING, &hdev->flags)) -+ return; -+ - serdev_device_write_flush(serdev); - ret = serdev_device_write_buf(serdev, ibs_wake_cmd, - sizeof(ibs_wake_cmd)); -diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c -index 3b00d82d36cf7..649d112eea787 100644 ---- a/drivers/bluetooth/hci_serdev.c -+++ b/drivers/bluetooth/hci_serdev.c -@@ -301,9 +301,12 @@ int hci_uart_register_device(struct hci_uart *hu, - - serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); - -+ if (percpu_init_rwsem(&hu->proto_lock)) -+ return -ENOMEM; -+ - err = serdev_device_open(hu->serdev); - if (err) -- return err; -+ goto err_rwsem; - - err = p->open(hu); - if (err) -@@ -327,7 +330,6 @@ int hci_uart_register_device(struct hci_uart *hu, - - INIT_WORK(&hu->init_ready, hci_uart_init_work); - INIT_WORK(&hu->write_work, hci_uart_write_work); -- percpu_init_rwsem(&hu->proto_lock); - - /* Only when vendor specific setup callback is provided, consider - * the manufacturer information valid. 
This avoids filling in the -@@ -377,6 +379,8 @@ err_alloc: - p->close(hu); - err_open: - serdev_device_close(hu->serdev); -+err_rwsem: -+ percpu_free_rwsem(&hu->proto_lock); - return err; - } - EXPORT_SYMBOL_GPL(hci_uart_register_device); -@@ -398,5 +402,6 @@ void hci_uart_unregister_device(struct hci_uart *hu) - clear_bit(HCI_UART_PROTO_READY, &hu->flags); - serdev_device_close(hu->serdev); - } -+ percpu_free_rwsem(&hu->proto_lock); - } - EXPORT_SYMBOL_GPL(hci_uart_unregister_device); -diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c -index 8ab26dec5f6e8..8469f9876dd26 100644 ---- a/drivers/bluetooth/hci_vhci.c -+++ b/drivers/bluetooth/hci_vhci.c -@@ -121,6 +121,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) - if (opcode & 0x80) - set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); - -+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); -+ - if (hci_register_dev(hdev) < 0) { - BT_ERR("Can't register HCI device"); - hci_free_dev(hdev); -diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c -index 57908ce4fae85..612f10456849f 100644 ---- a/drivers/bluetooth/virtio_bt.c -+++ b/drivers/bluetooth/virtio_bt.c -@@ -202,6 +202,9 @@ static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb) - hci_skb_pkt_type(skb) = pkt_type; - hci_recv_frame(vbt->hdev, skb); - break; -+ default: -+ kfree_skb(skb); -+ break; - } - } - -@@ -216,7 +219,7 @@ static void virtbt_rx_work(struct work_struct *work) - if (!skb) - return; - -- skb->len = len; -+ skb_put(skb, len); - virtbt_rx_handle(vbt, skb); - - if (virtbt_add_inbuf(vbt) < 0) -diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile -index 52c2f35a26a99..16da51130d1a1 100644 ---- a/drivers/bus/Makefile -+++ b/drivers/bus/Makefile -@@ -39,4 +39,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o - obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o - - # MHI --obj-$(CONFIG_MHI_BUS) += mhi/ -+obj-y += mhi/ -diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c -index 8fd4a356a86ec..74593a1722fe0 100644 ---- a/drivers/bus/fsl-mc/fsl-mc-bus.c -+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c -@@ -1236,14 +1236,14 @@ error_cleanup_mc_io: - static int fsl_mc_bus_remove(struct platform_device *pdev) - { - struct fsl_mc *mc = platform_get_drvdata(pdev); -+ struct fsl_mc_io *mc_io; - - if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)) - return -EINVAL; - -+ mc_io = mc->root_mc_bus_dev->mc_io; - fsl_mc_device_remove(mc->root_mc_bus_dev); -- -- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); -- mc->root_mc_bus_dev->mc_io = NULL; -+ fsl_destroy_mc_io(mc_io); - - bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb); - -diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c -index 378f5d62a9912..e7eaa8784fee0 100644 ---- a/drivers/bus/hisi_lpc.c -+++ b/drivers/bus/hisi_lpc.c -@@ -503,13 +503,13 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) - { - struct acpi_device *adev = ACPI_COMPANION(hostdev); - struct acpi_device *child; -+ struct platform_device *pdev; - int ret; - - /* Only consider the children of the host */ - list_for_each_entry(child, &adev->children, node) { - const char *hid = acpi_device_hid(child); - const struct hisi_lpc_acpi_cell *cell; -- struct platform_device *pdev; - const struct resource *res; - bool found = false; - int num_res; -@@ -571,22 +571,24 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) - - ret = platform_device_add_resources(pdev, res, num_res); - if (ret) -- goto fail; -+ goto fail_put_device; - - ret = 
platform_device_add_data(pdev, cell->pdata, - cell->pdata_size); - if (ret) -- goto fail; -+ goto fail_put_device; - - ret = platform_device_add(pdev); - if (ret) -- goto fail; -+ goto fail_put_device; - - acpi_device_set_enumerated(child); - } - - return 0; - -+fail_put_device: -+ platform_device_put(pdev); - fail: - hisi_lpc_acpi_remove(hostdev); - return ret; -diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c -index 28bb65a5613fd..201767823edb5 100644 ---- a/drivers/bus/imx-weim.c -+++ b/drivers/bus/imx-weim.c -@@ -192,8 +192,8 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) - const struct of_device_id *of_id = of_match_device(weim_id_table, - &pdev->dev); - const struct imx_weim_devtype *devtype = of_id->data; -+ int ret = 0, have_child = 0; - struct device_node *child; -- int ret, have_child = 0; - struct cs_timing_state ts = {}; - u32 reg; - -diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c -index a4388440aca7a..972603ed06a6c 100644 ---- a/drivers/bus/intel-ixp4xx-eb.c -+++ b/drivers/bus/intel-ixp4xx-eb.c -@@ -33,7 +33,7 @@ - #define IXP4XX_EXP_TIMING_STRIDE 0x04 - #define IXP4XX_EXP_CS_EN BIT(31) - #define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */ --#define IXP4XX_EXP_T1_MASK GENMASK(28, 27) -+#define IXP4XX_EXP_T1_MASK GENMASK(29, 28) - #define IXP4XX_EXP_T1_SHIFT 28 - #define IXP4XX_EXP_T2_MASK GENMASK(27, 26) - #define IXP4XX_EXP_T2_SHIFT 26 -@@ -49,7 +49,7 @@ - #define IXP4XX_EXP_SIZE_SHIFT 10 - #define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */ - #define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */ --#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x */ -+#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */ - #define IXP4XX_EXP_BYTE_RD16 BIT(6) - #define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */ - #define IXP4XX_EXP_MUX_EN BIT(4) -@@ -57,8 +57,6 @@ - #define IXP4XX_EXP_WORD BIT(2) /* Always zero */ - #define IXP4XX_EXP_WR_EN BIT(1) - #define IXP4XX_EXP_BYTE_EN BIT(0) --#define IXP42X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD) --#define IXP43X_RESERVED (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD) - - #define IXP4XX_EXP_CNFG0 0x20 - #define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31) -@@ -252,10 +250,9 @@ static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb, - cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT; - } - -- if (eb->is_42x) -- cs_cfg &= ~IXP42X_RESERVED; - if (eb->is_43x) { -- cs_cfg &= ~IXP43X_RESERVED; -+ /* Should always be zero */ -+ cs_cfg &= ~IXP4XX_EXP_WORD; - /* - * This bit for Intel strata flash is currently unused, but let's - * report it if we find one. -diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig -index da5cd0c9fc620..4748df7f9cd58 100644 ---- a/drivers/bus/mhi/Kconfig -+++ b/drivers/bus/mhi/Kconfig -@@ -2,30 +2,7 @@ - # - # MHI bus - # --# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+# Copyright (c) 2021, Linaro Ltd. - # - --config MHI_BUS -- tristate "Modem Host Interface (MHI) bus" -- help -- Bus driver for MHI protocol. Modem Host Interface (MHI) is a -- communication protocol used by the host processors to control -- and communicate with modem devices over a high speed peripheral -- bus or shared memory. -- --config MHI_BUS_DEBUG -- bool "Debugfs support for the MHI bus" -- depends on MHI_BUS && DEBUG_FS -- help -- Enable debugfs support for use with the MHI transport. 
Allows -- reading and/or modifying some values within the MHI controller -- for debug and test purposes. -- --config MHI_BUS_PCI_GENERIC -- tristate "MHI PCI controller driver" -- depends on MHI_BUS -- depends on PCI -- help -- This driver provides MHI PCI controller driver for devices such as -- Qualcomm SDX55 based PCIe modems. -- -+source "drivers/bus/mhi/host/Kconfig" -diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile -index 0a2d778d6fb42..5f5708a249f54 100644 ---- a/drivers/bus/mhi/Makefile -+++ b/drivers/bus/mhi/Makefile -@@ -1,6 +1,2 @@ --# core layer --obj-y += core/ -- --obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o --mhi_pci_generic-y += pci_generic.o -- -+# Host MHI stack -+obj-y += host/ -diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile -deleted file mode 100644 -index c3feb4130aa37..0000000000000 ---- a/drivers/bus/mhi/core/Makefile -+++ /dev/null -@@ -1,4 +0,0 @@ --obj-$(CONFIG_MHI_BUS) += mhi.o -- --mhi-y := init.o main.o pm.o boot.o --mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o -diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c -deleted file mode 100644 -index 0a972620a4030..0000000000000 ---- a/drivers/bus/mhi/core/boot.c -+++ /dev/null -@@ -1,533 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -- * -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include "internal.h" -- --/* Setup RDDM vector table for RDDM transfer and program RXVEC */ --void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, -- struct image_info *img_info) --{ -- struct mhi_buf *mhi_buf = img_info->mhi_buf; -- struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; -- void __iomem *base = mhi_cntrl->bhie; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 sequence_id; -- unsigned int i; -- -- for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { -- bhi_vec->dma_addr = mhi_buf->dma_addr; -- bhi_vec->size = mhi_buf->len; -- } -- -- dev_dbg(dev, "BHIe programming for RDDM\n"); -- -- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, -- upper_32_bits(mhi_buf->dma_addr)); -- -- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, -- lower_32_bits(mhi_buf->dma_addr)); -- -- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); -- sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); -- -- mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, -- BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, -- sequence_id); -- -- dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", -- &mhi_buf->dma_addr, mhi_buf->len, sequence_id); --} -- --/* Collect RDDM buffer during kernel panic */ --static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) --{ -- int ret; -- u32 rx_status; -- enum mhi_ee_type ee; -- const u32 delayus = 2000; -- u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; -- const u32 rddm_timeout_us = 200000; -- int rddm_retry = rddm_timeout_us / delayus; -- void __iomem *base = mhi_cntrl->bhie; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- TO_MHI_EXEC_STR(mhi_cntrl->ee)); -- -- /* -- * This should only be executing during a kernel panic, we expect all -- * other cores to shutdown while we're collecting RDDM buffer. 
After -- * returning from this function, we expect the device to reset. -- * -- * Normaly, we read/write pm_state only after grabbing the -- * pm_lock, since we're in a panic, skipping it. Also there is no -- * gurantee that this state change would take effect since -- * we're setting it w/o grabbing pm_lock -- */ -- mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -- /* update should take the effect immediately */ -- smp_wmb(); -- -- /* -- * Make sure device is not already in RDDM. In case the device asserts -- * and a kernel panic follows, device will already be in RDDM. -- * Do not trigger SYS ERR again and proceed with waiting for -- * image download completion. -- */ -- ee = mhi_get_exec_env(mhi_cntrl); -- if (ee == MHI_EE_MAX) -- goto error_exit_rddm; -- -- if (ee != MHI_EE_RDDM) { -- dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); -- -- dev_dbg(dev, "Waiting for device to enter RDDM\n"); -- while (rddm_retry--) { -- ee = mhi_get_exec_env(mhi_cntrl); -- if (ee == MHI_EE_RDDM) -- break; -- -- udelay(delayus); -- } -- -- if (rddm_retry <= 0) { -- /* Hardware reset so force device to enter RDDM */ -- dev_dbg(dev, -- "Did not enter RDDM, do a host req reset\n"); -- mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, -- MHI_SOC_RESET_REQ_OFFSET, -- MHI_SOC_RESET_REQ); -- udelay(delayus); -- } -- -- ee = mhi_get_exec_env(mhi_cntrl); -- } -- -- dev_dbg(dev, -- "Waiting for RDDM image download via BHIe, current EE:%s\n", -- TO_MHI_EXEC_STR(ee)); -- -- while (retry--) { -- ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, -- BHIE_RXVECSTATUS_STATUS_BMSK, -- BHIE_RXVECSTATUS_STATUS_SHFT, -- &rx_status); -- if (ret) -- return -EIO; -- -- if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) -- return 0; -- -- udelay(delayus); -- } -- -- ee = mhi_get_exec_env(mhi_cntrl); -- ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); -- -- dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); -- --error_exit_rddm: -- dev_err(dev, "RDDM transfer failed. Current EE: %s\n", -- TO_MHI_EXEC_STR(ee)); -- -- return -EIO; --} -- --/* Download RDDM image from device */ --int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) --{ -- void __iomem *base = mhi_cntrl->bhie; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 rx_status; -- -- if (in_panic) -- return __mhi_download_rddm_in_panic(mhi_cntrl); -- -- dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); -- -- /* Wait for the image download to complete */ -- wait_event_timeout(mhi_cntrl->state_event, -- mhi_read_reg_field(mhi_cntrl, base, -- BHIE_RXVECSTATUS_OFFS, -- BHIE_RXVECSTATUS_STATUS_BMSK, -- BHIE_RXVECSTATUS_STATUS_SHFT, -- &rx_status) || rx_status, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; --} --EXPORT_SYMBOL_GPL(mhi_download_rddm_image); -- --static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, -- const struct mhi_buf *mhi_buf) --{ -- void __iomem *base = mhi_cntrl->bhie; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- rwlock_t *pm_lock = &mhi_cntrl->pm_lock; -- u32 tx_status, sequence_id; -- int ret; -- -- read_lock_bh(pm_lock); -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- read_unlock_bh(pm_lock); -- return -EIO; -- } -- -- sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); -- dev_dbg(dev, "Starting image download via BHIe. 
Sequence ID: %u\n", -- sequence_id); -- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, -- upper_32_bits(mhi_buf->dma_addr)); -- -- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, -- lower_32_bits(mhi_buf->dma_addr)); -- -- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); -- -- mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, -- BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, -- sequence_id); -- read_unlock_bh(pm_lock); -- -- /* Wait for the image download to complete */ -- ret = wait_event_timeout(mhi_cntrl->state_event, -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -- mhi_read_reg_field(mhi_cntrl, base, -- BHIE_TXVECSTATUS_OFFS, -- BHIE_TXVECSTATUS_STATUS_BMSK, -- BHIE_TXVECSTATUS_STATUS_SHFT, -- &tx_status) || tx_status, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -- tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) -- return -EIO; -- -- return (!ret) ? -ETIMEDOUT : 0; --} -- --static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, -- dma_addr_t dma_addr, -- size_t size) --{ -- u32 tx_status, val, session_id; -- int i, ret; -- void __iomem *base = mhi_cntrl->bhi; -- rwlock_t *pm_lock = &mhi_cntrl->pm_lock; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- struct { -- char *name; -- u32 offset; -- } error_reg[] = { -- { "ERROR_CODE", BHI_ERRCODE }, -- { "ERROR_DBG1", BHI_ERRDBG1 }, -- { "ERROR_DBG2", BHI_ERRDBG2 }, -- { "ERROR_DBG3", BHI_ERRDBG3 }, -- { NULL }, -- }; -- -- read_lock_bh(pm_lock); -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- read_unlock_bh(pm_lock); -- goto invalid_pm_state; -- } -- -- session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); -- dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", -- session_id); -- mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); -- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, -- upper_32_bits(dma_addr)); -- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, -- lower_32_bits(dma_addr)); -- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); -- mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); -- read_unlock_bh(pm_lock); -- -- /* Wait for the image download to complete */ -- ret = wait_event_timeout(mhi_cntrl->state_event, -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -- mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, -- BHI_STATUS_MASK, BHI_STATUS_SHIFT, -- &tx_status) || tx_status, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -- goto invalid_pm_state; -- -- if (tx_status == BHI_STATUS_ERROR) { -- dev_err(dev, "Image transfer failed\n"); -- read_lock_bh(pm_lock); -- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- for (i = 0; error_reg[i].name; i++) { -- ret = mhi_read_reg(mhi_cntrl, base, -- error_reg[i].offset, &val); -- if (ret) -- break; -- dev_err(dev, "Reg: %s value: 0x%x\n", -- error_reg[i].name, val); -- } -- } -- read_unlock_bh(pm_lock); -- goto invalid_pm_state; -- } -- -- return (!ret) ? 
-ETIMEDOUT : 0; -- --invalid_pm_state: -- -- return -EIO; --} -- --void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, -- struct image_info *image_info) --{ -- int i; -- struct mhi_buf *mhi_buf = image_info->mhi_buf; -- -- for (i = 0; i < image_info->entries; i++, mhi_buf++) -- dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, -- mhi_buf->buf, mhi_buf->dma_addr); -- -- kfree(image_info->mhi_buf); -- kfree(image_info); --} -- --int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, -- struct image_info **image_info, -- size_t alloc_size) --{ -- size_t seg_size = mhi_cntrl->seg_len; -- int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; -- int i; -- struct image_info *img_info; -- struct mhi_buf *mhi_buf; -- -- img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); -- if (!img_info) -- return -ENOMEM; -- -- /* Allocate memory for entries */ -- img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), -- GFP_KERNEL); -- if (!img_info->mhi_buf) -- goto error_alloc_mhi_buf; -- -- /* Allocate and populate vector table */ -- mhi_buf = img_info->mhi_buf; -- for (i = 0; i < segments; i++, mhi_buf++) { -- size_t vec_size = seg_size; -- -- /* Vector table is the last entry */ -- if (i == segments - 1) -- vec_size = sizeof(struct bhi_vec_entry) * i; -- -- mhi_buf->len = vec_size; -- mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -- vec_size, &mhi_buf->dma_addr, -- GFP_KERNEL); -- if (!mhi_buf->buf) -- goto error_alloc_segment; -- } -- -- img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; -- img_info->entries = segments; -- *image_info = img_info; -- -- return 0; -- --error_alloc_segment: -- for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) -- dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, -- mhi_buf->buf, mhi_buf->dma_addr); -- --error_alloc_mhi_buf: -- kfree(img_info); -- -- return -ENOMEM; --} -- --static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, -- const struct firmware *firmware, -- struct image_info *img_info) --{ -- size_t remainder = firmware->size; -- size_t to_cpy; -- const u8 *buf = firmware->data; -- struct mhi_buf *mhi_buf = img_info->mhi_buf; -- struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; -- -- while (remainder) { -- to_cpy = min(remainder, mhi_buf->len); -- memcpy(mhi_buf->buf, buf, to_cpy); -- bhi_vec->dma_addr = mhi_buf->dma_addr; -- bhi_vec->size = to_cpy; -- -- buf += to_cpy; -- remainder -= to_cpy; -- bhi_vec++; -- mhi_buf++; -- } --} -- --void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) --{ -- const struct firmware *firmware = NULL; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- const char *fw_name; -- void *buf; -- dma_addr_t dma_addr; -- size_t size; -- int i, ret; -- -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- dev_err(dev, "Device MHI is not in valid state\n"); -- return; -- } -- -- /* save hardware info from BHI */ -- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, -- &mhi_cntrl->serial_number); -- if (ret) -- dev_err(dev, "Could not capture serial number via BHI\n"); -- -- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { -- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), -- &mhi_cntrl->oem_pk_hash[i]); -- if (ret) { -- dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); -- break; -- } -- } -- -- /* wait for ready on pass through or any other execution environment */ -- if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL) -- goto fw_load_ready_state; -- -- fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
-- mhi_cntrl->edl_image : mhi_cntrl->fw_image; -- -- if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || -- !mhi_cntrl->seg_len))) { -- dev_err(dev, -- "No firmware image defined or !sbl_size || !seg_len\n"); -- goto error_fw_load; -- } -- -- ret = request_firmware(&firmware, fw_name, dev); -- if (ret) { -- dev_err(dev, "Error loading firmware: %d\n", ret); -- goto error_fw_load; -- } -- -- size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; -- -- /* SBL size provided is maximum size, not necessarily the image size */ -- if (size > firmware->size) -- size = firmware->size; -- -- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, -- GFP_KERNEL); -- if (!buf) { -- release_firmware(firmware); -- goto error_fw_load; -- } -- -- /* Download image using BHI */ -- memcpy(buf, firmware->data, size); -- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); -- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); -- -- /* Error or in EDL mode, we're done */ -- if (ret) { -- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); -- release_firmware(firmware); -- goto error_fw_load; -- } -- -- /* Wait for ready since EDL image was loaded */ -- if (fw_name == mhi_cntrl->edl_image) { -- release_firmware(firmware); -- goto fw_load_ready_state; -- } -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->dev_state = MHI_STATE_RESET; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- /* -- * If we're doing fbc, populate vector tables while -- * device transitioning into MHI READY state -- */ -- if (mhi_cntrl->fbc_download) { -- ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, -- firmware->size); -- if (ret) { -- release_firmware(firmware); -- goto error_fw_load; -- } -- -- /* Load the firmware into BHIE vec table */ -- mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); -- } -- -- release_firmware(firmware); -- --fw_load_ready_state: -- /* Transitioning into MHI RESET->READY state */ -- ret = mhi_ready_state_transition(mhi_cntrl); -- if (ret) { -- dev_err(dev, "MHI did not enter READY state\n"); -- goto error_ready_state; -- } -- -- dev_info(dev, "Wait for device to enter SBL or Mission mode\n"); -- return; -- --error_ready_state: -- if (mhi_cntrl->fbc_download) { -- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); -- mhi_cntrl->fbc_image = NULL; -- } -- --error_fw_load: -- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; -- wake_up_all(&mhi_cntrl->state_event); --} -- --int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) --{ -- struct image_info *image_info = mhi_cntrl->fbc_image; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int ret; -- -- if (!image_info) -- return -EIO; -- -- ret = mhi_fw_load_bhie(mhi_cntrl, -- /* Vector table is the last entry */ -- &image_info->mhi_buf[image_info->entries - 1]); -- if (ret) { -- dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); -- mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; -- wake_up_all(&mhi_cntrl->state_event); -- } -- -- return ret; --} -diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c -deleted file mode 100644 -index 858d7516410bb..0000000000000 ---- a/drivers/bus/mhi/core/debugfs.c -+++ /dev/null -@@ -1,413 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
-- * -- */ -- --#include --#include --#include --#include --#include --#include --#include "internal.h" -- --static int mhi_debugfs_states_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- -- /* states */ -- seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- TO_MHI_EXEC_STR(mhi_cntrl->ee), -- mhi_cntrl->wake_set ? "true" : "false"); -- -- /* counters */ -- seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, -- mhi_cntrl->M3); -- -- seq_printf(m, " device wake: %u pending packets: %u\n", -- atomic_read(&mhi_cntrl->dev_wake), -- atomic_read(&mhi_cntrl->pending_pkts)); -- -- return 0; --} -- --static int mhi_debugfs_events_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- struct mhi_event *mhi_event; -- struct mhi_event_ctxt *er_ctxt; -- int i; -- -- if (!mhi_is_active(mhi_cntrl)) { -- seq_puts(m, "Device not ready\n"); -- return -ENODEV; -- } -- -- er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; -- i++, er_ctxt++, mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- if (mhi_event->offload_ev) { -- seq_printf(m, "Index: %d is an offload event ring\n", -- i); -- continue; -- } -- -- seq_printf(m, "Index: %d intmod count: %lu time: %lu", -- i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >> -- EV_CTX_INTMODC_SHIFT, -- (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >> -- EV_CTX_INTMODT_SHIFT); -- -- seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase, -- er_ctxt->rlen); -- -- seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp, -- er_ctxt->wp); -- -- seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, -- &mhi_event->db_cfg.db_val); -- } -- -- return 0; --} -- --static int mhi_debugfs_channels_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- struct mhi_chan *mhi_chan; -- struct mhi_chan_ctxt *chan_ctxt; -- int i; -- -- if (!mhi_is_active(mhi_cntrl)) { -- seq_puts(m, "Device not ready\n"); -- return -ENODEV; -- } -- -- mhi_chan = mhi_cntrl->mhi_chan; -- chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; -- for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { -- struct mhi_ring *ring = &mhi_chan->tre_ring; -- -- if (mhi_chan->offload_ch) { -- seq_printf(m, "%s(%u) is an offload channel\n", -- mhi_chan->name, mhi_chan->chan); -- continue; -- } -- -- if (!mhi_chan->mhi_dev) -- continue; -- -- seq_printf(m, -- "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", -- mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg & -- CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, -- (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >> -- CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg & -- CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); -- -- seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype, -- chan_ctxt->erindex); -- -- seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", -- chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp, -- chan_ctxt->wp); -- -- seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n", -- ring->rp, ring->wp, -- &mhi_chan->db_cfg.db_val); -- } -- -- return 0; --} -- --static int mhi_device_info_show(struct device *dev, void *data) --{ -- struct mhi_device *mhi_dev; -- -- if (dev->bus != &mhi_bus_type) -- return 0; -- -- mhi_dev = to_mhi_device(dev); -- -- seq_printf((struct 
seq_file *)data, "%s: type: %s dev_wake: %u", -- mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", -- mhi_dev->dev_wake); -- -- /* for transfer device types only */ -- if (mhi_dev->dev_type == MHI_DEVICE_XFER) -- seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", -- mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); -- -- seq_puts((struct seq_file *)data, "\n"); -- -- return 0; --} -- --static int mhi_debugfs_devices_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- -- if (!mhi_is_active(mhi_cntrl)) { -- seq_puts(m, "Device not ready\n"); -- return -ENODEV; -- } -- -- /* Show controller and client(s) info */ -- mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); -- device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); -- -- return 0; --} -- --static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- enum mhi_state state; -- enum mhi_ee_type ee; -- int i, ret = -EIO; -- u32 val; -- void __iomem *mhi_base = mhi_cntrl->regs; -- void __iomem *bhi_base = mhi_cntrl->bhi; -- void __iomem *bhie_base = mhi_cntrl->bhie; -- void __iomem *wake_db = mhi_cntrl->wake_db; -- struct { -- const char *name; -- int offset; -- void __iomem *base; -- } regs[] = { -- { "MHI_REGLEN", MHIREGLEN, mhi_base}, -- { "MHI_VER", MHIVER, mhi_base}, -- { "MHI_CFG", MHICFG, mhi_base}, -- { "MHI_CTRL", MHICTRL, mhi_base}, -- { "MHI_STATUS", MHISTATUS, mhi_base}, -- { "MHI_WAKE_DB", 0, wake_db}, -- { "BHI_EXECENV", BHI_EXECENV, bhi_base}, -- { "BHI_STATUS", BHI_STATUS, bhi_base}, -- { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, -- { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, -- { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, -- { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, -- { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, -- { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, -- { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, -- { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, -- { NULL }, -- }; -- -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -- return ret; -- -- seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- TO_MHI_EXEC_STR(mhi_cntrl->ee)); -- -- state = mhi_get_mhi_state(mhi_cntrl); -- ee = mhi_get_exec_env(mhi_cntrl); -- seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), -- TO_MHI_STATE_STR(state)); -- -- for (i = 0; regs[i].name; i++) { -- if (!regs[i].base) -- continue; -- ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, -- &val); -- if (ret) -- continue; -- -- seq_printf(m, "%s: 0x%x\n", regs[i].name, val); -- } -- -- return 0; --} -- --static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -- -- if (!mhi_is_active(mhi_cntrl)) { -- seq_puts(m, "Device not ready\n"); -- return -ENODEV; -- } -- -- seq_printf(m, -- "Wake count: %d\n%s\n", mhi_dev->dev_wake, -- "Usage: echo get/put > device_wake to vote/unvote for M0"); -- -- return 0; --} -- --static ssize_t mhi_debugfs_device_wake_write(struct file *file, -- const char __user *ubuf, -- size_t count, loff_t *ppos) --{ -- struct seq_file *m = file->private_data; -- struct mhi_controller *mhi_cntrl = m->private; -- struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -- char buf[16]; -- int ret = -EINVAL; -- -- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) -- return 
-EFAULT; -- -- if (!strncmp(buf, "get", 3)) { -- ret = mhi_device_get_sync(mhi_dev); -- } else if (!strncmp(buf, "put", 3)) { -- mhi_device_put(mhi_dev); -- ret = 0; -- } -- -- return ret ? ret : count; --} -- --static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) --{ -- struct mhi_controller *mhi_cntrl = m->private; -- -- seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); -- -- return 0; --} -- --static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, -- const char __user *ubuf, -- size_t count, loff_t *ppos) --{ -- struct seq_file *m = file->private_data; -- struct mhi_controller *mhi_cntrl = m->private; -- u32 timeout_ms; -- -- if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) -- return -EINVAL; -- -- mhi_cntrl->timeout_ms = timeout_ms; -- -- return count; --} -- --static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_states_show, inode->i_private); --} -- --static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_events_show, inode->i_private); --} -- --static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_channels_show, inode->i_private); --} -- --static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_devices_show, inode->i_private); --} -- --static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); --} -- --static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); --} -- --static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) --{ -- return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); --} -- --static const struct file_operations debugfs_states_fops = { -- .open = mhi_debugfs_states_open, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_events_fops = { -- .open = mhi_debugfs_events_open, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_channels_fops = { -- .open = mhi_debugfs_channels_open, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_devices_fops = { -- .open = mhi_debugfs_devices_open, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_regdump_fops = { -- .open = mhi_debugfs_regdump_open, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_device_wake_fops = { -- .open = mhi_debugfs_device_wake_open, -- .write = mhi_debugfs_device_wake_write, -- .release = single_release, -- .read = seq_read, --}; -- --static const struct file_operations debugfs_timeout_ms_fops = { -- .open = mhi_debugfs_timeout_ms_open, -- .write = mhi_debugfs_timeout_ms_write, -- .release = single_release, -- .read = seq_read, --}; -- --static struct dentry *mhi_debugfs_root; -- --void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) --{ -- mhi_cntrl->debugfs_dentry = -- debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev), -- mhi_debugfs_root); -- -- debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_states_fops); -- debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, 
&debugfs_events_fops); -- debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_channels_fops); -- debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_devices_fops); -- debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_regdump_fops); -- debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_device_wake_fops); -- debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, -- mhi_cntrl, &debugfs_timeout_ms_fops); --} -- --void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) --{ -- debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); -- mhi_cntrl->debugfs_dentry = NULL; --} -- --void mhi_debugfs_init(void) --{ -- mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); --} -- --void mhi_debugfs_exit(void) --{ -- debugfs_remove_recursive(mhi_debugfs_root); --} -diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c -deleted file mode 100644 -index 5aaca6d0f52b2..0000000000000 ---- a/drivers/bus/mhi/core/init.c -+++ /dev/null -@@ -1,1427 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -- * -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include "internal.h" -- --static DEFINE_IDA(mhi_controller_ida); -- --const char * const mhi_ee_str[MHI_EE_MAX] = { -- [MHI_EE_PBL] = "PRIMARY BOOTLOADER", -- [MHI_EE_SBL] = "SECONDARY BOOTLOADER", -- [MHI_EE_AMSS] = "MISSION MODE", -- [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", -- [MHI_EE_WFW] = "WLAN FIRMWARE", -- [MHI_EE_PTHRU] = "PASS THROUGH", -- [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", -- [MHI_EE_FP] = "FLASH PROGRAMMER", -- [MHI_EE_DISABLE_TRANSITION] = "DISABLE", -- [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", --}; -- --const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { -- [DEV_ST_TRANSITION_PBL] = "PBL", -- [DEV_ST_TRANSITION_READY] = "READY", -- [DEV_ST_TRANSITION_SBL] = "SBL", -- [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", -- [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", -- [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", -- [DEV_ST_TRANSITION_DISABLE] = "DISABLE", --}; -- --const char * const mhi_state_str[MHI_STATE_MAX] = { -- [MHI_STATE_RESET] = "RESET", -- [MHI_STATE_READY] = "READY", -- [MHI_STATE_M0] = "M0", -- [MHI_STATE_M1] = "M1", -- [MHI_STATE_M2] = "M2", -- [MHI_STATE_M3] = "M3", -- [MHI_STATE_M3_FAST] = "M3 FAST", -- [MHI_STATE_BHI] = "BHI", -- [MHI_STATE_SYS_ERR] = "SYS ERROR", --}; -- --const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { -- [MHI_CH_STATE_TYPE_RESET] = "RESET", -- [MHI_CH_STATE_TYPE_STOP] = "STOP", -- [MHI_CH_STATE_TYPE_START] = "START", --}; -- --static const char * const mhi_pm_state_str[] = { -- [MHI_PM_STATE_DISABLE] = "DISABLE", -- [MHI_PM_STATE_POR] = "POWER ON RESET", -- [MHI_PM_STATE_M0] = "M0", -- [MHI_PM_STATE_M2] = "M2", -- [MHI_PM_STATE_M3_ENTER] = "M?->M3", -- [MHI_PM_STATE_M3] = "M3", -- [MHI_PM_STATE_M3_EXIT] = "M3->M0", -- [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", -- [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", -- [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", -- [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", -- [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", --}; -- --const char *to_mhi_pm_state_str(enum mhi_pm_state state) --{ -- int index = find_last_bit((unsigned long 
*)&state, 32); -- -- if (index >= ARRAY_SIZE(mhi_pm_state_str)) -- return "Invalid State"; -- -- return mhi_pm_state_str[index]; --} -- --static ssize_t serial_number_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- -- return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", -- mhi_cntrl->serial_number); --} --static DEVICE_ATTR_RO(serial_number); -- --static ssize_t oem_pk_hash_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- int i, cnt = 0; -- -- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) -- cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, -- "OEMPKHASH[%d]: 0x%x\n", i, -- mhi_cntrl->oem_pk_hash[i]); -- -- return cnt; --} --static DEVICE_ATTR_RO(oem_pk_hash); -- --static struct attribute *mhi_dev_attrs[] = { -- &dev_attr_serial_number.attr, -- &dev_attr_oem_pk_hash.attr, -- NULL, --}; --ATTRIBUTE_GROUPS(mhi_dev); -- --/* MHI protocol requires the transfer ring to be aligned with ring length */ --static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring, -- u64 len) --{ -- ring->alloc_size = len + (len - 1); -- ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -- &ring->dma_handle, GFP_KERNEL); -- if (!ring->pre_aligned) -- return -ENOMEM; -- -- ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); -- ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); -- -- return 0; --} -- --void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) --{ -- int i; -- struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -- -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- -- free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -- } -- -- free_irq(mhi_cntrl->irq[0], mhi_cntrl); --} -- --int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; -- int i, ret; -- -- /* if controller driver has set irq_flags, use it */ -- if (mhi_cntrl->irq_flags) -- irq_flags = mhi_cntrl->irq_flags; -- -- /* Setup BHI_INTVEC IRQ */ -- ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, -- mhi_intvec_threaded_handler, -- irq_flags, -- "bhi", mhi_cntrl); -- if (ret) -- return ret; -- -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- -- if (mhi_event->irq >= mhi_cntrl->nr_irqs) { -- dev_err(dev, "irq %d not available for event ring\n", -- mhi_event->irq); -- ret = -EINVAL; -- goto error_request; -- } -- -- ret = request_irq(mhi_cntrl->irq[mhi_event->irq], -- mhi_irq_handler, -- irq_flags, -- "mhi", mhi_event); -- if (ret) { -- dev_err(dev, "Error requesting irq:%d for ev:%d\n", -- mhi_cntrl->irq[mhi_event->irq], i); -- goto error_request; -- } -- } -- -- return 0; -- --error_request: -- for (--i, --mhi_event; i >= 0; i--, mhi_event--) { -- if (mhi_event->offload_ev) -- continue; -- -- free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -- } -- free_irq(mhi_cntrl->irq[0], mhi_cntrl); -- -- return ret; --} -- --void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) --{ -- int i; -- struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; -- struct mhi_cmd 
*mhi_cmd; -- struct mhi_event *mhi_event; -- struct mhi_ring *ring; -- -- mhi_cmd = mhi_cntrl->mhi_cmd; -- for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { -- ring = &mhi_cmd->ring; -- dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -- ring->pre_aligned, ring->dma_handle); -- ring->base = NULL; -- ring->iommu_base = 0; -- } -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, -- sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, -- mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); -- -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- -- ring = &mhi_event->ring; -- dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -- ring->pre_aligned, ring->dma_handle); -- ring->base = NULL; -- ring->iommu_base = 0; -- } -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * -- mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, -- mhi_ctxt->er_ctxt_addr); -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * -- mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, -- mhi_ctxt->chan_ctxt_addr); -- -- kfree(mhi_ctxt); -- mhi_cntrl->mhi_ctxt = NULL; --} -- --int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_ctxt *mhi_ctxt; -- struct mhi_chan_ctxt *chan_ctxt; -- struct mhi_event_ctxt *er_ctxt; -- struct mhi_cmd_ctxt *cmd_ctxt; -- struct mhi_chan *mhi_chan; -- struct mhi_event *mhi_event; -- struct mhi_cmd *mhi_cmd; -- u32 tmp; -- int ret = -ENOMEM, i; -- -- atomic_set(&mhi_cntrl->dev_wake, 0); -- atomic_set(&mhi_cntrl->pending_pkts, 0); -- -- mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); -- if (!mhi_ctxt) -- return -ENOMEM; -- -- /* Setup channel ctxt */ -- mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -- sizeof(*mhi_ctxt->chan_ctxt) * -- mhi_cntrl->max_chan, -- &mhi_ctxt->chan_ctxt_addr, -- GFP_KERNEL); -- if (!mhi_ctxt->chan_ctxt) -- goto error_alloc_chan_ctxt; -- -- mhi_chan = mhi_cntrl->mhi_chan; -- chan_ctxt = mhi_ctxt->chan_ctxt; -- for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { -- /* Skip if it is an offload channel */ -- if (mhi_chan->offload_ch) -- continue; -- -- tmp = chan_ctxt->chcfg; -- tmp &= ~CHAN_CTX_CHSTATE_MASK; -- tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); -- tmp &= ~CHAN_CTX_BRSTMODE_MASK; -- tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); -- tmp &= ~CHAN_CTX_POLLCFG_MASK; -- tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); -- chan_ctxt->chcfg = tmp; -- -- chan_ctxt->chtype = mhi_chan->type; -- chan_ctxt->erindex = mhi_chan->er_index; -- -- mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -- mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; -- } -- -- /* Setup event context */ -- mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -- sizeof(*mhi_ctxt->er_ctxt) * -- mhi_cntrl->total_ev_rings, -- &mhi_ctxt->er_ctxt_addr, -- GFP_KERNEL); -- if (!mhi_ctxt->er_ctxt) -- goto error_alloc_er_ctxt; -- -- er_ctxt = mhi_ctxt->er_ctxt; -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -- mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- /* Skip if it is an offload event */ -- if (mhi_event->offload_ev) -- continue; -- -- tmp = er_ctxt->intmod; -- tmp &= ~EV_CTX_INTMODC_MASK; -- tmp &= ~EV_CTX_INTMODT_MASK; -- tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); -- er_ctxt->intmod = tmp; -- -- er_ctxt->ertype = MHI_ER_TYPE_VALID; -- er_ctxt->msivec = mhi_event->irq; -- 
mhi_event->db_cfg.db_mode = true; -- -- ring->el_size = sizeof(struct mhi_tre); -- ring->len = ring->el_size * ring->elements; -- ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); -- if (ret) -- goto error_alloc_er; -- -- /* -- * If the read pointer equals to the write pointer, then the -- * ring is empty -- */ -- ring->rp = ring->wp = ring->base; -- er_ctxt->rbase = ring->iommu_base; -- er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; -- er_ctxt->rlen = ring->len; -- ring->ctxt_wp = &er_ctxt->wp; -- } -- -- /* Setup cmd context */ -- ret = -ENOMEM; -- mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -- sizeof(*mhi_ctxt->cmd_ctxt) * -- NR_OF_CMD_RINGS, -- &mhi_ctxt->cmd_ctxt_addr, -- GFP_KERNEL); -- if (!mhi_ctxt->cmd_ctxt) -- goto error_alloc_er; -- -- mhi_cmd = mhi_cntrl->mhi_cmd; -- cmd_ctxt = mhi_ctxt->cmd_ctxt; -- for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -- struct mhi_ring *ring = &mhi_cmd->ring; -- -- ring->el_size = sizeof(struct mhi_tre); -- ring->elements = CMD_EL_PER_RING; -- ring->len = ring->el_size * ring->elements; -- ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); -- if (ret) -- goto error_alloc_cmd; -- -- ring->rp = ring->wp = ring->base; -- cmd_ctxt->rbase = ring->iommu_base; -- cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; -- cmd_ctxt->rlen = ring->len; -- ring->ctxt_wp = &cmd_ctxt->wp; -- } -- -- mhi_cntrl->mhi_ctxt = mhi_ctxt; -- -- return 0; -- --error_alloc_cmd: -- for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { -- struct mhi_ring *ring = &mhi_cmd->ring; -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -- ring->pre_aligned, ring->dma_handle); -- } -- dma_free_coherent(mhi_cntrl->cntrl_dev, -- sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, -- mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); -- i = mhi_cntrl->total_ev_rings; -- mhi_event = mhi_cntrl->mhi_event + i; -- --error_alloc_er: -- for (--i, --mhi_event; i >= 0; i--, mhi_event--) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- if (mhi_event->offload_ev) -- continue; -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -- ring->pre_aligned, ring->dma_handle); -- } -- dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * -- mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, -- mhi_ctxt->er_ctxt_addr); -- --error_alloc_er_ctxt: -- dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * -- mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, -- mhi_ctxt->chan_ctxt_addr); -- --error_alloc_chan_ctxt: -- kfree(mhi_ctxt); -- -- return ret; --} -- --int mhi_init_mmio(struct mhi_controller *mhi_cntrl) --{ -- u32 val; -- int i, ret; -- struct mhi_chan *mhi_chan; -- struct mhi_event *mhi_event; -- void __iomem *base = mhi_cntrl->regs; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- struct { -- u32 offset; -- u32 mask; -- u32 shift; -- u32 val; -- } reg_info[] = { -- { -- CCABAP_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), -- }, -- { -- CCABAP_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), -- }, -- { -- ECABAP_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), -- }, -- { -- ECABAP_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), -- }, -- { -- CRCBAP_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), -- }, -- { -- CRCBAP_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), -- }, -- { -- MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, -- mhi_cntrl->total_ev_rings, -- }, -- { -- MHICFG, 
MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, -- mhi_cntrl->hw_ev_rings, -- }, -- { -- MHICTRLBASE_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->iova_start), -- }, -- { -- MHICTRLBASE_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->iova_start), -- }, -- { -- MHIDATABASE_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->iova_start), -- }, -- { -- MHIDATABASE_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->iova_start), -- }, -- { -- MHICTRLLIMIT_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->iova_stop), -- }, -- { -- MHICTRLLIMIT_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->iova_stop), -- }, -- { -- MHIDATALIMIT_HIGHER, U32_MAX, 0, -- upper_32_bits(mhi_cntrl->iova_stop), -- }, -- { -- MHIDATALIMIT_LOWER, U32_MAX, 0, -- lower_32_bits(mhi_cntrl->iova_stop), -- }, -- { 0, 0, 0 } -- }; -- -- dev_dbg(dev, "Initializing MHI registers\n"); -- -- /* Read channel db offset */ -- ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, -- CHDBOFF_CHDBOFF_SHIFT, &val); -- if (ret) { -- dev_err(dev, "Unable to read CHDBOFF register\n"); -- return -EIO; -- } -- -- /* Setup wake db */ -- mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); -- mhi_cntrl->wake_set = false; -- -- /* Setup channel db address for each channel in tre_ring */ -- mhi_chan = mhi_cntrl->mhi_chan; -- for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) -- mhi_chan->tre_ring.db_addr = base + val; -- -- /* Read event ring db offset */ -- ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, -- ERDBOFF_ERDBOFF_SHIFT, &val); -- if (ret) { -- dev_err(dev, "Unable to read ERDBOFF register\n"); -- return -EIO; -- } -- -- /* Setup event db address for each ev_ring */ -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- -- mhi_event->ring.db_addr = base + val; -- } -- -- /* Setup DB register for primary CMD rings */ -- mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; -- -- /* Write to MMIO registers */ -- for (i = 0; reg_info[i].offset; i++) -- mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, -- reg_info[i].mask, reg_info[i].shift, -- reg_info[i].val); -- -- return 0; --} -- --void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *buf_ring; -- struct mhi_ring *tre_ring; -- struct mhi_chan_ctxt *chan_ctxt; -- u32 tmp; -- -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; -- -- if (!chan_ctxt->rbase) /* Already uninitialized */ -- return; -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, -- tre_ring->pre_aligned, tre_ring->dma_handle); -- vfree(buf_ring->base); -- -- buf_ring->base = tre_ring->base = NULL; -- tre_ring->ctxt_wp = NULL; -- chan_ctxt->rbase = 0; -- chan_ctxt->rlen = 0; -- chan_ctxt->rp = 0; -- chan_ctxt->wp = 0; -- -- tmp = chan_ctxt->chcfg; -- tmp &= ~CHAN_CTX_CHSTATE_MASK; -- tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); -- chan_ctxt->chcfg = tmp; -- -- /* Update to all cores */ -- smp_wmb(); --} -- --int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *buf_ring; -- struct mhi_ring *tre_ring; -- struct mhi_chan_ctxt *chan_ctxt; -- u32 tmp; -- int ret; -- -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- tre_ring->el_size = sizeof(struct mhi_tre); -- tre_ring->len = tre_ring->el_size * 
tre_ring->elements; -- chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; -- ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); -- if (ret) -- return -ENOMEM; -- -- buf_ring->el_size = sizeof(struct mhi_buf_info); -- buf_ring->len = buf_ring->el_size * buf_ring->elements; -- buf_ring->base = vzalloc(buf_ring->len); -- -- if (!buf_ring->base) { -- dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, -- tre_ring->pre_aligned, tre_ring->dma_handle); -- return -ENOMEM; -- } -- -- tmp = chan_ctxt->chcfg; -- tmp &= ~CHAN_CTX_CHSTATE_MASK; -- tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); -- chan_ctxt->chcfg = tmp; -- -- chan_ctxt->rbase = tre_ring->iommu_base; -- chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; -- chan_ctxt->rlen = tre_ring->len; -- tre_ring->ctxt_wp = &chan_ctxt->wp; -- -- tre_ring->rp = tre_ring->wp = tre_ring->base; -- buf_ring->rp = buf_ring->wp = buf_ring->base; -- mhi_chan->db_cfg.db_mode = 1; -- -- /* Update to all cores */ -- smp_wmb(); -- -- return 0; --} -- --static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, -- const struct mhi_controller_config *config) --{ -- struct mhi_event *mhi_event; -- const struct mhi_event_config *event_cfg; -- struct device *dev = mhi_cntrl->cntrl_dev; -- int i, num; -- -- num = config->num_events; -- mhi_cntrl->total_ev_rings = num; -- mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), -- GFP_KERNEL); -- if (!mhi_cntrl->mhi_event) -- return -ENOMEM; -- -- /* Populate event ring */ -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < num; i++) { -- event_cfg = &config->event_cfg[i]; -- -- mhi_event->er_index = i; -- mhi_event->ring.elements = event_cfg->num_elements; -- mhi_event->intmod = event_cfg->irq_moderation_ms; -- mhi_event->irq = event_cfg->irq; -- -- if (event_cfg->channel != U32_MAX) { -- /* This event ring has a dedicated channel */ -- mhi_event->chan = event_cfg->channel; -- if (mhi_event->chan >= mhi_cntrl->max_chan) { -- dev_err(dev, -- "Event Ring channel not available\n"); -- goto error_ev_cfg; -- } -- -- mhi_event->mhi_chan = -- &mhi_cntrl->mhi_chan[mhi_event->chan]; -- } -- -- /* Priority is fixed to 1 for now */ -- mhi_event->priority = 1; -- -- mhi_event->db_cfg.brstmode = event_cfg->mode; -- if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) -- goto error_ev_cfg; -- -- if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) -- mhi_event->db_cfg.process_db = mhi_db_brstmode; -- else -- mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; -- -- mhi_event->data_type = event_cfg->data_type; -- -- switch (mhi_event->data_type) { -- case MHI_ER_DATA: -- mhi_event->process_event = mhi_process_data_event_ring; -- break; -- case MHI_ER_CTRL: -- mhi_event->process_event = mhi_process_ctrl_ev_ring; -- break; -- default: -- dev_err(dev, "Event Ring type not supported\n"); -- goto error_ev_cfg; -- } -- -- mhi_event->hw_ring = event_cfg->hardware_event; -- if (mhi_event->hw_ring) -- mhi_cntrl->hw_ev_rings++; -- else -- mhi_cntrl->sw_ev_rings++; -- -- mhi_event->cl_manage = event_cfg->client_managed; -- mhi_event->offload_ev = event_cfg->offload_channel; -- mhi_event++; -- } -- -- return 0; -- --error_ev_cfg: -- -- kfree(mhi_cntrl->mhi_event); -- return -EINVAL; --} -- --static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, -- const struct mhi_controller_config *config) --{ -- const struct mhi_channel_config *ch_cfg; -- struct device *dev = mhi_cntrl->cntrl_dev; -- int i; -- u32 chan; -- -- mhi_cntrl->max_chan = config->max_channels; -- -- /* -- * The 
allocation of MHI channels can exceed 32KB in some scenarios, -- * so to avoid any memory possible allocation failures, vzalloc is -- * used here -- */ -- mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * -- sizeof(*mhi_cntrl->mhi_chan)); -- if (!mhi_cntrl->mhi_chan) -- return -ENOMEM; -- -- INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); -- -- /* Populate channel configurations */ -- for (i = 0; i < config->num_channels; i++) { -- struct mhi_chan *mhi_chan; -- -- ch_cfg = &config->ch_cfg[i]; -- -- chan = ch_cfg->num; -- if (chan >= mhi_cntrl->max_chan) { -- dev_err(dev, "Channel %d not available\n", chan); -- goto error_chan_cfg; -- } -- -- mhi_chan = &mhi_cntrl->mhi_chan[chan]; -- mhi_chan->name = ch_cfg->name; -- mhi_chan->chan = chan; -- -- mhi_chan->tre_ring.elements = ch_cfg->num_elements; -- if (!mhi_chan->tre_ring.elements) -- goto error_chan_cfg; -- -- /* -- * For some channels, local ring length should be bigger than -- * the transfer ring length due to internal logical channels -- * in device. So host can queue much more buffers than transfer -- * ring length. Example, RSC channels should have a larger local -- * channel length than transfer ring length. -- */ -- mhi_chan->buf_ring.elements = ch_cfg->local_elements; -- if (!mhi_chan->buf_ring.elements) -- mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; -- mhi_chan->er_index = ch_cfg->event_ring; -- mhi_chan->dir = ch_cfg->dir; -- -- /* -- * For most channels, chtype is identical to channel directions. -- * So, if it is not defined then assign channel direction to -- * chtype -- */ -- mhi_chan->type = ch_cfg->type; -- if (!mhi_chan->type) -- mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; -- -- mhi_chan->ee_mask = ch_cfg->ee_mask; -- mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; -- mhi_chan->lpm_notify = ch_cfg->lpm_notify; -- mhi_chan->offload_ch = ch_cfg->offload_channel; -- mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; -- mhi_chan->pre_alloc = ch_cfg->auto_queue; -- -- /* -- * If MHI host allocates buffers, then the channel direction -- * should be DMA_FROM_DEVICE -- */ -- if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { -- dev_err(dev, "Invalid channel configuration\n"); -- goto error_chan_cfg; -- } -- -- /* -- * Bi-directional and direction less channel must be an -- * offload channel -- */ -- if ((mhi_chan->dir == DMA_BIDIRECTIONAL || -- mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { -- dev_err(dev, "Invalid channel configuration\n"); -- goto error_chan_cfg; -- } -- -- if (!mhi_chan->offload_ch) { -- mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; -- if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { -- dev_err(dev, "Invalid Door bell mode\n"); -- goto error_chan_cfg; -- } -- } -- -- if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) -- mhi_chan->db_cfg.process_db = mhi_db_brstmode; -- else -- mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; -- -- mhi_chan->configured = true; -- -- if (mhi_chan->lpm_notify) -- list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); -- } -- -- return 0; -- --error_chan_cfg: -- vfree(mhi_cntrl->mhi_chan); -- -- return -EINVAL; --} -- --static int parse_config(struct mhi_controller *mhi_cntrl, -- const struct mhi_controller_config *config) --{ -- int ret; -- -- /* Parse MHI channel configuration */ -- ret = parse_ch_cfg(mhi_cntrl, config); -- if (ret) -- return ret; -- -- /* Parse MHI event configuration */ -- ret = parse_ev_cfg(mhi_cntrl, config); -- if (ret) -- goto error_ev_cfg; -- -- mhi_cntrl->timeout_ms = config->timeout_ms; -- if 
(!mhi_cntrl->timeout_ms) -- mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; -- -- mhi_cntrl->bounce_buf = config->use_bounce_buf; -- mhi_cntrl->buffer_len = config->buf_len; -- if (!mhi_cntrl->buffer_len) -- mhi_cntrl->buffer_len = MHI_MAX_MTU; -- -- /* By default, host is allowed to ring DB in both M0 and M2 states */ -- mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; -- if (config->m2_no_db) -- mhi_cntrl->db_access &= ~MHI_PM_M2; -- -- return 0; -- --error_ev_cfg: -- vfree(mhi_cntrl->mhi_chan); -- -- return ret; --} -- --int mhi_register_controller(struct mhi_controller *mhi_cntrl, -- const struct mhi_controller_config *config) --{ -- struct mhi_event *mhi_event; -- struct mhi_chan *mhi_chan; -- struct mhi_cmd *mhi_cmd; -- struct mhi_device *mhi_dev; -- u32 soc_info; -- int ret, i; -- -- if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || -- !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || -- !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || -- !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || -- !mhi_cntrl->irq || !mhi_cntrl->reg_len) -- return -EINVAL; -- -- ret = parse_config(mhi_cntrl, config); -- if (ret) -- return -EINVAL; -- -- mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, -- sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); -- if (!mhi_cntrl->mhi_cmd) { -- ret = -ENOMEM; -- goto err_free_event; -- } -- -- INIT_LIST_HEAD(&mhi_cntrl->transition_list); -- mutex_init(&mhi_cntrl->pm_mutex); -- rwlock_init(&mhi_cntrl->pm_lock); -- spin_lock_init(&mhi_cntrl->transition_lock); -- spin_lock_init(&mhi_cntrl->wlock); -- INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); -- init_waitqueue_head(&mhi_cntrl->state_event); -- -- mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); -- if (!mhi_cntrl->hiprio_wq) { -- dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); -- ret = -ENOMEM; -- goto err_free_cmd; -- } -- -- mhi_cmd = mhi_cntrl->mhi_cmd; -- for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) -- spin_lock_init(&mhi_cmd->lock); -- -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- /* Skip for offload events */ -- if (mhi_event->offload_ev) -- continue; -- -- mhi_event->mhi_cntrl = mhi_cntrl; -- spin_lock_init(&mhi_event->lock); -- if (mhi_event->data_type == MHI_ER_CTRL) -- tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, -- (ulong)mhi_event); -- else -- tasklet_init(&mhi_event->task, mhi_ev_task, -- (ulong)mhi_event); -- } -- -- mhi_chan = mhi_cntrl->mhi_chan; -- for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -- mutex_init(&mhi_chan->mutex); -- init_completion(&mhi_chan->completion); -- rwlock_init(&mhi_chan->lock); -- -- /* used in setting bei field of TRE */ -- mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -- mhi_chan->intmod = mhi_event->intmod; -- } -- -- if (mhi_cntrl->bounce_buf) { -- mhi_cntrl->map_single = mhi_map_single_use_bb; -- mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; -- } else { -- mhi_cntrl->map_single = mhi_map_single_no_bb; -- mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; -- } -- -- /* Read the MHI device info */ -- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, -- SOC_HW_VERSION_OFFS, &soc_info); -- if (ret) -- goto err_destroy_wq; -- -- mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> -- SOC_HW_VERSION_FAM_NUM_SHFT; -- mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> -- SOC_HW_VERSION_DEV_NUM_SHFT; -- mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> -- SOC_HW_VERSION_MAJOR_VER_SHFT; -- 
mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> -- SOC_HW_VERSION_MINOR_VER_SHFT; -- -- mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); -- if (mhi_cntrl->index < 0) { -- ret = mhi_cntrl->index; -- goto err_destroy_wq; -- } -- -- /* Register controller with MHI bus */ -- mhi_dev = mhi_alloc_device(mhi_cntrl); -- if (IS_ERR(mhi_dev)) { -- dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); -- ret = PTR_ERR(mhi_dev); -- goto err_ida_free; -- } -- -- mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; -- mhi_dev->mhi_cntrl = mhi_cntrl; -- dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); -- mhi_dev->name = dev_name(&mhi_dev->dev); -- -- /* Init wakeup source */ -- device_init_wakeup(&mhi_dev->dev, true); -- -- ret = device_add(&mhi_dev->dev); -- if (ret) -- goto err_release_dev; -- -- mhi_cntrl->mhi_dev = mhi_dev; -- -- mhi_create_debugfs(mhi_cntrl); -- -- return 0; -- --err_release_dev: -- put_device(&mhi_dev->dev); --err_ida_free: -- ida_free(&mhi_controller_ida, mhi_cntrl->index); --err_destroy_wq: -- destroy_workqueue(mhi_cntrl->hiprio_wq); --err_free_cmd: -- kfree(mhi_cntrl->mhi_cmd); --err_free_event: -- kfree(mhi_cntrl->mhi_event); -- vfree(mhi_cntrl->mhi_chan); -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_register_controller); -- --void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -- struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; -- unsigned int i; -- -- mhi_destroy_debugfs(mhi_cntrl); -- -- destroy_workqueue(mhi_cntrl->hiprio_wq); -- kfree(mhi_cntrl->mhi_cmd); -- kfree(mhi_cntrl->mhi_event); -- -- /* Drop the references to MHI devices created for channels */ -- for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -- if (!mhi_chan->mhi_dev) -- continue; -- -- put_device(&mhi_chan->mhi_dev->dev); -- } -- vfree(mhi_cntrl->mhi_chan); -- -- device_del(&mhi_dev->dev); -- put_device(&mhi_dev->dev); -- -- ida_free(&mhi_controller_ida, mhi_cntrl->index); --} --EXPORT_SYMBOL_GPL(mhi_unregister_controller); -- --struct mhi_controller *mhi_alloc_controller(void) --{ -- struct mhi_controller *mhi_cntrl; -- -- mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); -- -- return mhi_cntrl; --} --EXPORT_SYMBOL_GPL(mhi_alloc_controller); -- --void mhi_free_controller(struct mhi_controller *mhi_cntrl) --{ -- kfree(mhi_cntrl); --} --EXPORT_SYMBOL_GPL(mhi_free_controller); -- --int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) --{ -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 bhi_off, bhie_off; -- int ret; -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- -- ret = mhi_init_dev_ctxt(mhi_cntrl); -- if (ret) -- goto error_dev_ctxt; -- -- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); -- if (ret) { -- dev_err(dev, "Error getting BHI offset\n"); -- goto error_reg_offset; -- } -- -- if (bhi_off >= mhi_cntrl->reg_len) { -- dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", -- bhi_off, mhi_cntrl->reg_len); -- ret = -EINVAL; -- goto error_reg_offset; -- } -- mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; -- -- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { -- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, -- &bhie_off); -- if (ret) { -- dev_err(dev, "Error getting BHIE offset\n"); -- goto error_reg_offset; -- } -- -- if (bhie_off >= mhi_cntrl->reg_len) { -- dev_err(dev, -- "BHIe offset: 0x%x is out of range: 0x%zx\n", -- bhie_off, mhi_cntrl->reg_len); -- ret = -EINVAL; -- goto error_reg_offset; -- } -- mhi_cntrl->bhie = 
mhi_cntrl->regs + bhie_off; -- } -- -- if (mhi_cntrl->rddm_size) { -- /* -- * This controller supports RDDM, so we need to manually clear -- * BHIE RX registers since POR values are undefined. -- */ -- memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, -- 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + -- 4); -- /* -- * Allocate RDDM table for debugging purpose if specified -- */ -- mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, -- mhi_cntrl->rddm_size); -- if (mhi_cntrl->rddm_image) -- mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); -- } -- -- mutex_unlock(&mhi_cntrl->pm_mutex); -- -- return 0; -- --error_reg_offset: -- mhi_deinit_dev_ctxt(mhi_cntrl); -- --error_dev_ctxt: -- mutex_unlock(&mhi_cntrl->pm_mutex); -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); -- --void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) --{ -- if (mhi_cntrl->fbc_image) { -- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); -- mhi_cntrl->fbc_image = NULL; -- } -- -- if (mhi_cntrl->rddm_image) { -- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); -- mhi_cntrl->rddm_image = NULL; -- } -- -- mhi_cntrl->bhi = NULL; -- mhi_cntrl->bhie = NULL; -- -- mhi_deinit_dev_ctxt(mhi_cntrl); --} --EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); -- --static void mhi_release_device(struct device *dev) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- -- /* -- * We need to set the mhi_chan->mhi_dev to NULL here since the MHI -- * devices for the channels will only get created if the mhi_dev -- * associated with it is NULL. This scenario will happen during the -- * controller suspend and resume. -- */ -- if (mhi_dev->ul_chan) -- mhi_dev->ul_chan->mhi_dev = NULL; -- -- if (mhi_dev->dl_chan) -- mhi_dev->dl_chan->mhi_dev = NULL; -- -- kfree(mhi_dev); --} -- --struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_device *mhi_dev; -- struct device *dev; -- -- mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); -- if (!mhi_dev) -- return ERR_PTR(-ENOMEM); -- -- dev = &mhi_dev->dev; -- device_initialize(dev); -- dev->bus = &mhi_bus_type; -- dev->release = mhi_release_device; -- -- if (mhi_cntrl->mhi_dev) { -- /* for MHI client devices, parent is the MHI controller device */ -- dev->parent = &mhi_cntrl->mhi_dev->dev; -- } else { -- /* for MHI controller device, parent is the bus device (e.g. 
pci device) */ -- dev->parent = mhi_cntrl->cntrl_dev; -- } -- -- mhi_dev->mhi_cntrl = mhi_cntrl; -- mhi_dev->dev_wake = 0; -- -- return mhi_dev; --} -- --static int mhi_driver_probe(struct device *dev) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct device_driver *drv = dev->driver; -- struct mhi_driver *mhi_drv = to_mhi_driver(drv); -- struct mhi_event *mhi_event; -- struct mhi_chan *ul_chan = mhi_dev->ul_chan; -- struct mhi_chan *dl_chan = mhi_dev->dl_chan; -- int ret; -- -- /* Bring device out of LPM */ -- ret = mhi_device_get_sync(mhi_dev); -- if (ret) -- return ret; -- -- ret = -EINVAL; -- -- if (ul_chan) { -- /* -- * If channel supports LPM notifications then status_cb should -- * be provided -- */ -- if (ul_chan->lpm_notify && !mhi_drv->status_cb) -- goto exit_probe; -- -- /* For non-offload channels then xfer_cb should be provided */ -- if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) -- goto exit_probe; -- -- ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; -- } -- -- ret = -EINVAL; -- if (dl_chan) { -- /* -- * If channel supports LPM notifications then status_cb should -- * be provided -- */ -- if (dl_chan->lpm_notify && !mhi_drv->status_cb) -- goto exit_probe; -- -- /* For non-offload channels then xfer_cb should be provided */ -- if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) -- goto exit_probe; -- -- mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; -- -- /* -- * If the channel event ring is managed by client, then -- * status_cb must be provided so that the framework can -- * notify pending data -- */ -- if (mhi_event->cl_manage && !mhi_drv->status_cb) -- goto exit_probe; -- -- dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; -- } -- -- /* Call the user provided probe function */ -- ret = mhi_drv->probe(mhi_dev, mhi_dev->id); -- if (ret) -- goto exit_probe; -- -- mhi_device_put(mhi_dev); -- -- return ret; -- --exit_probe: -- mhi_unprepare_from_transfer(mhi_dev); -- -- mhi_device_put(mhi_dev); -- -- return ret; --} -- --static int mhi_driver_remove(struct device *dev) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan; -- enum mhi_ch_state ch_state[] = { -- MHI_CH_STATE_DISABLED, -- MHI_CH_STATE_DISABLED -- }; -- int dir; -- -- /* Skip if it is a controller device */ -- if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -- return 0; -- -- /* Reset both channels */ -- for (dir = 0; dir < 2; dir++) { -- mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; -- -- if (!mhi_chan) -- continue; -- -- /* Wake all threads waiting for completion */ -- write_lock_irq(&mhi_chan->lock); -- mhi_chan->ccs = MHI_EV_CC_INVALID; -- complete_all(&mhi_chan->completion); -- write_unlock_irq(&mhi_chan->lock); -- -- /* Set the channel state to disabled */ -- mutex_lock(&mhi_chan->mutex); -- write_lock_irq(&mhi_chan->lock); -- ch_state[dir] = mhi_chan->ch_state; -- mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; -- write_unlock_irq(&mhi_chan->lock); -- -- /* Reset the non-offload channel */ -- if (!mhi_chan->offload_ch) -- mhi_reset_chan(mhi_cntrl, mhi_chan); -- -- mutex_unlock(&mhi_chan->mutex); -- } -- -- mhi_drv->remove(mhi_dev); -- -- /* De-init channel if it was enabled */ -- for (dir = 0; dir < 2; dir++) { -- mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; -- -- if (!mhi_chan) -- continue; -- -- mutex_lock(&mhi_chan->mutex); -- -- if ((ch_state[dir] == MHI_CH_STATE_ENABLED || -- ch_state[dir] == MHI_CH_STATE_STOP) && -- !mhi_chan->offload_ch) -- mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -- -- mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -- -- mutex_unlock(&mhi_chan->mutex); -- } -- -- while (mhi_dev->dev_wake) -- mhi_device_put(mhi_dev); -- -- return 0; --} -- --int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) --{ -- struct device_driver *driver = &mhi_drv->driver; -- -- if (!mhi_drv->probe || !mhi_drv->remove) -- return -EINVAL; -- -- driver->bus = &mhi_bus_type; -- driver->owner = owner; -- driver->probe = mhi_driver_probe; -- driver->remove = mhi_driver_remove; -- -- return driver_register(driver); --} --EXPORT_SYMBOL_GPL(__mhi_driver_register); -- --void mhi_driver_unregister(struct mhi_driver *mhi_drv) --{ -- driver_unregister(&mhi_drv->driver); --} --EXPORT_SYMBOL_GPL(mhi_driver_unregister); -- --static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- -- return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, -- mhi_dev->name); --} -- --static int mhi_match(struct device *dev, struct device_driver *drv) --{ -- struct mhi_device *mhi_dev = to_mhi_device(dev); -- struct mhi_driver *mhi_drv = to_mhi_driver(drv); -- const struct mhi_device_id *id; -- -- /* -- * If the device is a controller type then there is no client driver -- * associated with it -- */ -- if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -- return 0; -- -- for (id = mhi_drv->id_table; id->chan[0]; id++) -- if (!strcmp(mhi_dev->name, id->chan)) { -- mhi_dev->id = id; -- return 1; -- } -- -- return 0; --}; -- --struct bus_type mhi_bus_type = { -- .name = "mhi", -- .dev_name = "mhi", -- .match = mhi_match, -- .uevent = mhi_uevent, -- .dev_groups = mhi_dev_groups, --}; -- --static int __init mhi_init(void) --{ -- mhi_debugfs_init(); -- return bus_register(&mhi_bus_type); --} -- --static void __exit mhi_exit(void) --{ -- mhi_debugfs_exit(); -- bus_unregister(&mhi_bus_type); --} -- --postcore_initcall(mhi_init); --module_exit(mhi_exit); -- --MODULE_LICENSE("GPL v2"); --MODULE_DESCRIPTION("MHI Host Interface"); -diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h -deleted file mode 100644 -index 3a732afaf73ed..0000000000000 ---- a/drivers/bus/mhi/core/internal.h -+++ /dev/null -@@ -1,717 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --/* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
-- * -- */ -- --#ifndef _MHI_INT_H --#define _MHI_INT_H -- --#include -- --extern struct bus_type mhi_bus_type; -- --#define MHIREGLEN (0x0) --#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) --#define MHIREGLEN_MHIREGLEN_SHIFT (0) -- --#define MHIVER (0x8) --#define MHIVER_MHIVER_MASK (0xFFFFFFFF) --#define MHIVER_MHIVER_SHIFT (0) -- --#define MHICFG (0x10) --#define MHICFG_NHWER_MASK (0xFF000000) --#define MHICFG_NHWER_SHIFT (24) --#define MHICFG_NER_MASK (0xFF0000) --#define MHICFG_NER_SHIFT (16) --#define MHICFG_NHWCH_MASK (0xFF00) --#define MHICFG_NHWCH_SHIFT (8) --#define MHICFG_NCH_MASK (0xFF) --#define MHICFG_NCH_SHIFT (0) -- --#define CHDBOFF (0x18) --#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) --#define CHDBOFF_CHDBOFF_SHIFT (0) -- --#define ERDBOFF (0x20) --#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) --#define ERDBOFF_ERDBOFF_SHIFT (0) -- --#define BHIOFF (0x28) --#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) --#define BHIOFF_BHIOFF_SHIFT (0) -- --#define BHIEOFF (0x2C) --#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) --#define BHIEOFF_BHIEOFF_SHIFT (0) -- --#define DEBUGOFF (0x30) --#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) --#define DEBUGOFF_DEBUGOFF_SHIFT (0) -- --#define MHICTRL (0x38) --#define MHICTRL_MHISTATE_MASK (0x0000FF00) --#define MHICTRL_MHISTATE_SHIFT (8) --#define MHICTRL_RESET_MASK (0x2) --#define MHICTRL_RESET_SHIFT (1) -- --#define MHISTATUS (0x48) --#define MHISTATUS_MHISTATE_MASK (0x0000FF00) --#define MHISTATUS_MHISTATE_SHIFT (8) --#define MHISTATUS_SYSERR_MASK (0x4) --#define MHISTATUS_SYSERR_SHIFT (2) --#define MHISTATUS_READY_MASK (0x1) --#define MHISTATUS_READY_SHIFT (0) -- --#define CCABAP_LOWER (0x58) --#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) --#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) -- --#define CCABAP_HIGHER (0x5C) --#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) --#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) -- --#define ECABAP_LOWER (0x60) --#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) --#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) -- --#define ECABAP_HIGHER (0x64) --#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) --#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) -- --#define CRCBAP_LOWER (0x68) --#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) --#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) -- --#define CRCBAP_HIGHER (0x6C) --#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) --#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) -- --#define CRDB_LOWER (0x70) --#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) --#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) -- --#define CRDB_HIGHER (0x74) --#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) --#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) -- --#define MHICTRLBASE_LOWER (0x80) --#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) --#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) -- --#define MHICTRLBASE_HIGHER (0x84) --#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) --#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) -- --#define MHICTRLLIMIT_LOWER (0x88) --#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) --#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) -- --#define MHICTRLLIMIT_HIGHER (0x8C) --#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) --#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) -- --#define MHIDATABASE_LOWER (0x98) --#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) --#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) -- --#define MHIDATABASE_HIGHER (0x9C) --#define 
MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) --#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) -- --#define MHIDATALIMIT_LOWER (0xA0) --#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) --#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) -- --#define MHIDATALIMIT_HIGHER (0xA4) --#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) --#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) -- --/* Host request register */ --#define MHI_SOC_RESET_REQ_OFFSET (0xB0) --#define MHI_SOC_RESET_REQ BIT(0) -- --/* MHI BHI offfsets */ --#define BHI_BHIVERSION_MINOR (0x00) --#define BHI_BHIVERSION_MAJOR (0x04) --#define BHI_IMGADDR_LOW (0x08) --#define BHI_IMGADDR_HIGH (0x0C) --#define BHI_IMGSIZE (0x10) --#define BHI_RSVD1 (0x14) --#define BHI_IMGTXDB (0x18) --#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) --#define BHI_TXDB_SEQNUM_SHFT (0) --#define BHI_RSVD2 (0x1C) --#define BHI_INTVEC (0x20) --#define BHI_RSVD3 (0x24) --#define BHI_EXECENV (0x28) --#define BHI_STATUS (0x2C) --#define BHI_ERRCODE (0x30) --#define BHI_ERRDBG1 (0x34) --#define BHI_ERRDBG2 (0x38) --#define BHI_ERRDBG3 (0x3C) --#define BHI_SERIALNU (0x40) --#define BHI_SBLANTIROLLVER (0x44) --#define BHI_NUMSEG (0x48) --#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) --#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) --#define BHI_RSVD5 (0xC4) --#define BHI_STATUS_MASK (0xC0000000) --#define BHI_STATUS_SHIFT (30) --#define BHI_STATUS_ERROR (3) --#define BHI_STATUS_SUCCESS (2) --#define BHI_STATUS_RESET (0) -- --/* MHI BHIE offsets */ --#define BHIE_MSMSOCID_OFFS (0x0000) --#define BHIE_TXVECADDR_LOW_OFFS (0x002C) --#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) --#define BHIE_TXVECSIZE_OFFS (0x0034) --#define BHIE_TXVECDB_OFFS (0x003C) --#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) --#define BHIE_TXVECDB_SEQNUM_SHFT (0) --#define BHIE_TXVECSTATUS_OFFS (0x0044) --#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) --#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) --#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) --#define BHIE_TXVECSTATUS_STATUS_SHFT (30) --#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) --#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) --#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) --#define BHIE_RXVECADDR_LOW_OFFS (0x0060) --#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) --#define BHIE_RXVECSIZE_OFFS (0x0068) --#define BHIE_RXVECDB_OFFS (0x0070) --#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) --#define BHIE_RXVECDB_SEQNUM_SHFT (0) --#define BHIE_RXVECSTATUS_OFFS (0x0078) --#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) --#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) --#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) --#define BHIE_RXVECSTATUS_STATUS_SHFT (30) --#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) --#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) --#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) -- --#define SOC_HW_VERSION_OFFS (0x224) --#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) --#define SOC_HW_VERSION_FAM_NUM_SHFT (28) --#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) --#define SOC_HW_VERSION_DEV_NUM_SHFT (16) --#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) --#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) --#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) --#define SOC_HW_VERSION_MINOR_VER_SHFT (0) -- --#define EV_CTX_RESERVED_MASK GENMASK(7, 0) --#define EV_CTX_INTMODC_MASK GENMASK(15, 8) --#define EV_CTX_INTMODC_SHIFT 8 --#define EV_CTX_INTMODT_MASK GENMASK(31, 16) --#define EV_CTX_INTMODT_SHIFT 16 --struct mhi_event_ctxt { -- __u32 intmod; -- __u32 ertype; -- __u32 msivec; 
-- -- __u64 rbase __packed __aligned(4); -- __u64 rlen __packed __aligned(4); -- __u64 rp __packed __aligned(4); -- __u64 wp __packed __aligned(4); --}; -- --#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) --#define CHAN_CTX_CHSTATE_SHIFT 0 --#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) --#define CHAN_CTX_BRSTMODE_SHIFT 8 --#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) --#define CHAN_CTX_POLLCFG_SHIFT 10 --#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) --struct mhi_chan_ctxt { -- __u32 chcfg; -- __u32 chtype; -- __u32 erindex; -- -- __u64 rbase __packed __aligned(4); -- __u64 rlen __packed __aligned(4); -- __u64 rp __packed __aligned(4); -- __u64 wp __packed __aligned(4); --}; -- --struct mhi_cmd_ctxt { -- __u32 reserved0; -- __u32 reserved1; -- __u32 reserved2; -- -- __u64 rbase __packed __aligned(4); -- __u64 rlen __packed __aligned(4); -- __u64 rp __packed __aligned(4); -- __u64 wp __packed __aligned(4); --}; -- --struct mhi_ctxt { -- struct mhi_event_ctxt *er_ctxt; -- struct mhi_chan_ctxt *chan_ctxt; -- struct mhi_cmd_ctxt *cmd_ctxt; -- dma_addr_t er_ctxt_addr; -- dma_addr_t chan_ctxt_addr; -- dma_addr_t cmd_ctxt_addr; --}; -- --struct mhi_tre { -- u64 ptr; -- u32 dword[2]; --}; -- --struct bhi_vec_entry { -- u64 dma_addr; -- u64 size; --}; -- --enum mhi_cmd_type { -- MHI_CMD_NOP = 1, -- MHI_CMD_RESET_CHAN = 16, -- MHI_CMD_STOP_CHAN = 17, -- MHI_CMD_START_CHAN = 18, --}; -- --/* No operation command */ --#define MHI_TRE_CMD_NOOP_PTR (0) --#define MHI_TRE_CMD_NOOP_DWORD0 (0) --#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16) -- --/* Channel reset command */ --#define MHI_TRE_CMD_RESET_PTR (0) --#define MHI_TRE_CMD_RESET_DWORD0 (0) --#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ -- (MHI_CMD_RESET_CHAN << 16)) -- --/* Channel stop command */ --#define MHI_TRE_CMD_STOP_PTR (0) --#define MHI_TRE_CMD_STOP_DWORD0 (0) --#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \ -- (MHI_CMD_STOP_CHAN << 16)) -- --/* Channel start command */ --#define MHI_TRE_CMD_START_PTR (0) --#define MHI_TRE_CMD_START_DWORD0 (0) --#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ -- (MHI_CMD_START_CHAN << 16)) -- --#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) --#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) -- --/* Event descriptor macros */ --#define MHI_TRE_EV_PTR(ptr) (ptr) --#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) --#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) --#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) --#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) --#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) --#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) --#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) --#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) --#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) --#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) --#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) --#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) --#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) --#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF) --#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF) -- --/* Transfer descriptor macros */ --#define MHI_TRE_DATA_PTR(ptr) (ptr) --#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) --#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ -- | (ieot << 9) | (ieob << 8) | 
chain) -- --/* RSC transfer descriptor macros */ --#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) --#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) --#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) -- --enum mhi_pkt_type { -- MHI_PKT_TYPE_INVALID = 0x0, -- MHI_PKT_TYPE_NOOP_CMD = 0x1, -- MHI_PKT_TYPE_TRANSFER = 0x2, -- MHI_PKT_TYPE_COALESCING = 0x8, -- MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, -- MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, -- MHI_PKT_TYPE_START_CHAN_CMD = 0x12, -- MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, -- MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, -- MHI_PKT_TYPE_TX_EVENT = 0x22, -- MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, -- MHI_PKT_TYPE_EE_EVENT = 0x40, -- MHI_PKT_TYPE_TSYNC_EVENT = 0x48, -- MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, -- MHI_PKT_TYPE_STALE_EVENT, /* internal event */ --}; -- --/* MHI transfer completion events */ --enum mhi_ev_ccs { -- MHI_EV_CC_INVALID = 0x0, -- MHI_EV_CC_SUCCESS = 0x1, -- MHI_EV_CC_EOT = 0x2, /* End of transfer event */ -- MHI_EV_CC_OVERFLOW = 0x3, -- MHI_EV_CC_EOB = 0x4, /* End of block event */ -- MHI_EV_CC_OOB = 0x5, /* Out of block event */ -- MHI_EV_CC_DB_MODE = 0x6, -- MHI_EV_CC_UNDEFINED_ERR = 0x10, -- MHI_EV_CC_BAD_TRE = 0x11, --}; -- --enum mhi_ch_state { -- MHI_CH_STATE_DISABLED = 0x0, -- MHI_CH_STATE_ENABLED = 0x1, -- MHI_CH_STATE_RUNNING = 0x2, -- MHI_CH_STATE_SUSPENDED = 0x3, -- MHI_CH_STATE_STOP = 0x4, -- MHI_CH_STATE_ERROR = 0x5, --}; -- --enum mhi_ch_state_type { -- MHI_CH_STATE_TYPE_RESET, -- MHI_CH_STATE_TYPE_STOP, -- MHI_CH_STATE_TYPE_START, -- MHI_CH_STATE_TYPE_MAX, --}; -- --extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; --#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ -- "INVALID_STATE" : \ -- mhi_ch_state_type_str[(state)]) -- --#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ -- mode != MHI_DB_BRST_ENABLE) -- --extern const char * const mhi_ee_str[MHI_EE_MAX]; --#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ -- "INVALID_EE" : mhi_ee_str[ee]) -- --#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ -- ee == MHI_EE_EDL) -- --#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ -- ee == MHI_EE_FP) -- --enum dev_st_transition { -- DEV_ST_TRANSITION_PBL, -- DEV_ST_TRANSITION_READY, -- DEV_ST_TRANSITION_SBL, -- DEV_ST_TRANSITION_MISSION_MODE, -- DEV_ST_TRANSITION_FP, -- DEV_ST_TRANSITION_SYS_ERR, -- DEV_ST_TRANSITION_DISABLE, -- DEV_ST_TRANSITION_MAX, --}; -- --extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; --#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ -- "INVALID_STATE" : dev_state_tran_str[state]) -- --extern const char * const mhi_state_str[MHI_STATE_MAX]; --#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ -- !mhi_state_str[state]) ? 
\ -- "INVALID_STATE" : mhi_state_str[state]) -- --/* internal power states */ --enum mhi_pm_state { -- MHI_PM_STATE_DISABLE, -- MHI_PM_STATE_POR, -- MHI_PM_STATE_M0, -- MHI_PM_STATE_M2, -- MHI_PM_STATE_M3_ENTER, -- MHI_PM_STATE_M3, -- MHI_PM_STATE_M3_EXIT, -- MHI_PM_STATE_FW_DL_ERR, -- MHI_PM_STATE_SYS_ERR_DETECT, -- MHI_PM_STATE_SYS_ERR_PROCESS, -- MHI_PM_STATE_SHUTDOWN_PROCESS, -- MHI_PM_STATE_LD_ERR_FATAL_DETECT, -- MHI_PM_STATE_MAX --}; -- --#define MHI_PM_DISABLE BIT(0) --#define MHI_PM_POR BIT(1) --#define MHI_PM_M0 BIT(2) --#define MHI_PM_M2 BIT(3) --#define MHI_PM_M3_ENTER BIT(4) --#define MHI_PM_M3 BIT(5) --#define MHI_PM_M3_EXIT BIT(6) --/* firmware download failure state */ --#define MHI_PM_FW_DL_ERR BIT(7) --#define MHI_PM_SYS_ERR_DETECT BIT(8) --#define MHI_PM_SYS_ERR_PROCESS BIT(9) --#define MHI_PM_SHUTDOWN_PROCESS BIT(10) --/* link not accessible */ --#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) -- --#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ -- MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ -- MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ -- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) --#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) --#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) --#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ -- mhi_cntrl->db_access) --#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ -- MHI_PM_M2 | MHI_PM_M3_EXIT)) --#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) --#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) --#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ -- MHI_PM_IN_ERROR_STATE(pm_state)) --#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ -- (MHI_PM_M3_ENTER | MHI_PM_M3)) -- --#define NR_OF_CMD_RINGS 1 --#define CMD_EL_PER_RING 128 --#define PRIMARY_CMD_RING 0 --#define MHI_DEV_WAKE_DB 127 --#define MHI_MAX_MTU 0xffff --#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) -- --enum mhi_er_type { -- MHI_ER_TYPE_INVALID = 0x0, -- MHI_ER_TYPE_VALID = 0x1, --}; -- --struct db_cfg { -- bool reset_req; -- bool db_mode; -- u32 pollcfg; -- enum mhi_db_brst_mode brstmode; -- dma_addr_t db_val; -- void (*process_db)(struct mhi_controller *mhi_cntrl, -- struct db_cfg *db_cfg, void __iomem *io_addr, -- dma_addr_t db_val); --}; -- --struct mhi_pm_transitions { -- enum mhi_pm_state from_state; -- u32 to_states; --}; -- --struct state_transition { -- struct list_head node; -- enum dev_st_transition state; --}; -- --struct mhi_ring { -- dma_addr_t dma_handle; -- dma_addr_t iommu_base; -- u64 *ctxt_wp; /* point to ctxt wp */ -- void *pre_aligned; -- void *base; -- void *rp; -- void *wp; -- size_t el_size; -- size_t len; -- size_t elements; -- size_t alloc_size; -- void __iomem *db_addr; --}; -- --struct mhi_cmd { -- struct mhi_ring ring; -- spinlock_t lock; --}; -- --struct mhi_buf_info { -- void *v_addr; -- void *bb_addr; -- void *wp; -- void *cb_buf; -- dma_addr_t p_addr; -- size_t len; -- enum dma_data_direction dir; -- bool used; /* Indicates whether the buffer is used or not */ -- bool pre_mapped; /* Already pre-mapped by client */ --}; -- --struct mhi_event { -- struct mhi_controller *mhi_cntrl; -- struct mhi_chan *mhi_chan; /* dedicated to channel */ -- u32 er_index; -- u32 intmod; -- u32 irq; -- int chan; /* this event ring is dedicated to a channel (optional) */ -- u32 priority; -- enum mhi_er_data_type data_type; -- struct 
mhi_ring ring; -- struct db_cfg db_cfg; -- struct tasklet_struct task; -- spinlock_t lock; -- int (*process_event)(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, -- u32 event_quota); -- bool hw_ring; -- bool cl_manage; -- bool offload_ev; /* managed by a device driver */ --}; -- --struct mhi_chan { -- const char *name; -- /* -- * Important: When consuming, increment tre_ring first and when -- * releasing, decrement buf_ring first. If tre_ring has space, buf_ring -- * is guranteed to have space so we do not need to check both rings. -- */ -- struct mhi_ring buf_ring; -- struct mhi_ring tre_ring; -- u32 chan; -- u32 er_index; -- u32 intmod; -- enum mhi_ch_type type; -- enum dma_data_direction dir; -- struct db_cfg db_cfg; -- enum mhi_ch_ee_mask ee_mask; -- enum mhi_ch_state ch_state; -- enum mhi_ev_ccs ccs; -- struct mhi_device *mhi_dev; -- void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); -- struct mutex mutex; -- struct completion completion; -- rwlock_t lock; -- struct list_head node; -- bool lpm_notify; -- bool configured; -- bool offload_ch; -- bool pre_alloc; -- bool wake_capable; --}; -- --/* Default MHI timeout */ --#define MHI_TIMEOUT_MS (1000) -- --/* debugfs related functions */ --#ifdef CONFIG_MHI_BUS_DEBUG --void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); --void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); --void mhi_debugfs_init(void); --void mhi_debugfs_exit(void); --#else --static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) --{ --} -- --static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) --{ --} -- --static inline void mhi_debugfs_init(void) --{ --} -- --static inline void mhi_debugfs_exit(void) --{ --} --#endif -- --struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); -- --int mhi_destroy_device(struct device *dev, void *data); --void mhi_create_devices(struct mhi_controller *mhi_cntrl); -- --int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, -- struct image_info **image_info, size_t alloc_size); --void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, -- struct image_info *image_info); -- --/* Power management APIs */ --enum mhi_pm_state __must_check mhi_tryset_pm_state( -- struct mhi_controller *mhi_cntrl, -- enum mhi_pm_state state); --const char *to_mhi_pm_state_str(enum mhi_pm_state state); --int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, -- enum dev_st_transition state); --void mhi_pm_st_worker(struct work_struct *work); --void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); --int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); --int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); --void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); --int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); --int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); --int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -- enum mhi_cmd_type cmd); --int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); --static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) --{ -- return (mhi_cntrl->dev_state >= MHI_STATE_M0 && -- mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); --} -- --static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) --{ -- pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); -- mhi_cntrl->runtime_get(mhi_cntrl); -- mhi_cntrl->runtime_put(mhi_cntrl); --} -- --/* Register access methods */ --void mhi_db_brstmode(struct 
mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, -- void __iomem *db_addr, dma_addr_t db_val); --void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, -- struct db_cfg *db_mode, void __iomem *db_addr, -- dma_addr_t db_val); --int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, u32 *out); --int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, u32 mask, -- u32 shift, u32 *out); --int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, u32 mask, -- u32 shift, u32 val, u32 delayus); --void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, -- u32 offset, u32 val); --void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, -- u32 offset, u32 mask, u32 shift, u32 val); --void mhi_ring_er_db(struct mhi_event *mhi_event); --void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, -- dma_addr_t db_val); --void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); --void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan); -- --/* Initialization methods */ --int mhi_init_mmio(struct mhi_controller *mhi_cntrl); --int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); --void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); --int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); --void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); --void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, -- struct image_info *img_info); --void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); --int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan); --int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan); --void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan); --void mhi_reset_chan(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan); -- --/* Event processing methods */ --void mhi_ctrl_ev_task(unsigned long data); --void mhi_ev_task(unsigned long data); --int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, u32 event_quota); --int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, u32 event_quota); -- --/* ISR handlers */ --irqreturn_t mhi_irq_handler(int irq_number, void *dev); --irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); --irqreturn_t mhi_intvec_handler(int irq_number, void *dev); -- --int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -- struct mhi_buf_info *info, enum mhi_flags flags); --int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info); --int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info); --void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info); --void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info); -- --#endif /* _MHI_INT_H */ -diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c -deleted file mode 100644 -index b15c5bc37dd4f..0000000000000 ---- a/drivers/bus/mhi/core/main.c -+++ /dev/null -@@ -1,1673 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
-- * -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include "internal.h" -- --int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, u32 *out) --{ -- return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); --} -- --int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, -- u32 mask, u32 shift, u32 *out) --{ -- u32 tmp; -- int ret; -- -- ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); -- if (ret) -- return ret; -- -- *out = (tmp & mask) >> shift; -- -- return 0; --} -- --int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, -- void __iomem *base, u32 offset, -- u32 mask, u32 shift, u32 val, u32 delayus) --{ -- int ret; -- u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; -- -- while (retry--) { -- ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, -- &out); -- if (ret) -- return ret; -- -- if (out == val) -- return 0; -- -- fsleep(delayus); -- } -- -- return -ETIMEDOUT; --} -- --void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, -- u32 offset, u32 val) --{ -- mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); --} -- --void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, -- u32 offset, u32 mask, u32 shift, u32 val) --{ -- int ret; -- u32 tmp; -- -- ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); -- if (ret) -- return; -- -- tmp &= ~mask; -- tmp |= (val << shift); -- mhi_write_reg(mhi_cntrl, base, offset, tmp); --} -- --void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, -- dma_addr_t db_val) --{ -- mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); -- mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); --} -- --void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, -- struct db_cfg *db_cfg, -- void __iomem *db_addr, -- dma_addr_t db_val) --{ -- if (db_cfg->db_mode) { -- db_cfg->db_val = db_val; -- mhi_write_db(mhi_cntrl, db_addr, db_val); -- db_cfg->db_mode = 0; -- } --} -- --void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, -- struct db_cfg *db_cfg, -- void __iomem *db_addr, -- dma_addr_t db_val) --{ -- db_cfg->db_val = db_val; -- mhi_write_db(mhi_cntrl, db_addr, db_val); --} -- --void mhi_ring_er_db(struct mhi_event *mhi_event) --{ -- struct mhi_ring *ring = &mhi_event->ring; -- -- mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, -- ring->db_addr, *ring->ctxt_wp); --} -- --void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) --{ -- dma_addr_t db; -- struct mhi_ring *ring = &mhi_cmd->ring; -- -- db = ring->iommu_base + (ring->wp - ring->base); -- *ring->ctxt_wp = db; -- mhi_write_db(mhi_cntrl, ring->db_addr, db); --} -- --void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *ring = &mhi_chan->tre_ring; -- dma_addr_t db; -- -- db = ring->iommu_base + (ring->wp - ring->base); -- -- /* -- * Writes to the new ring element must be visible to the hardware -- * before letting h/w know there is new element to fetch. -- */ -- dma_wmb(); -- *ring->ctxt_wp = db; -- -- mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, -- ring->db_addr, db); --} -- --enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) --{ -- u32 exec; -- int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); -- -- return (ret) ? 
MHI_EE_MAX : exec; --} --EXPORT_SYMBOL_GPL(mhi_get_exec_env); -- --enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) --{ -- u32 state; -- int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, -- MHISTATUS_MHISTATE_MASK, -- MHISTATUS_MHISTATE_SHIFT, &state); -- return ret ? MHI_STATE_MAX : state; --} --EXPORT_SYMBOL_GPL(mhi_get_mhi_state); -- --void mhi_soc_reset(struct mhi_controller *mhi_cntrl) --{ -- if (mhi_cntrl->reset) { -- mhi_cntrl->reset(mhi_cntrl); -- return; -- } -- -- /* Generic MHI SoC reset */ -- mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, -- MHI_SOC_RESET_REQ); --} --EXPORT_SYMBOL_GPL(mhi_soc_reset); -- --int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info) --{ -- buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, -- buf_info->v_addr, buf_info->len, -- buf_info->dir); -- if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) -- return -ENOMEM; -- -- return 0; --} -- --int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info) --{ -- void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, -- &buf_info->p_addr, GFP_ATOMIC); -- -- if (!buf) -- return -ENOMEM; -- -- if (buf_info->dir == DMA_TO_DEVICE) -- memcpy(buf, buf_info->v_addr, buf_info->len); -- -- buf_info->bb_addr = buf; -- -- return 0; --} -- --void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info) --{ -- dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, -- buf_info->dir); --} -- --void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, -- struct mhi_buf_info *buf_info) --{ -- if (buf_info->dir == DMA_FROM_DEVICE) -- memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); -- -- dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, -- buf_info->bb_addr, buf_info->p_addr); --} -- --static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring) --{ -- int nr_el; -- -- if (ring->wp < ring->rp) { -- nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; -- } else { -- nr_el = (ring->rp - ring->base) / ring->el_size; -- nr_el += ((ring->base + ring->len - ring->wp) / -- ring->el_size) - 1; -- } -- -- return nr_el; --} -- --static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) --{ -- return (addr - ring->iommu_base) + ring->base; --} -- --static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring) --{ -- ring->wp += ring->el_size; -- if (ring->wp >= (ring->base + ring->len)) -- ring->wp = ring->base; -- /* smp update */ -- smp_wmb(); --} -- --static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring) --{ -- ring->rp += ring->el_size; -- if (ring->rp >= (ring->base + ring->len)) -- ring->rp = ring->base; -- /* smp update */ -- smp_wmb(); --} -- --static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) --{ -- return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; --} -- --int mhi_destroy_device(struct device *dev, void *data) --{ -- struct mhi_chan *ul_chan, *dl_chan; -- struct mhi_device *mhi_dev; -- struct mhi_controller *mhi_cntrl; -- enum mhi_ee_type ee = MHI_EE_MAX; -- -- if (dev->bus != &mhi_bus_type) -- return 0; -- -- mhi_dev = to_mhi_device(dev); -- mhi_cntrl = mhi_dev->mhi_cntrl; -- -- /* Only destroy virtual devices thats attached to bus */ -- if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -- return 0; -- -- ul_chan = mhi_dev->ul_chan; -- dl_chan = 
mhi_dev->dl_chan; -- -- /* -- * If execution environment is specified, remove only those devices that -- * started in them based on ee_mask for the channels as we move on to a -- * different execution environment -- */ -- if (data) -- ee = *(enum mhi_ee_type *)data; -- -- /* -- * For the suspend and resume case, this function will get called -- * without mhi_unregister_controller(). Hence, we need to drop the -- * references to mhi_dev created for ul and dl channels. We can -- * be sure that there will be no instances of mhi_dev left after -- * this. -- */ -- if (ul_chan) { -- if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) -- return 0; -- -- put_device(&ul_chan->mhi_dev->dev); -- } -- -- if (dl_chan) { -- if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) -- return 0; -- -- put_device(&dl_chan->mhi_dev->dev); -- } -- -- dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", -- mhi_dev->name); -- -- /* Notify the client and remove the device from MHI bus */ -- device_del(dev); -- put_device(dev); -- -- return 0; --} -- --int mhi_get_free_desc_count(struct mhi_device *mhi_dev, -- enum dma_data_direction dir) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? -- mhi_dev->ul_chan : mhi_dev->dl_chan; -- struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -- -- return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); --} --EXPORT_SYMBOL_GPL(mhi_get_free_desc_count); -- --void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) --{ -- struct mhi_driver *mhi_drv; -- -- if (!mhi_dev->dev.driver) -- return; -- -- mhi_drv = to_mhi_driver(mhi_dev->dev.driver); -- -- if (mhi_drv->status_cb) -- mhi_drv->status_cb(mhi_dev, cb_reason); --} --EXPORT_SYMBOL_GPL(mhi_notify); -- --/* Bind MHI channels to MHI devices */ --void mhi_create_devices(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_chan *mhi_chan; -- struct mhi_device *mhi_dev; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int i, ret; -- -- mhi_chan = mhi_cntrl->mhi_chan; -- for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -- if (!mhi_chan->configured || mhi_chan->mhi_dev || -- !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) -- continue; -- mhi_dev = mhi_alloc_device(mhi_cntrl); -- if (IS_ERR(mhi_dev)) -- return; -- -- mhi_dev->dev_type = MHI_DEVICE_XFER; -- switch (mhi_chan->dir) { -- case DMA_TO_DEVICE: -- mhi_dev->ul_chan = mhi_chan; -- mhi_dev->ul_chan_id = mhi_chan->chan; -- break; -- case DMA_FROM_DEVICE: -- /* We use dl_chan as offload channels */ -- mhi_dev->dl_chan = mhi_chan; -- mhi_dev->dl_chan_id = mhi_chan->chan; -- break; -- default: -- dev_err(dev, "Direction not supported\n"); -- put_device(&mhi_dev->dev); -- return; -- } -- -- get_device(&mhi_dev->dev); -- mhi_chan->mhi_dev = mhi_dev; -- -- /* Check next channel if it matches */ -- if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { -- if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { -- i++; -- mhi_chan++; -- if (mhi_chan->dir == DMA_TO_DEVICE) { -- mhi_dev->ul_chan = mhi_chan; -- mhi_dev->ul_chan_id = mhi_chan->chan; -- } else { -- mhi_dev->dl_chan = mhi_chan; -- mhi_dev->dl_chan_id = mhi_chan->chan; -- } -- get_device(&mhi_dev->dev); -- mhi_chan->mhi_dev = mhi_dev; -- } -- } -- -- /* Channel name is same for both UL and DL */ -- mhi_dev->name = mhi_chan->name; -- dev_set_name(&mhi_dev->dev, "%s_%s", -- dev_name(&mhi_cntrl->mhi_dev->dev), -- mhi_dev->name); -- -- /* Init wakeup source if available */ -- if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) 
-- device_init_wakeup(&mhi_dev->dev, true); -- -- ret = device_add(&mhi_dev->dev); -- if (ret) -- put_device(&mhi_dev->dev); -- } --} -- --irqreturn_t mhi_irq_handler(int irq_number, void *dev) --{ -- struct mhi_event *mhi_event = dev; -- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -- struct mhi_event_ctxt *er_ctxt = -- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -- struct mhi_ring *ev_ring = &mhi_event->ring; -- dma_addr_t ptr = er_ctxt->rp; -- void *dev_rp; -- -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- return IRQ_HANDLED; -- } -- -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- -- /* Only proceed if event ring has pending events */ -- if (ev_ring->rp == dev_rp) -- return IRQ_HANDLED; -- -- /* For client managed event ring, notify pending data */ -- if (mhi_event->cl_manage) { -- struct mhi_chan *mhi_chan = mhi_event->mhi_chan; -- struct mhi_device *mhi_dev = mhi_chan->mhi_dev; -- -- if (mhi_dev) -- mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); -- } else { -- tasklet_schedule(&mhi_event->task); -- } -- -- return IRQ_HANDLED; --} -- --irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) --{ -- struct mhi_controller *mhi_cntrl = priv; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- enum mhi_state state; -- enum mhi_pm_state pm_state = 0; -- enum mhi_ee_type ee; -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- goto exit_intvec; -- } -- -- state = mhi_get_mhi_state(mhi_cntrl); -- ee = mhi_get_exec_env(mhi_cntrl); -- dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", -- TO_MHI_EXEC_STR(mhi_cntrl->ee), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); -- -- if (state == MHI_STATE_SYS_ERR) { -- dev_dbg(dev, "System error detected\n"); -- pm_state = mhi_tryset_pm_state(mhi_cntrl, -- MHI_PM_SYS_ERR_DETECT); -- } -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) -- goto exit_intvec; -- -- switch (ee) { -- case MHI_EE_RDDM: -- /* proceed if power down is not already in progress */ -- if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); -- mhi_cntrl->ee = ee; -- wake_up_all(&mhi_cntrl->state_event); -- } -- break; -- case MHI_EE_PBL: -- case MHI_EE_EDL: -- case MHI_EE_PTHRU: -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); -- mhi_cntrl->ee = ee; -- wake_up_all(&mhi_cntrl->state_event); -- mhi_pm_sys_err_handler(mhi_cntrl); -- break; -- default: -- wake_up_all(&mhi_cntrl->state_event); -- mhi_pm_sys_err_handler(mhi_cntrl); -- break; -- } -- --exit_intvec: -- -- return IRQ_HANDLED; --} -- --irqreturn_t mhi_intvec_handler(int irq_number, void *dev) --{ -- struct mhi_controller *mhi_cntrl = dev; -- -- /* Wake up events waiting for state change */ -- wake_up_all(&mhi_cntrl->state_event); -- -- return IRQ_WAKE_THREAD; --} -- --static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring) --{ -- dma_addr_t ctxt_wp; -- -- /* Update the WP */ -- ring->wp += ring->el_size; -- ctxt_wp = *ring->ctxt_wp + ring->el_size; -- -- if (ring->wp >= (ring->base + ring->len)) { -- ring->wp = ring->base; -- ctxt_wp = ring->iommu_base; -- } -- -- *ring->ctxt_wp = ctxt_wp; -- -- /* Update the RP */ -- ring->rp += ring->el_size; -- if (ring->rp >= (ring->base + ring->len)) -- ring->rp = 
ring->base; -- -- /* Update to all cores */ -- smp_wmb(); --} -- --static int parse_xfer_event(struct mhi_controller *mhi_cntrl, -- struct mhi_tre *event, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *buf_ring, *tre_ring; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- struct mhi_result result; -- unsigned long flags = 0; -- u32 ev_code; -- -- ev_code = MHI_TRE_GET_EV_CODE(event); -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- -- result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? -- -EOVERFLOW : 0; -- -- /* -- * If it's a DB Event then we need to grab the lock -- * with preemption disabled and as a write because we -- * have to update db register and there are chances that -- * another thread could be doing the same. -- */ -- if (ev_code >= MHI_EV_CC_OOB) -- write_lock_irqsave(&mhi_chan->lock, flags); -- else -- read_lock_bh(&mhi_chan->lock); -- -- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -- goto end_process_tx_event; -- -- switch (ev_code) { -- case MHI_EV_CC_OVERFLOW: -- case MHI_EV_CC_EOB: -- case MHI_EV_CC_EOT: -- { -- dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); -- struct mhi_tre *local_rp, *ev_tre; -- void *dev_rp; -- struct mhi_buf_info *buf_info; -- u16 xfer_len; -- -- if (!is_valid_ring_ptr(tre_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event element points outside of the tre ring\n"); -- break; -- } -- /* Get the TRB this event points to */ -- ev_tre = mhi_to_virtual(tre_ring, ptr); -- -- dev_rp = ev_tre + 1; -- if (dev_rp >= (tre_ring->base + tre_ring->len)) -- dev_rp = tre_ring->base; -- -- result.dir = mhi_chan->dir; -- -- local_rp = tre_ring->rp; -- while (local_rp != dev_rp) { -- buf_info = buf_ring->rp; -- /* If it's the last TRE, get length from the event */ -- if (local_rp == ev_tre) -- xfer_len = MHI_TRE_GET_EV_LEN(event); -- else -- xfer_len = buf_info->len; -- -- /* Unmap if it's not pre-mapped by client */ -- if (likely(!buf_info->pre_mapped)) -- mhi_cntrl->unmap_single(mhi_cntrl, buf_info); -- -- result.buf_addr = buf_info->cb_buf; -- -- /* truncate to buf len if xfer_len is larger */ -- result.bytes_xferd = -- min_t(u16, xfer_len, buf_info->len); -- mhi_del_ring_element(mhi_cntrl, buf_ring); -- mhi_del_ring_element(mhi_cntrl, tre_ring); -- local_rp = tre_ring->rp; -- -- /* notify client */ -- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -- -- if (mhi_chan->dir == DMA_TO_DEVICE) { -- atomic_dec(&mhi_cntrl->pending_pkts); -- /* Release the reference got from mhi_queue() */ -- mhi_cntrl->runtime_put(mhi_cntrl); -- } -- -- /* -- * Recycle the buffer if buffer is pre-allocated, -- * if there is an error, not much we can do apart -- * from dropping the packet -- */ -- if (mhi_chan->pre_alloc) { -- if (mhi_queue_buf(mhi_chan->mhi_dev, -- mhi_chan->dir, -- buf_info->cb_buf, -- buf_info->len, MHI_EOT)) { -- dev_err(dev, -- "Error recycling buffer for chan:%d\n", -- mhi_chan->chan); -- kfree(buf_info->cb_buf); -- } -- } -- } -- break; -- } /* CC_EOT */ -- case MHI_EV_CC_OOB: -- case MHI_EV_CC_DB_MODE: -- { -- unsigned long pm_lock_flags; -- -- mhi_chan->db_cfg.db_mode = 1; -- read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); -- if (tre_ring->wp != tre_ring->rp && -- MHI_DB_ACCESS_VALID(mhi_cntrl)) { -- mhi_ring_chan_db(mhi_cntrl, mhi_chan); -- } -- read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); -- break; -- } -- case MHI_EV_CC_BAD_TRE: -- default: -- dev_err(dev, "Unknown event 0x%x\n", ev_code); -- break; -- } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ -- --end_process_tx_event: -- 
if (ev_code >= MHI_EV_CC_OOB) -- write_unlock_irqrestore(&mhi_chan->lock, flags); -- else -- read_unlock_bh(&mhi_chan->lock); -- -- return 0; --} -- --static int parse_rsc_event(struct mhi_controller *mhi_cntrl, -- struct mhi_tre *event, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *buf_ring, *tre_ring; -- struct mhi_buf_info *buf_info; -- struct mhi_result result; -- int ev_code; -- u32 cookie; /* offset to local descriptor */ -- u16 xfer_len; -- -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- -- ev_code = MHI_TRE_GET_EV_CODE(event); -- cookie = MHI_TRE_GET_EV_COOKIE(event); -- xfer_len = MHI_TRE_GET_EV_LEN(event); -- -- /* Received out of bound cookie */ -- WARN_ON(cookie >= buf_ring->len); -- -- buf_info = buf_ring->base + cookie; -- -- result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? -- -EOVERFLOW : 0; -- -- /* truncate to buf len if xfer_len is larger */ -- result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); -- result.buf_addr = buf_info->cb_buf; -- result.dir = mhi_chan->dir; -- -- read_lock_bh(&mhi_chan->lock); -- -- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -- goto end_process_rsc_event; -- -- WARN_ON(!buf_info->used); -- -- /* notify the client */ -- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -- -- /* -- * Note: We're arbitrarily incrementing RP even though, completion -- * packet we processed might not be the same one, reason we can do this -- * is because device guaranteed to cache descriptors in order it -- * receive, so even though completion event is different we can re-use -- * all descriptors in between. -- * Example: -- * Transfer Ring has descriptors: A, B, C, D -- * Last descriptor host queue is D (WP) and first descriptor -- * host queue is A (RP). -- * The completion event we just serviced is descriptor C. -- * Then we can safely queue descriptors to replace A, B, and C -- * even though host did not receive any completions. 
-- */ -- mhi_del_ring_element(mhi_cntrl, tre_ring); -- buf_info->used = false; -- --end_process_rsc_event: -- read_unlock_bh(&mhi_chan->lock); -- -- return 0; --} -- --static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, -- struct mhi_tre *tre) --{ -- dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); -- struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -- struct mhi_ring *mhi_ring = &cmd_ring->ring; -- struct mhi_tre *cmd_pkt; -- struct mhi_chan *mhi_chan; -- u32 chan; -- -- if (!is_valid_ring_ptr(mhi_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event element points outside of the cmd ring\n"); -- return; -- } -- -- cmd_pkt = mhi_to_virtual(mhi_ring, ptr); -- -- chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); -- -- if (chan < mhi_cntrl->max_chan && -- mhi_cntrl->mhi_chan[chan].configured) { -- mhi_chan = &mhi_cntrl->mhi_chan[chan]; -- write_lock_bh(&mhi_chan->lock); -- mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); -- complete(&mhi_chan->completion); -- write_unlock_bh(&mhi_chan->lock); -- } else { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Completion packet for invalid channel ID: %d\n", chan); -- } -- -- mhi_del_ring_element(mhi_cntrl, mhi_ring); --} -- --int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, -- u32 event_quota) --{ -- struct mhi_tre *dev_rp, *local_rp; -- struct mhi_ring *ev_ring = &mhi_event->ring; -- struct mhi_event_ctxt *er_ctxt = -- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -- struct mhi_chan *mhi_chan; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 chan; -- int count = 0; -- dma_addr_t ptr = er_ctxt->rp; -- -- /* -- * This is a quick check to avoid unnecessary event processing -- * in case MHI is already in error state, but it's still possible -- * to transition to error state while processing events -- */ -- if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) -- return -EIO; -- -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- return -EIO; -- } -- -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- local_rp = ev_ring->rp; -- -- while (dev_rp != local_rp) { -- enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); -- -- switch (type) { -- case MHI_PKT_TYPE_BW_REQ_EVENT: -- { -- struct mhi_link_info *link_info; -- -- link_info = &mhi_cntrl->mhi_link_info; -- write_lock_irq(&mhi_cntrl->pm_lock); -- link_info->target_link_speed = -- MHI_TRE_GET_EV_LINKSPEED(local_rp); -- link_info->target_link_width = -- MHI_TRE_GET_EV_LINKWIDTH(local_rp); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- dev_dbg(dev, "Received BW_REQ event\n"); -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); -- break; -- } -- case MHI_PKT_TYPE_STATE_CHANGE_EVENT: -- { -- enum mhi_state new_state; -- -- new_state = MHI_TRE_GET_EV_STATE(local_rp); -- -- dev_dbg(dev, "State change event to state: %s\n", -- TO_MHI_STATE_STR(new_state)); -- -- switch (new_state) { -- case MHI_STATE_M0: -- mhi_pm_m0_transition(mhi_cntrl); -- break; -- case MHI_STATE_M1: -- mhi_pm_m1_transition(mhi_cntrl); -- break; -- case MHI_STATE_M3: -- mhi_pm_m3_transition(mhi_cntrl); -- break; -- case MHI_STATE_SYS_ERR: -- { -- enum mhi_pm_state pm_state; -- -- dev_dbg(dev, "System error detected\n"); -- write_lock_irq(&mhi_cntrl->pm_lock); -- pm_state = mhi_tryset_pm_state(mhi_cntrl, -- MHI_PM_SYS_ERR_DETECT); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (pm_state == MHI_PM_SYS_ERR_DETECT) -- mhi_pm_sys_err_handler(mhi_cntrl); -- break; -- } -- default: -- 
dev_err(dev, "Invalid state: %s\n", -- TO_MHI_STATE_STR(new_state)); -- } -- -- break; -- } -- case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: -- mhi_process_cmd_completion(mhi_cntrl, local_rp); -- break; -- case MHI_PKT_TYPE_EE_EVENT: -- { -- enum dev_st_transition st = DEV_ST_TRANSITION_MAX; -- enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); -- -- dev_dbg(dev, "Received EE event: %s\n", -- TO_MHI_EXEC_STR(event)); -- switch (event) { -- case MHI_EE_SBL: -- st = DEV_ST_TRANSITION_SBL; -- break; -- case MHI_EE_WFW: -- case MHI_EE_AMSS: -- st = DEV_ST_TRANSITION_MISSION_MODE; -- break; -- case MHI_EE_FP: -- st = DEV_ST_TRANSITION_FP; -- break; -- case MHI_EE_RDDM: -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->ee = event; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- wake_up_all(&mhi_cntrl->state_event); -- break; -- default: -- dev_err(dev, -- "Unhandled EE event: 0x%x\n", type); -- } -- if (st != DEV_ST_TRANSITION_MAX) -- mhi_queue_state_transition(mhi_cntrl, st); -- -- break; -- } -- case MHI_PKT_TYPE_TX_EVENT: -- chan = MHI_TRE_GET_EV_CHID(local_rp); -- -- WARN_ON(chan >= mhi_cntrl->max_chan); -- -- /* -- * Only process the event ring elements whose channel -- * ID is within the maximum supported range. -- */ -- if (chan < mhi_cntrl->max_chan) { -- mhi_chan = &mhi_cntrl->mhi_chan[chan]; -- if (!mhi_chan->configured) -- break; -- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); -- event_quota--; -- } -- break; -- default: -- dev_err(dev, "Unhandled event type: %d\n", type); -- break; -- } -- -- mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); -- local_rp = ev_ring->rp; -- -- ptr = er_ctxt->rp; -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- return -EIO; -- } -- -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- count++; -- } -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -- mhi_ring_er_db(mhi_event); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- return count; --} -- --int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, -- u32 event_quota) --{ -- struct mhi_tre *dev_rp, *local_rp; -- struct mhi_ring *ev_ring = &mhi_event->ring; -- struct mhi_event_ctxt *er_ctxt = -- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -- int count = 0; -- u32 chan; -- struct mhi_chan *mhi_chan; -- dma_addr_t ptr = er_ctxt->rp; -- -- if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) -- return -EIO; -- -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- return -EIO; -- } -- -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- local_rp = ev_ring->rp; -- -- while (dev_rp != local_rp && event_quota > 0) { -- enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); -- -- chan = MHI_TRE_GET_EV_CHID(local_rp); -- -- WARN_ON(chan >= mhi_cntrl->max_chan); -- -- /* -- * Only process the event ring elements whose channel -- * ID is within the maximum supported range. 
-- */ -- if (chan < mhi_cntrl->max_chan && -- mhi_cntrl->mhi_chan[chan].configured) { -- mhi_chan = &mhi_cntrl->mhi_chan[chan]; -- -- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { -- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); -- event_quota--; -- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { -- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); -- event_quota--; -- } -- } -- -- mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); -- local_rp = ev_ring->rp; -- -- ptr = er_ctxt->rp; -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- return -EIO; -- } -- -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- count++; -- } -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -- mhi_ring_er_db(mhi_event); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- return count; --} -- --void mhi_ev_task(unsigned long data) --{ -- struct mhi_event *mhi_event = (struct mhi_event *)data; -- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -- -- /* process all pending events */ -- spin_lock_bh(&mhi_event->lock); -- mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); -- spin_unlock_bh(&mhi_event->lock); --} -- --void mhi_ctrl_ev_task(unsigned long data) --{ -- struct mhi_event *mhi_event = (struct mhi_event *)data; -- struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- enum mhi_state state; -- enum mhi_pm_state pm_state = 0; -- int ret; -- -- /* -- * We can check PM state w/o a lock here because there is no way -- * PM state can change from reg access valid to no access while this -- * thread being executed. -- */ -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- /* -- * We may have a pending event but not allowed to -- * process it since we are probably in a suspended state, -- * so trigger a resume. -- */ -- mhi_trigger_resume(mhi_cntrl); -- -- return; -- } -- -- /* Process ctrl events events */ -- ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); -- -- /* -- * We received an IRQ but no events to process, maybe device went to -- * SYS_ERR state? Check the state to confirm. -- */ -- if (!ret) { -- write_lock_irq(&mhi_cntrl->pm_lock); -- state = mhi_get_mhi_state(mhi_cntrl); -- if (state == MHI_STATE_SYS_ERR) { -- dev_dbg(dev, "System error detected\n"); -- pm_state = mhi_tryset_pm_state(mhi_cntrl, -- MHI_PM_SYS_ERR_DETECT); -- } -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (pm_state == MHI_PM_SYS_ERR_DETECT) -- mhi_pm_sys_err_handler(mhi_cntrl); -- } --} -- --static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, -- struct mhi_ring *ring) --{ -- void *tmp = ring->wp + ring->el_size; -- -- if (tmp >= (ring->base + ring->len)) -- tmp = ring->base; -- -- return (tmp == ring->rp); --} -- --static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, -- enum dma_data_direction dir, enum mhi_flags mflags) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : -- mhi_dev->dl_chan; -- struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -- unsigned long flags; -- int ret; -- -- if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) -- return -EIO; -- -- read_lock_irqsave(&mhi_cntrl->pm_lock, flags); -- -- ret = mhi_is_ring_full(mhi_cntrl, tre_ring); -- if (unlikely(ret)) { -- ret = -EAGAIN; -- goto exit_unlock; -- } -- -- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); -- if (unlikely(ret)) -- goto exit_unlock; -- -- /* Packet is queued, take a usage ref to exit M3 if necessary -- * for host->device buffer, balanced put is done on buffer completion -- * for device->host buffer, balanced put is after ringing the DB -- */ -- mhi_cntrl->runtime_get(mhi_cntrl); -- -- /* Assert dev_wake (to exit/prevent M1/M2)*/ -- mhi_cntrl->wake_toggle(mhi_cntrl); -- -- if (mhi_chan->dir == DMA_TO_DEVICE) -- atomic_inc(&mhi_cntrl->pending_pkts); -- -- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -- mhi_ring_chan_db(mhi_cntrl, mhi_chan); -- -- if (dir == DMA_FROM_DEVICE) -- mhi_cntrl->runtime_put(mhi_cntrl); -- --exit_unlock: -- read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); -- -- return ret; --} -- --int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, -- struct sk_buff *skb, size_t len, enum mhi_flags mflags) --{ -- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : -- mhi_dev->dl_chan; -- struct mhi_buf_info buf_info = { }; -- -- buf_info.v_addr = skb->data; -- buf_info.cb_buf = skb; -- buf_info.len = len; -- -- if (unlikely(mhi_chan->pre_alloc)) -- return -EINVAL; -- -- return mhi_queue(mhi_dev, &buf_info, dir, mflags); --} --EXPORT_SYMBOL_GPL(mhi_queue_skb); -- --int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, -- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) --{ -- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : -- mhi_dev->dl_chan; -- struct mhi_buf_info buf_info = { }; -- -- buf_info.p_addr = mhi_buf->dma_addr; -- buf_info.cb_buf = mhi_buf; -- buf_info.pre_mapped = true; -- buf_info.len = len; -- -- if (unlikely(mhi_chan->pre_alloc)) -- return -EINVAL; -- -- return mhi_queue(mhi_dev, &buf_info, dir, mflags); --} --EXPORT_SYMBOL_GPL(mhi_queue_dma); -- --int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -- struct mhi_buf_info *info, enum mhi_flags flags) --{ -- struct mhi_ring *buf_ring, *tre_ring; -- struct mhi_tre *mhi_tre; -- struct mhi_buf_info *buf_info; -- int eot, eob, chain, bei; -- int ret; -- -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- -- buf_info = buf_ring->wp; -- WARN_ON(buf_info->used); -- buf_info->pre_mapped = info->pre_mapped; -- if (info->pre_mapped) -- buf_info->p_addr = info->p_addr; -- else -- buf_info->v_addr = info->v_addr; -- buf_info->cb_buf = info->cb_buf; -- buf_info->wp = tre_ring->wp; -- buf_info->dir = mhi_chan->dir; -- buf_info->len = info->len; -- -- if (!info->pre_mapped) { -- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); -- if (ret) -- return ret; -- } -- -- eob = !!(flags & MHI_EOB); -- eot = !!(flags & MHI_EOT); -- chain = !!(flags & MHI_CHAIN); -- bei = !!(mhi_chan->intmod); -- -- mhi_tre = tre_ring->wp; -- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); -- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); -- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); -- -- /* increment WP */ -- mhi_add_ring_element(mhi_cntrl, tre_ring); -- mhi_add_ring_element(mhi_cntrl, buf_ring); -- -- return 0; --} -- --int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, -- void *buf, size_t len, enum mhi_flags mflags) --{ -- struct mhi_buf_info buf_info = { }; -- -- buf_info.v_addr = buf; -- buf_info.cb_buf = buf; -- buf_info.len = len; -- -- return mhi_queue(mhi_dev, &buf_info, dir, mflags); --} --EXPORT_SYMBOL_GPL(mhi_queue_buf); -- --bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
-- mhi_dev->ul_chan : mhi_dev->dl_chan; -- struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -- -- return mhi_is_ring_full(mhi_cntrl, tre_ring); --} --EXPORT_SYMBOL_GPL(mhi_queue_is_full); -- --int mhi_send_cmd(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan, -- enum mhi_cmd_type cmd) --{ -- struct mhi_tre *cmd_tre = NULL; -- struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -- struct mhi_ring *ring = &mhi_cmd->ring; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int chan = 0; -- -- if (mhi_chan) -- chan = mhi_chan->chan; -- -- spin_lock_bh(&mhi_cmd->lock); -- if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { -- spin_unlock_bh(&mhi_cmd->lock); -- return -ENOMEM; -- } -- -- /* prepare the cmd tre */ -- cmd_tre = ring->wp; -- switch (cmd) { -- case MHI_CMD_RESET_CHAN: -- cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; -- cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; -- cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); -- break; -- case MHI_CMD_STOP_CHAN: -- cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; -- cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; -- cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); -- break; -- case MHI_CMD_START_CHAN: -- cmd_tre->ptr = MHI_TRE_CMD_START_PTR; -- cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; -- cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); -- break; -- default: -- dev_err(dev, "Command not supported\n"); -- break; -- } -- -- /* queue to hardware */ -- mhi_add_ring_element(mhi_cntrl, ring); -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -- mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- spin_unlock_bh(&mhi_cmd->lock); -- -- return 0; --} -- --static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan, -- enum mhi_ch_state_type to_state) --{ -- struct device *dev = &mhi_chan->mhi_dev->dev; -- enum mhi_cmd_type cmd = MHI_CMD_NOP; -- int ret; -- -- dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, -- TO_CH_STATE_TYPE_STR(to_state)); -- -- switch (to_state) { -- case MHI_CH_STATE_TYPE_RESET: -- write_lock_irq(&mhi_chan->lock); -- if (mhi_chan->ch_state != MHI_CH_STATE_STOP && -- mhi_chan->ch_state != MHI_CH_STATE_ENABLED && -- mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { -- write_unlock_irq(&mhi_chan->lock); -- return -EINVAL; -- } -- mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -- write_unlock_irq(&mhi_chan->lock); -- -- cmd = MHI_CMD_RESET_CHAN; -- break; -- case MHI_CH_STATE_TYPE_STOP: -- if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -- return -EINVAL; -- -- cmd = MHI_CMD_STOP_CHAN; -- break; -- case MHI_CH_STATE_TYPE_START: -- if (mhi_chan->ch_state != MHI_CH_STATE_STOP && -- mhi_chan->ch_state != MHI_CH_STATE_DISABLED) -- return -EINVAL; -- -- cmd = MHI_CMD_START_CHAN; -- break; -- default: -- dev_err(dev, "%d: Channel state update to %s not allowed\n", -- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -- return -EINVAL; -- } -- -- /* bring host and device out of suspended states */ -- ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); -- if (ret) -- return ret; -- mhi_cntrl->runtime_get(mhi_cntrl); -- -- reinit_completion(&mhi_chan->completion); -- ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); -- if (ret) { -- dev_err(dev, "%d: Failed to send %s channel command\n", -- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -- goto exit_channel_update; -- } -- -- ret = wait_for_completion_timeout(&mhi_chan->completion, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- if (!ret || mhi_chan->ccs != 
MHI_EV_CC_SUCCESS) { -- dev_err(dev, -- "%d: Failed to receive %s channel command completion\n", -- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -- ret = -EIO; -- goto exit_channel_update; -- } -- -- ret = 0; -- -- if (to_state != MHI_CH_STATE_TYPE_RESET) { -- write_lock_irq(&mhi_chan->lock); -- mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? -- MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; -- write_unlock_irq(&mhi_chan->lock); -- } -- -- dev_dbg(dev, "%d: Channel state change to %s successful\n", -- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -- --exit_channel_update: -- mhi_cntrl->runtime_put(mhi_cntrl); -- mhi_device_put(mhi_cntrl->mhi_dev); -- -- return ret; --} -- --static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- int ret; -- struct device *dev = &mhi_chan->mhi_dev->dev; -- -- mutex_lock(&mhi_chan->mutex); -- -- if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { -- dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n", -- TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); -- goto exit_unprepare_channel; -- } -- -- /* no more processing events for this channel */ -- ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, -- MHI_CH_STATE_TYPE_RESET); -- if (ret) -- dev_err(dev, "%d: Failed to reset channel, still resetting\n", -- mhi_chan->chan); -- --exit_unprepare_channel: -- write_lock_irq(&mhi_chan->lock); -- mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -- write_unlock_irq(&mhi_chan->lock); -- -- if (!mhi_chan->offload_ch) { -- mhi_reset_chan(mhi_cntrl, mhi_chan); -- mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -- } -- dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); -- -- mutex_unlock(&mhi_chan->mutex); --} -- --int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- int ret = 0; -- struct device *dev = &mhi_chan->mhi_dev->dev; -- -- if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { -- dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n", -- TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); -- return -ENOTCONN; -- } -- -- mutex_lock(&mhi_chan->mutex); -- -- /* Check of client manages channel context for offload channels */ -- if (!mhi_chan->offload_ch) { -- ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); -- if (ret) -- goto error_init_chan; -- } -- -- ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, -- MHI_CH_STATE_TYPE_START); -- if (ret) -- goto error_pm_state; -- -- /* Pre-allocate buffer for xfer ring */ -- if (mhi_chan->pre_alloc) { -- int nr_el = get_nr_avail_ring_elements(mhi_cntrl, -- &mhi_chan->tre_ring); -- size_t len = mhi_cntrl->buffer_len; -- -- while (nr_el--) { -- void *buf; -- struct mhi_buf_info info = { }; -- buf = kmalloc(len, GFP_KERNEL); -- if (!buf) { -- ret = -ENOMEM; -- goto error_pre_alloc; -- } -- -- /* Prepare transfer descriptors */ -- info.v_addr = buf; -- info.cb_buf = buf; -- info.len = len; -- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); -- if (ret) { -- kfree(buf); -- goto error_pre_alloc; -- } -- } -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { -- read_lock_irq(&mhi_chan->lock); -- mhi_ring_chan_db(mhi_cntrl, mhi_chan); -- read_unlock_irq(&mhi_chan->lock); -- } -- read_unlock_bh(&mhi_cntrl->pm_lock); -- } -- -- mutex_unlock(&mhi_chan->mutex); -- -- return 0; -- --error_pm_state: -- if (!mhi_chan->offload_ch) -- mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -- --error_init_chan: -- mutex_unlock(&mhi_chan->mutex); -- -- return ret; -- --error_pre_alloc: -- mutex_unlock(&mhi_chan->mutex); -- 
mhi_unprepare_channel(mhi_cntrl, mhi_chan); -- -- return ret; --} -- --static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, -- struct mhi_event *mhi_event, -- struct mhi_event_ctxt *er_ctxt, -- int chan) -- --{ -- struct mhi_tre *dev_rp, *local_rp; -- struct mhi_ring *ev_ring; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- unsigned long flags; -- dma_addr_t ptr; -- -- dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); -- -- ev_ring = &mhi_event->ring; -- -- /* mark all stale events related to channel as STALE event */ -- spin_lock_irqsave(&mhi_event->lock, flags); -- -- ptr = er_ctxt->rp; -- if (!is_valid_ring_ptr(ev_ring, ptr)) { -- dev_err(&mhi_cntrl->mhi_dev->dev, -- "Event ring rp points outside of the event ring\n"); -- dev_rp = ev_ring->rp; -- } else { -- dev_rp = mhi_to_virtual(ev_ring, ptr); -- } -- -- local_rp = ev_ring->rp; -- while (dev_rp != local_rp) { -- if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && -- chan == MHI_TRE_GET_EV_CHID(local_rp)) -- local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, -- MHI_PKT_TYPE_STALE_EVENT); -- local_rp++; -- if (local_rp == (ev_ring->base + ev_ring->len)) -- local_rp = ev_ring->base; -- } -- -- dev_dbg(dev, "Finished marking events as stale events\n"); -- spin_unlock_irqrestore(&mhi_event->lock, flags); --} -- --static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, -- struct mhi_chan *mhi_chan) --{ -- struct mhi_ring *buf_ring, *tre_ring; -- struct mhi_result result; -- -- /* Reset any pending buffers */ -- buf_ring = &mhi_chan->buf_ring; -- tre_ring = &mhi_chan->tre_ring; -- result.transaction_status = -ENOTCONN; -- result.bytes_xferd = 0; -- while (tre_ring->rp != tre_ring->wp) { -- struct mhi_buf_info *buf_info = buf_ring->rp; -- -- if (mhi_chan->dir == DMA_TO_DEVICE) { -- atomic_dec(&mhi_cntrl->pending_pkts); -- /* Release the reference got from mhi_queue() */ -- mhi_cntrl->runtime_put(mhi_cntrl); -- } -- -- if (!buf_info->pre_mapped) -- mhi_cntrl->unmap_single(mhi_cntrl, buf_info); -- -- mhi_del_ring_element(mhi_cntrl, buf_ring); -- mhi_del_ring_element(mhi_cntrl, tre_ring); -- -- if (mhi_chan->pre_alloc) { -- kfree(buf_info->cb_buf); -- } else { -- result.buf_addr = buf_info->cb_buf; -- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -- } -- } --} -- --void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) --{ -- struct mhi_event *mhi_event; -- struct mhi_event_ctxt *er_ctxt; -- int chan = mhi_chan->chan; -- -- /* Nothing to reset, client doesn't queue buffers */ -- if (mhi_chan->offload_ch) -- return; -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -- er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; -- -- mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); -- -- mhi_reset_data_chan(mhi_cntrl, mhi_chan); -- -- read_unlock_bh(&mhi_cntrl->pm_lock); --} -- --/* Move channel to start state */ --int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) --{ -- int ret, dir; -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan; -- -- for (dir = 0; dir < 2; dir++) { -- mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; -- if (!mhi_chan) -- continue; -- -- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); -- if (ret) -- goto error_open_chan; -- } -- -- return 0; -- --error_open_chan: -- for (--dir; dir >= 0; dir--) { -- mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; -- if (!mhi_chan) -- continue; -- -- mhi_unprepare_channel(mhi_cntrl, mhi_chan); -- } -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); -- --void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan; -- int dir; -- -- for (dir = 0; dir < 2; dir++) { -- mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; -- if (!mhi_chan) -- continue; -- -- mhi_unprepare_channel(mhi_cntrl, mhi_chan); -- } --} --EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); -- --int mhi_poll(struct mhi_device *mhi_dev, u32 budget) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- struct mhi_chan *mhi_chan = mhi_dev->dl_chan; -- struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -- int ret; -- -- spin_lock_bh(&mhi_event->lock); -- ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); -- spin_unlock_bh(&mhi_event->lock); -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_poll); -diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c -deleted file mode 100644 -index fb99e3727155b..0000000000000 ---- a/drivers/bus/mhi/core/pm.c -+++ /dev/null -@@ -1,1256 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -- * -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include "internal.h" -- --/* -- * Not all MHI state transitions are synchronous. Transitions like Linkdown, -- * SYS_ERR, and shutdown can happen anytime asynchronously. This function will -- * transition to a new state only if we're allowed to. -- * -- * Priority increases as we go down. For instance, from any state in L0, the -- * transition can be made to states in L1, L2 and L3. A notable exception to -- * this rule is state DISABLE. From DISABLE state we can only transition to -- * POR state. Also, while in L2 state, user cannot jump back to previous -- * L1 or L0 states. 
-- * -- * Valid transitions: -- * L0: DISABLE <--> POR -- * POR <--> POR -- * POR -> M0 -> M2 --> M0 -- * POR -> FW_DL_ERR -- * FW_DL_ERR <--> FW_DL_ERR -- * M0 <--> M0 -- * M0 -> FW_DL_ERR -- * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 -- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR -- * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT -- * SHUTDOWN_PROCESS -> DISABLE -- * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT -- * LD_ERR_FATAL_DETECT -> DISABLE -- */ --static struct mhi_pm_transitions const dev_state_transitions[] = { -- /* L0 States */ -- { -- MHI_PM_DISABLE, -- MHI_PM_POR -- }, -- { -- MHI_PM_POR, -- MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | -- MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR -- }, -- { -- MHI_PM_M0, -- MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | -- MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR -- }, -- { -- MHI_PM_M2, -- MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- { -- MHI_PM_M3_ENTER, -- MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- { -- MHI_PM_M3, -- MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- { -- MHI_PM_M3_EXIT, -- MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- { -- MHI_PM_FW_DL_ERR, -- MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | -- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT -- }, -- /* L1 States */ -- { -- MHI_PM_SYS_ERR_DETECT, -- MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- { -- MHI_PM_SYS_ERR_PROCESS, -- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | -- MHI_PM_LD_ERR_FATAL_DETECT -- }, -- /* L2 States */ -- { -- MHI_PM_SHUTDOWN_PROCESS, -- MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT -- }, -- /* L3 States */ -- { -- MHI_PM_LD_ERR_FATAL_DETECT, -- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE -- }, --}; -- --enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, -- enum mhi_pm_state state) --{ -- unsigned long cur_state = mhi_cntrl->pm_state; -- int index = find_last_bit(&cur_state, 32); -- -- if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) -- return cur_state; -- -- if (unlikely(dev_state_transitions[index].from_state != cur_state)) -- return cur_state; -- -- if (unlikely(!(dev_state_transitions[index].to_states & state))) -- return cur_state; -- -- mhi_cntrl->pm_state = state; -- return mhi_cntrl->pm_state; --} -- --void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) --{ -- if (state == MHI_STATE_RESET) { -- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -- MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); -- } else { -- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -- MHICTRL_MHISTATE_MASK, -- MHICTRL_MHISTATE_SHIFT, state); -- } --} -- --/* NOP for backward compatibility, host allowed to ring DB in M2 state */ --static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) --{ --} -- --static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) --{ -- mhi_cntrl->wake_get(mhi_cntrl, false); -- mhi_cntrl->wake_put(mhi_cntrl, true); --} -- --/* Handle device ready state transition */ --int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_event *mhi_event; -- enum mhi_pm_state cur_state; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 interval_us = 25000; /* poll register field 
every 25 milliseconds */ -- int ret, i; -- -- /* Check if device entered error state */ -- if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { -- dev_err(dev, "Device link is not accessible\n"); -- return -EIO; -- } -- -- /* Wait for RESET to be cleared and READY bit to be set by the device */ -- ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -- MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, -- interval_us); -- if (ret) { -- dev_err(dev, "Device failed to clear MHI Reset\n"); -- return ret; -- } -- -- ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, -- MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, -- interval_us); -- if (ret) { -- dev_err(dev, "Device failed to enter MHI Ready\n"); -- return ret; -- } -- -- dev_dbg(dev, "Device in READY State\n"); -- write_lock_irq(&mhi_cntrl->pm_lock); -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); -- mhi_cntrl->dev_state = MHI_STATE_READY; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- if (cur_state != MHI_PM_POR) { -- dev_err(dev, "Error moving to state %s from %s\n", -- to_mhi_pm_state_str(MHI_PM_POR), -- to_mhi_pm_state_str(cur_state)); -- return -EIO; -- } -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -- dev_err(dev, "Device registers not accessible\n"); -- goto error_mmio; -- } -- -- /* Configure MMIO registers */ -- ret = mhi_init_mmio(mhi_cntrl); -- if (ret) { -- dev_err(dev, "Error configuring MMIO registers\n"); -- goto error_mmio; -- } -- -- /* Add elements to all SW event rings */ -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- /* Skip if this is an offload or HW event */ -- if (mhi_event->offload_ev || mhi_event->hw_ring) -- continue; -- -- ring->wp = ring->base + ring->len - ring->el_size; -- *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; -- /* Update all cores */ -- smp_wmb(); -- -- /* Ring the event ring db */ -- spin_lock_irq(&mhi_event->lock); -- mhi_ring_er_db(mhi_event); -- spin_unlock_irq(&mhi_event->lock); -- } -- -- /* Set MHI to M0 state */ -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- return 0; -- --error_mmio: -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- return -EIO; --} -- --int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_pm_state cur_state; -- struct mhi_chan *mhi_chan; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int i; -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->dev_state = MHI_STATE_M0; -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (unlikely(cur_state != MHI_PM_M0)) { -- dev_err(dev, "Unable to transition to M0 state\n"); -- return -EIO; -- } -- mhi_cntrl->M0++; -- -- /* Wake up the device */ -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_cntrl->wake_get(mhi_cntrl, true); -- -- /* Ring all event rings and CMD ring only if we're in mission mode */ -- if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { -- struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -- struct mhi_cmd *mhi_cmd = -- &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -- -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- -- spin_lock_irq(&mhi_event->lock); -- mhi_ring_er_db(mhi_event); -- spin_unlock_irq(&mhi_event->lock); -- } -- -- /* Only ring primary cmd ring if ring is not empty */ -- spin_lock_irq(&mhi_cmd->lock); -- if (mhi_cmd->ring.rp != 
mhi_cmd->ring.wp) -- mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); -- spin_unlock_irq(&mhi_cmd->lock); -- } -- -- /* Ring channel DB registers */ -- mhi_chan = mhi_cntrl->mhi_chan; -- for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -- struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -- -- if (mhi_chan->db_cfg.reset_req) { -- write_lock_irq(&mhi_chan->lock); -- mhi_chan->db_cfg.db_mode = true; -- write_unlock_irq(&mhi_chan->lock); -- } -- -- read_lock_irq(&mhi_chan->lock); -- -- /* Only ring DB if ring is not empty */ -- if (tre_ring->base && tre_ring->wp != tre_ring->rp) -- mhi_ring_chan_db(mhi_cntrl, mhi_chan); -- read_unlock_irq(&mhi_chan->lock); -- } -- -- mhi_cntrl->wake_put(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- wake_up_all(&mhi_cntrl->state_event); -- -- return 0; --} -- --/* -- * After receiving the MHI state change event from the device indicating the -- * transition to M1 state, the host can transition the device to M2 state -- * for keeping it in low power state. -- */ --void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_pm_state state; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); -- if (state == MHI_PM_M2) { -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); -- mhi_cntrl->dev_state = MHI_STATE_M2; -- -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- mhi_cntrl->M2++; -- wake_up_all(&mhi_cntrl->state_event); -- -- /* If there are any pending resources, exit M2 immediately */ -- if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || -- atomic_read(&mhi_cntrl->dev_wake))) { -- dev_dbg(dev, -- "Exiting M2, pending_pkts: %d dev_wake: %d\n", -- atomic_read(&mhi_cntrl->pending_pkts), -- atomic_read(&mhi_cntrl->dev_wake)); -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_cntrl->wake_get(mhi_cntrl, true); -- mhi_cntrl->wake_put(mhi_cntrl, true); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- } else { -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); -- } -- } else { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- } --} -- --/* MHI M3 completion handler */ --int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_pm_state state; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->dev_state = MHI_STATE_M3; -- state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (state != MHI_PM_M3) { -- dev_err(dev, "Unable to transition to M3 state\n"); -- return -EIO; -- } -- -- mhi_cntrl->M3++; -- wake_up_all(&mhi_cntrl->state_event); -- -- return 0; --} -- --/* Handle device Mission Mode transition */ --static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_event *mhi_event; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; -- int i, ret; -- -- dev_dbg(dev, "Processing Mission Mode transition\n"); -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -- ee = mhi_get_exec_env(mhi_cntrl); -- -- if (!MHI_IN_MISSION_MODE(ee)) { -- mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- wake_up_all(&mhi_cntrl->state_event); -- return -EIO; -- } -- mhi_cntrl->ee = ee; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- wake_up_all(&mhi_cntrl->state_event); -- -- device_for_each_child(&mhi_cntrl->mhi_dev->dev, ¤t_ee, -- mhi_destroy_device); -- mhi_cntrl->status_cb(mhi_cntrl, 
MHI_CB_EE_MISSION_MODE); -- -- /* Force MHI to be in M0 state before continuing */ -- ret = __mhi_device_get_sync(mhi_cntrl); -- if (ret) -- return ret; -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- ret = -EIO; -- goto error_mission_mode; -- } -- -- /* Add elements to all HW event rings */ -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- if (mhi_event->offload_ev || !mhi_event->hw_ring) -- continue; -- -- ring->wp = ring->base + ring->len - ring->el_size; -- *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; -- /* Update to all cores */ -- smp_wmb(); -- -- spin_lock_irq(&mhi_event->lock); -- if (MHI_DB_ACCESS_VALID(mhi_cntrl)) -- mhi_ring_er_db(mhi_event); -- spin_unlock_irq(&mhi_event->lock); -- } -- -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- /* -- * The MHI devices are only created when the client device switches its -- * Execution Environment (EE) to either SBL or AMSS states -- */ -- mhi_create_devices(mhi_cntrl); -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- --error_mission_mode: -- mhi_cntrl->wake_put(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- return ret; --} -- --/* Handle shutdown transitions */ --static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_pm_state cur_state; -- struct mhi_event *mhi_event; -- struct mhi_cmd_ctxt *cmd_ctxt; -- struct mhi_cmd *mhi_cmd; -- struct mhi_event_ctxt *er_ctxt; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int ret, i; -- -- dev_dbg(dev, "Processing disable transition with PM state: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- -- /* Trigger MHI RESET so that the device will not access host memory */ -- if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { -- dev_dbg(dev, "Triggering MHI Reset in device\n"); -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -- -- /* Wait for the reset bit to be cleared by the device */ -- ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -- MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, -- 25000); -- if (ret) -- dev_err(dev, "Device failed to clear MHI Reset\n"); -- -- /* -- * Device will clear BHI_INTVEC as a part of RESET processing, -- * hence re-program it -- */ -- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -- } -- -- dev_dbg(dev, -- "Waiting for all pending event ring processing to complete\n"); -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if (mhi_event->offload_ev) -- continue; -- free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -- tasklet_kill(&mhi_event->task); -- } -- -- /* Release lock and wait for all pending threads to complete */ -- mutex_unlock(&mhi_cntrl->pm_mutex); -- dev_dbg(dev, "Waiting for all pending threads to complete\n"); -- wake_up_all(&mhi_cntrl->state_event); -- -- dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); -- device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- -- WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); -- WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); -- -- /* Reset the ev rings and cmd rings */ -- dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); -- mhi_cmd = mhi_cntrl->mhi_cmd; -- cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; -- for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -- struct mhi_ring *ring = 
&mhi_cmd->ring; -- -- ring->rp = ring->base; -- ring->wp = ring->base; -- cmd_ctxt->rp = cmd_ctxt->rbase; -- cmd_ctxt->wp = cmd_ctxt->rbase; -- } -- -- mhi_event = mhi_cntrl->mhi_event; -- er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -- mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- /* Skip offload events */ -- if (mhi_event->offload_ev) -- continue; -- -- ring->rp = ring->base; -- ring->wp = ring->base; -- er_ctxt->rp = er_ctxt->rbase; -- er_ctxt->wp = er_ctxt->rbase; -- } -- -- /* Move to disable state */ -- write_lock_irq(&mhi_cntrl->pm_lock); -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (unlikely(cur_state != MHI_PM_DISABLE)) -- dev_err(dev, "Error moving from PM state: %s to: %s\n", -- to_mhi_pm_state_str(cur_state), -- to_mhi_pm_state_str(MHI_PM_DISABLE)); -- -- dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -- -- mutex_unlock(&mhi_cntrl->pm_mutex); --} -- --/* Handle system error transitions */ --static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_pm_state cur_state, prev_state; -- enum dev_st_transition next_state; -- struct mhi_event *mhi_event; -- struct mhi_cmd_ctxt *cmd_ctxt; -- struct mhi_cmd *mhi_cmd; -- struct mhi_event_ctxt *er_ctxt; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int ret, i; -- -- dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); -- -- /* We must notify MHI control driver so it can clean up first */ -- mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- write_lock_irq(&mhi_cntrl->pm_lock); -- prev_state = mhi_cntrl->pm_state; -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- if (cur_state != MHI_PM_SYS_ERR_PROCESS) { -- dev_err(dev, "Failed to transition from PM state: %s to: %s\n", -- to_mhi_pm_state_str(cur_state), -- to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); -- goto exit_sys_error_transition; -- } -- -- mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; -- mhi_cntrl->dev_state = MHI_STATE_RESET; -- -- /* Wake up threads waiting for state transition */ -- wake_up_all(&mhi_cntrl->state_event); -- -- /* Trigger MHI RESET so that the device will not access host memory */ -- if (MHI_REG_ACCESS_VALID(prev_state)) { -- u32 in_reset = -1; -- unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); -- -- dev_dbg(dev, "Triggering MHI Reset in device\n"); -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -- -- /* Wait for the reset bit to be cleared by the device */ -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_read_reg_field(mhi_cntrl, -- mhi_cntrl->regs, -- MHICTRL, -- MHICTRL_RESET_MASK, -- MHICTRL_RESET_SHIFT, -- &in_reset) || -- !in_reset, timeout); -- if (!ret || in_reset) { -- dev_err(dev, "Device failed to exit MHI Reset state\n"); -- goto exit_sys_error_transition; -- } -- -- /* -- * Device will clear BHI_INTVEC as a part of RESET processing, -- * hence re-program it -- */ -- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -- } -- -- dev_dbg(dev, -- "Waiting for all pending event ring processing to complete\n"); -- mhi_event = mhi_cntrl->mhi_event; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -- if 
(mhi_event->offload_ev) -- continue; -- tasklet_kill(&mhi_event->task); -- } -- -- /* Release lock and wait for all pending threads to complete */ -- mutex_unlock(&mhi_cntrl->pm_mutex); -- dev_dbg(dev, "Waiting for all pending threads to complete\n"); -- wake_up_all(&mhi_cntrl->state_event); -- -- dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); -- device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- -- WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); -- WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); -- -- /* Reset the ev rings and cmd rings */ -- dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); -- mhi_cmd = mhi_cntrl->mhi_cmd; -- cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; -- for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -- struct mhi_ring *ring = &mhi_cmd->ring; -- -- ring->rp = ring->base; -- ring->wp = ring->base; -- cmd_ctxt->rp = cmd_ctxt->rbase; -- cmd_ctxt->wp = cmd_ctxt->rbase; -- } -- -- mhi_event = mhi_cntrl->mhi_event; -- er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -- mhi_event++) { -- struct mhi_ring *ring = &mhi_event->ring; -- -- /* Skip offload events */ -- if (mhi_event->offload_ev) -- continue; -- -- ring->rp = ring->base; -- ring->wp = ring->base; -- er_ctxt->rp = er_ctxt->rbase; -- er_ctxt->wp = er_ctxt->rbase; -- } -- -- /* Transition to next state */ -- if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { -- write_lock_irq(&mhi_cntrl->pm_lock); -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- if (cur_state != MHI_PM_POR) { -- dev_err(dev, "Error moving to state %s from %s\n", -- to_mhi_pm_state_str(MHI_PM_POR), -- to_mhi_pm_state_str(cur_state)); -- goto exit_sys_error_transition; -- } -- next_state = DEV_ST_TRANSITION_PBL; -- } else { -- next_state = DEV_ST_TRANSITION_READY; -- } -- -- mhi_queue_state_transition(mhi_cntrl, next_state); -- --exit_sys_error_transition: -- dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -- -- mutex_unlock(&mhi_cntrl->pm_mutex); --} -- --/* Queue a new work item and schedule work */ --int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, -- enum dev_st_transition state) --{ -- struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); -- unsigned long flags; -- -- if (!item) -- return -ENOMEM; -- -- item->state = state; -- spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); -- list_add_tail(&item->node, &mhi_cntrl->transition_list); -- spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); -- -- queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); -- -- return 0; --} -- --/* SYS_ERR worker */ --void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) --{ -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- /* skip if controller supports RDDM */ -- if (mhi_cntrl->rddm_image) { -- dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); -- return; -- } -- -- mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); --} -- --/* Device State Transition worker */ --void mhi_pm_st_worker(struct work_struct *work) --{ -- struct state_transition *itr, *tmp; -- LIST_HEAD(head); -- struct mhi_controller *mhi_cntrl = container_of(work, -- struct mhi_controller, -- st_worker); -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- spin_lock_irq(&mhi_cntrl->transition_lock); -- 
list_splice_tail_init(&mhi_cntrl->transition_list, &head); -- spin_unlock_irq(&mhi_cntrl->transition_lock); -- -- list_for_each_entry_safe(itr, tmp, &head, node) { -- list_del(&itr->node); -- dev_dbg(dev, "Handling state transition: %s\n", -- TO_DEV_STATE_TRANS_STR(itr->state)); -- -- switch (itr->state) { -- case DEV_ST_TRANSITION_PBL: -- write_lock_irq(&mhi_cntrl->pm_lock); -- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- mhi_fw_load_handler(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_SBL: -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->ee = MHI_EE_SBL; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- /* -- * The MHI devices are only created when the client -- * device switches its Execution Environment (EE) to -- * either SBL or AMSS states -- */ -- mhi_create_devices(mhi_cntrl); -- if (mhi_cntrl->fbc_download) -- mhi_download_amss_image(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_MISSION_MODE: -- mhi_pm_mission_mode_transition(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_FP: -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_cntrl->ee = MHI_EE_FP; -- write_unlock_irq(&mhi_cntrl->pm_lock); -- mhi_create_devices(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_READY: -- mhi_ready_state_transition(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_SYS_ERR: -- mhi_pm_sys_error_transition(mhi_cntrl); -- break; -- case DEV_ST_TRANSITION_DISABLE: -- mhi_pm_disable_transition(mhi_cntrl); -- break; -- default: -- break; -- } -- kfree(itr); -- } --} -- --int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_chan *itr, *tmp; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- enum mhi_pm_state new_state; -- int ret; -- -- if (mhi_cntrl->pm_state == MHI_PM_DISABLE) -- return -EINVAL; -- -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -- return -EIO; -- -- /* Return busy if there are any pending resources */ -- if (atomic_read(&mhi_cntrl->dev_wake) || -- atomic_read(&mhi_cntrl->pending_pkts)) -- return -EBUSY; -- -- /* Take MHI out of M2 state */ -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_cntrl->wake_get(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_cntrl->dev_state == MHI_STATE_M0 || -- mhi_cntrl->dev_state == MHI_STATE_M1 || -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_cntrl->wake_put(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- dev_err(dev, -- "Could not enter M0/M1 state"); -- return -EIO; -- } -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- -- if (atomic_read(&mhi_cntrl->dev_wake) || -- atomic_read(&mhi_cntrl->pending_pkts)) { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- return -EBUSY; -- } -- -- dev_dbg(dev, "Allowing M3 transition\n"); -- new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); -- if (new_state != MHI_PM_M3_ENTER) { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- dev_err(dev, -- "Error setting to PM state: %s from: %s\n", -- to_mhi_pm_state_str(MHI_PM_M3_ENTER), -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- return -EIO; -- } -- -- /* Set MHI to M3 and wait for completion */ -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- dev_dbg(dev, "Waiting for M3 completion\n"); -- -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_cntrl->dev_state == MHI_STATE_M3 || -- 
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- dev_err(dev, -- "Did not enter M3 state, MHI state: %s, PM state: %s\n", -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- return -EIO; -- } -- -- /* Notify clients about entering LPM */ -- list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { -- mutex_lock(&itr->mutex); -- if (itr->mhi_dev) -- mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); -- mutex_unlock(&itr->mutex); -- } -- -- return 0; --} --EXPORT_SYMBOL_GPL(mhi_pm_suspend); -- --int mhi_pm_resume(struct mhi_controller *mhi_cntrl) --{ -- struct mhi_chan *itr, *tmp; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- enum mhi_pm_state cur_state; -- int ret; -- -- dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", -- to_mhi_pm_state_str(mhi_cntrl->pm_state), -- TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -- -- if (mhi_cntrl->pm_state == MHI_PM_DISABLE) -- return 0; -- -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -- return -EIO; -- -- if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) -- return -EINVAL; -- -- /* Notify clients about exiting LPM */ -- list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { -- mutex_lock(&itr->mutex); -- if (itr->mhi_dev) -- mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); -- mutex_unlock(&itr->mutex); -- } -- -- write_lock_irq(&mhi_cntrl->pm_lock); -- cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); -- if (cur_state != MHI_PM_M3_EXIT) { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- dev_info(dev, -- "Error setting to PM state: %s from: %s\n", -- to_mhi_pm_state_str(MHI_PM_M3_EXIT), -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- return -EIO; -- } -- -- /* Set MHI to M0 and wait for completion */ -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_cntrl->dev_state == MHI_STATE_M0 || -- mhi_cntrl->dev_state == MHI_STATE_M2 || -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- dev_err(dev, -- "Did not enter M0 state, MHI state: %s, PM state: %s\n", -- TO_MHI_STATE_STR(mhi_cntrl->dev_state), -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- return -EIO; -- } -- -- return 0; --} --EXPORT_SYMBOL_GPL(mhi_pm_resume); -- --int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) --{ -- int ret; -- -- /* Wake up the device */ -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- read_unlock_bh(&mhi_cntrl->pm_lock); -- return -EIO; -- } -- mhi_cntrl->wake_get(mhi_cntrl, true); -- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -- mhi_trigger_resume(mhi_cntrl); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_cntrl->pm_state == MHI_PM_M0 || -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -- read_lock_bh(&mhi_cntrl->pm_lock); -- mhi_cntrl->wake_put(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); -- return -EIO; -- } -- -- return 0; --} -- --/* Assert device wake db */ --static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) --{ -- unsigned long flags; -- -- /* -- * If force flag is set, then increment the wake count value and -- * ring wake db -- */ -- 
if (unlikely(force)) { -- spin_lock_irqsave(&mhi_cntrl->wlock, flags); -- atomic_inc(&mhi_cntrl->dev_wake); -- if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && -- !mhi_cntrl->wake_set) { -- mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); -- mhi_cntrl->wake_set = true; -- } -- spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -- } else { -- /* -- * If resources are already requested, then just increment -- * the wake count value and return -- */ -- if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) -- return; -- -- spin_lock_irqsave(&mhi_cntrl->wlock, flags); -- if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && -- MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && -- !mhi_cntrl->wake_set) { -- mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); -- mhi_cntrl->wake_set = true; -- } -- spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -- } --} -- --/* De-assert device wake db */ --static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, -- bool override) --{ -- unsigned long flags; -- -- /* -- * Only continue if there is a single resource, else just decrement -- * and return -- */ -- if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) -- return; -- -- spin_lock_irqsave(&mhi_cntrl->wlock, flags); -- if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && -- MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && -- mhi_cntrl->wake_set) { -- mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); -- mhi_cntrl->wake_set = false; -- } -- spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); --} -- --int mhi_async_power_up(struct mhi_controller *mhi_cntrl) --{ -- enum mhi_state state; -- enum mhi_ee_type current_ee; -- enum dev_st_transition next_state; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- u32 val; -- int ret; -- -- dev_info(dev, "Requested to power ON\n"); -- -- /* Supply default wake routines if not provided by controller driver */ -- if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || -- !mhi_cntrl->wake_toggle) { -- mhi_cntrl->wake_get = mhi_assert_dev_wake; -- mhi_cntrl->wake_put = mhi_deassert_dev_wake; -- mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? 
-- mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; -- } -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- mhi_cntrl->pm_state = MHI_PM_DISABLE; -- -- ret = mhi_init_irq_setup(mhi_cntrl); -- if (ret) -- goto error_setup_irq; -- -- /* Setup BHI INTVEC */ -- write_lock_irq(&mhi_cntrl->pm_lock); -- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -- mhi_cntrl->pm_state = MHI_PM_POR; -- mhi_cntrl->ee = MHI_EE_MAX; -- current_ee = mhi_get_exec_env(mhi_cntrl); -- write_unlock_irq(&mhi_cntrl->pm_lock); -- -- /* Confirm that the device is in valid exec env */ -- if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { -- dev_err(dev, "%s is not a valid EE for power on\n", -- TO_MHI_EXEC_STR(current_ee)); -- ret = -EIO; -- goto error_async_power_up; -- } -- -- state = mhi_get_mhi_state(mhi_cntrl); -- dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", -- TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); -- -- if (state == MHI_STATE_SYS_ERR) { -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -- ret = wait_event_timeout(mhi_cntrl->state_event, -- MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || -- mhi_read_reg_field(mhi_cntrl, -- mhi_cntrl->regs, -- MHICTRL, -- MHICTRL_RESET_MASK, -- MHICTRL_RESET_SHIFT, -- &val) || -- !val, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- if (!ret) { -- ret = -EIO; -- dev_info(dev, "Failed to reset MHI due to syserr state\n"); -- goto error_async_power_up; -- } -- -- /* -- * device cleares INTVEC as part of RESET processing, -- * re-program it -- */ -- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -- } -- -- /* Transition to next state */ -- next_state = MHI_IN_PBL(current_ee) ? -- DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; -- -- mhi_queue_state_transition(mhi_cntrl, next_state); -- -- mutex_unlock(&mhi_cntrl->pm_mutex); -- -- dev_info(dev, "Power on setup success\n"); -- -- return 0; -- --error_async_power_up: -- mhi_deinit_free_irq(mhi_cntrl); -- --error_setup_irq: -- mhi_cntrl->pm_state = MHI_PM_DISABLE; -- mutex_unlock(&mhi_cntrl->pm_mutex); -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_async_power_up); -- --void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) --{ -- enum mhi_pm_state cur_state, transition_state; -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- -- mutex_lock(&mhi_cntrl->pm_mutex); -- write_lock_irq(&mhi_cntrl->pm_lock); -- cur_state = mhi_cntrl->pm_state; -- if (cur_state == MHI_PM_DISABLE) { -- write_unlock_irq(&mhi_cntrl->pm_lock); -- mutex_unlock(&mhi_cntrl->pm_mutex); -- return; /* Already powered down */ -- } -- -- /* If it's not a graceful shutdown, force MHI to linkdown state */ -- transition_state = (graceful) ? 
MHI_PM_SHUTDOWN_PROCESS : -- MHI_PM_LD_ERR_FATAL_DETECT; -- -- cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); -- if (cur_state != transition_state) { -- dev_err(dev, "Failed to move to state: %s from: %s\n", -- to_mhi_pm_state_str(transition_state), -- to_mhi_pm_state_str(mhi_cntrl->pm_state)); -- /* Force link down or error fatal detected state */ -- mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -- } -- -- /* mark device inactive to avoid any further host processing */ -- mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; -- mhi_cntrl->dev_state = MHI_STATE_RESET; -- -- wake_up_all(&mhi_cntrl->state_event); -- -- write_unlock_irq(&mhi_cntrl->pm_lock); -- mutex_unlock(&mhi_cntrl->pm_mutex); -- -- mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); -- -- /* Wait for shutdown to complete */ -- flush_work(&mhi_cntrl->st_worker); -- -- free_irq(mhi_cntrl->irq[0], mhi_cntrl); --} --EXPORT_SYMBOL_GPL(mhi_power_down); -- --int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) --{ -- int ret = mhi_async_power_up(mhi_cntrl); -- -- if (ret) -- return ret; -- -- wait_event_timeout(mhi_cntrl->state_event, -- MHI_IN_MISSION_MODE(mhi_cntrl->ee) || -- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- -- ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; -- if (ret) -- mhi_power_down(mhi_cntrl, false); -- -- return ret; --} --EXPORT_SYMBOL(mhi_sync_power_up); -- --int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) --{ -- struct device *dev = &mhi_cntrl->mhi_dev->dev; -- int ret; -- -- /* Check if device is already in RDDM */ -- if (mhi_cntrl->ee == MHI_EE_RDDM) -- return 0; -- -- dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n"); -- mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); -- -- /* Wait for RDDM event */ -- ret = wait_event_timeout(mhi_cntrl->state_event, -- mhi_cntrl->ee == MHI_EE_RDDM, -- msecs_to_jiffies(mhi_cntrl->timeout_ms)); -- ret = ret ? 0 : -EIO; -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); -- --void mhi_device_get(struct mhi_device *mhi_dev) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- -- mhi_dev->dev_wake++; -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -- mhi_trigger_resume(mhi_cntrl); -- -- mhi_cntrl->wake_get(mhi_cntrl, true); -- read_unlock_bh(&mhi_cntrl->pm_lock); --} --EXPORT_SYMBOL_GPL(mhi_device_get); -- --int mhi_device_get_sync(struct mhi_device *mhi_dev) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- int ret; -- -- ret = __mhi_device_get_sync(mhi_cntrl); -- if (!ret) -- mhi_dev->dev_wake++; -- -- return ret; --} --EXPORT_SYMBOL_GPL(mhi_device_get_sync); -- --void mhi_device_put(struct mhi_device *mhi_dev) --{ -- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -- -- mhi_dev->dev_wake--; -- read_lock_bh(&mhi_cntrl->pm_lock); -- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -- mhi_trigger_resume(mhi_cntrl); -- -- mhi_cntrl->wake_put(mhi_cntrl, false); -- read_unlock_bh(&mhi_cntrl->pm_lock); --} --EXPORT_SYMBOL_GPL(mhi_device_put); -diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig -new file mode 100644 -index 0000000000000..da5cd0c9fc620 ---- /dev/null -+++ b/drivers/bus/mhi/host/Kconfig -@@ -0,0 +1,31 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# -+# MHI bus -+# -+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+# -+ -+config MHI_BUS -+ tristate "Modem Host Interface (MHI) bus" -+ help -+ Bus driver for MHI protocol. 
Modem Host Interface (MHI) is a -+ communication protocol used by the host processors to control -+ and communicate with modem devices over a high speed peripheral -+ bus or shared memory. -+ -+config MHI_BUS_DEBUG -+ bool "Debugfs support for the MHI bus" -+ depends on MHI_BUS && DEBUG_FS -+ help -+ Enable debugfs support for use with the MHI transport. Allows -+ reading and/or modifying some values within the MHI controller -+ for debug and test purposes. -+ -+config MHI_BUS_PCI_GENERIC -+ tristate "MHI PCI controller driver" -+ depends on MHI_BUS -+ depends on PCI -+ help -+ This driver provides MHI PCI controller driver for devices such as -+ Qualcomm SDX55 based PCIe modems. -+ -diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile -new file mode 100644 -index 0000000000000..859c2f38451c6 ---- /dev/null -+++ b/drivers/bus/mhi/host/Makefile -@@ -0,0 +1,6 @@ -+obj-$(CONFIG_MHI_BUS) += mhi.o -+mhi-y := init.o main.o pm.o boot.o -+mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o -+ -+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o -+mhi_pci_generic-y += pci_generic.o -diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c -new file mode 100644 -index 0000000000000..c9dfb1a48ad6d ---- /dev/null -+++ b/drivers/bus/mhi/host/boot.c -@@ -0,0 +1,541 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+/* Setup RDDM vector table for RDDM transfer and program RXVEC */ -+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, -+ struct image_info *img_info) -+{ -+ struct mhi_buf *mhi_buf = img_info->mhi_buf; -+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; -+ void __iomem *base = mhi_cntrl->bhie; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 sequence_id; -+ unsigned int i; -+ -+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { -+ bhi_vec->dma_addr = mhi_buf->dma_addr; -+ bhi_vec->size = mhi_buf->len; -+ } -+ -+ dev_dbg(dev, "BHIe programming for RDDM\n"); -+ -+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, -+ upper_32_bits(mhi_buf->dma_addr)); -+ -+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, -+ lower_32_bits(mhi_buf->dma_addr)); -+ -+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); -+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); -+ -+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, -+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, -+ sequence_id); -+ -+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", -+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id); -+} -+ -+/* Collect RDDM buffer during kernel panic */ -+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) -+{ -+ int ret; -+ u32 rx_status; -+ enum mhi_ee_type ee; -+ const u32 delayus = 2000; -+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; -+ const u32 rddm_timeout_us = 200000; -+ int rddm_retry = rddm_timeout_us / delayus; -+ void __iomem *base = mhi_cntrl->bhie; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ TO_MHI_EXEC_STR(mhi_cntrl->ee)); -+ -+ /* -+ * This should only be executing during a kernel panic, we expect all -+ * other cores to shutdown while we're 
collecting RDDM buffer. After -+ * returning from this function, we expect the device to reset. -+ * -+ * Normaly, we read/write pm_state only after grabbing the -+ * pm_lock, since we're in a panic, skipping it. Also there is no -+ * gurantee that this state change would take effect since -+ * we're setting it w/o grabbing pm_lock -+ */ -+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -+ /* update should take the effect immediately */ -+ smp_wmb(); -+ -+ /* -+ * Make sure device is not already in RDDM. In case the device asserts -+ * and a kernel panic follows, device will already be in RDDM. -+ * Do not trigger SYS ERR again and proceed with waiting for -+ * image download completion. -+ */ -+ ee = mhi_get_exec_env(mhi_cntrl); -+ if (ee == MHI_EE_MAX) -+ goto error_exit_rddm; -+ -+ if (ee != MHI_EE_RDDM) { -+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); -+ -+ dev_dbg(dev, "Waiting for device to enter RDDM\n"); -+ while (rddm_retry--) { -+ ee = mhi_get_exec_env(mhi_cntrl); -+ if (ee == MHI_EE_RDDM) -+ break; -+ -+ udelay(delayus); -+ } -+ -+ if (rddm_retry <= 0) { -+ /* Hardware reset so force device to enter RDDM */ -+ dev_dbg(dev, -+ "Did not enter RDDM, do a host req reset\n"); -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, -+ MHI_SOC_RESET_REQ_OFFSET, -+ MHI_SOC_RESET_REQ); -+ udelay(delayus); -+ } -+ -+ ee = mhi_get_exec_env(mhi_cntrl); -+ } -+ -+ dev_dbg(dev, -+ "Waiting for RDDM image download via BHIe, current EE:%s\n", -+ TO_MHI_EXEC_STR(ee)); -+ -+ while (retry--) { -+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, -+ BHIE_RXVECSTATUS_STATUS_BMSK, -+ BHIE_RXVECSTATUS_STATUS_SHFT, -+ &rx_status); -+ if (ret) -+ return -EIO; -+ -+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) -+ return 0; -+ -+ udelay(delayus); -+ } -+ -+ ee = mhi_get_exec_env(mhi_cntrl); -+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); -+ -+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); -+ -+error_exit_rddm: -+ dev_err(dev, "RDDM transfer failed. Current EE: %s\n", -+ TO_MHI_EXEC_STR(ee)); -+ -+ return -EIO; -+} -+ -+/* Download RDDM image from device */ -+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) -+{ -+ void __iomem *base = mhi_cntrl->bhie; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 rx_status; -+ -+ if (in_panic) -+ return __mhi_download_rddm_in_panic(mhi_cntrl); -+ -+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n"); -+ -+ /* Wait for the image download to complete */ -+ wait_event_timeout(mhi_cntrl->state_event, -+ mhi_read_reg_field(mhi_cntrl, base, -+ BHIE_RXVECSTATUS_OFFS, -+ BHIE_RXVECSTATUS_STATUS_BMSK, -+ BHIE_RXVECSTATUS_STATUS_SHFT, -+ &rx_status) || rx_status, -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; -+} -+EXPORT_SYMBOL_GPL(mhi_download_rddm_image); -+ -+static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, -+ const struct mhi_buf *mhi_buf) -+{ -+ void __iomem *base = mhi_cntrl->bhie; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock; -+ u32 tx_status, sequence_id; -+ int ret; -+ -+ read_lock_bh(pm_lock); -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ read_unlock_bh(pm_lock); -+ return -EIO; -+ } -+ -+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); -+ dev_dbg(dev, "Starting image download via BHIe. 
Sequence ID: %u\n", -+ sequence_id); -+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, -+ upper_32_bits(mhi_buf->dma_addr)); -+ -+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, -+ lower_32_bits(mhi_buf->dma_addr)); -+ -+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); -+ -+ mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, -+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, -+ sequence_id); -+ read_unlock_bh(pm_lock); -+ -+ /* Wait for the image download to complete */ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -+ mhi_read_reg_field(mhi_cntrl, base, -+ BHIE_TXVECSTATUS_OFFS, -+ BHIE_TXVECSTATUS_STATUS_BMSK, -+ BHIE_TXVECSTATUS_STATUS_SHFT, -+ &tx_status) || tx_status, -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL) -+ return -EIO; -+ -+ return (!ret) ? -ETIMEDOUT : 0; -+} -+ -+static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, -+ dma_addr_t dma_addr, -+ size_t size) -+{ -+ u32 tx_status, val, session_id; -+ int i, ret; -+ void __iomem *base = mhi_cntrl->bhi; -+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ struct { -+ char *name; -+ u32 offset; -+ } error_reg[] = { -+ { "ERROR_CODE", BHI_ERRCODE }, -+ { "ERROR_DBG1", BHI_ERRDBG1 }, -+ { "ERROR_DBG2", BHI_ERRDBG2 }, -+ { "ERROR_DBG3", BHI_ERRDBG3 }, -+ { NULL }, -+ }; -+ -+ read_lock_bh(pm_lock); -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ read_unlock_bh(pm_lock); -+ goto invalid_pm_state; -+ } -+ -+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); -+ dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", -+ session_id); -+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); -+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, -+ upper_32_bits(dma_addr)); -+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, -+ lower_32_bits(dma_addr)); -+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); -+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); -+ read_unlock_bh(pm_lock); -+ -+ /* Wait for the image download to complete */ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || -+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, -+ BHI_STATUS_MASK, BHI_STATUS_SHIFT, -+ &tx_status) || tx_status, -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -+ goto invalid_pm_state; -+ -+ if (tx_status == BHI_STATUS_ERROR) { -+ dev_err(dev, "Image transfer failed\n"); -+ read_lock_bh(pm_lock); -+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ for (i = 0; error_reg[i].name; i++) { -+ ret = mhi_read_reg(mhi_cntrl, base, -+ error_reg[i].offset, &val); -+ if (ret) -+ break; -+ dev_err(dev, "Reg: %s value: 0x%x\n", -+ error_reg[i].name, val); -+ } -+ } -+ read_unlock_bh(pm_lock); -+ goto invalid_pm_state; -+ } -+ -+ return (!ret) ? 
-ETIMEDOUT : 0; -+ -+invalid_pm_state: -+ -+ return -EIO; -+} -+ -+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, -+ struct image_info *image_info) -+{ -+ int i; -+ struct mhi_buf *mhi_buf = image_info->mhi_buf; -+ -+ for (i = 0; i < image_info->entries; i++, mhi_buf++) -+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, -+ mhi_buf->buf, mhi_buf->dma_addr); -+ -+ kfree(image_info->mhi_buf); -+ kfree(image_info); -+} -+ -+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, -+ struct image_info **image_info, -+ size_t alloc_size) -+{ -+ size_t seg_size = mhi_cntrl->seg_len; -+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; -+ int i; -+ struct image_info *img_info; -+ struct mhi_buf *mhi_buf; -+ -+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); -+ if (!img_info) -+ return -ENOMEM; -+ -+ /* Allocate memory for entries */ -+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), -+ GFP_KERNEL); -+ if (!img_info->mhi_buf) -+ goto error_alloc_mhi_buf; -+ -+ /* Allocate and populate vector table */ -+ mhi_buf = img_info->mhi_buf; -+ for (i = 0; i < segments; i++, mhi_buf++) { -+ size_t vec_size = seg_size; -+ -+ /* Vector table is the last entry */ -+ if (i == segments - 1) -+ vec_size = sizeof(struct bhi_vec_entry) * i; -+ -+ mhi_buf->len = vec_size; -+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -+ vec_size, &mhi_buf->dma_addr, -+ GFP_KERNEL); -+ if (!mhi_buf->buf) -+ goto error_alloc_segment; -+ } -+ -+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; -+ img_info->entries = segments; -+ *image_info = img_info; -+ -+ return 0; -+ -+error_alloc_segment: -+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) -+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, -+ mhi_buf->buf, mhi_buf->dma_addr); -+ -+error_alloc_mhi_buf: -+ kfree(img_info); -+ -+ return -ENOMEM; -+} -+ -+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, -+ const struct firmware *firmware, -+ struct image_info *img_info) -+{ -+ size_t remainder = firmware->size; -+ size_t to_cpy; -+ const u8 *buf = firmware->data; -+ struct mhi_buf *mhi_buf = img_info->mhi_buf; -+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; -+ -+ while (remainder) { -+ to_cpy = min(remainder, mhi_buf->len); -+ memcpy(mhi_buf->buf, buf, to_cpy); -+ bhi_vec->dma_addr = mhi_buf->dma_addr; -+ bhi_vec->size = to_cpy; -+ -+ buf += to_cpy; -+ remainder -= to_cpy; -+ bhi_vec++; -+ mhi_buf++; -+ } -+} -+ -+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) -+{ -+ const struct firmware *firmware = NULL; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_pm_state new_state; -+ const char *fw_name; -+ void *buf; -+ dma_addr_t dma_addr; -+ size_t size; -+ int i, ret; -+ -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ dev_err(dev, "Device MHI is not in valid state\n"); -+ return; -+ } -+ -+ /* save hardware info from BHI */ -+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU, -+ &mhi_cntrl->serial_number); -+ if (ret) -+ dev_err(dev, "Could not capture serial number via BHI\n"); -+ -+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { -+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), -+ &mhi_cntrl->oem_pk_hash[i]); -+ if (ret) { -+ dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); -+ break; -+ } -+ } -+ -+ /* wait for ready on pass through or any other execution environment */ -+ if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL) -+ goto fw_load_ready_state; -+ -+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
-+ mhi_cntrl->edl_image : mhi_cntrl->fw_image; -+ -+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || -+ !mhi_cntrl->seg_len))) { -+ dev_err(dev, -+ "No firmware image defined or !sbl_size || !seg_len\n"); -+ goto error_fw_load; -+ } -+ -+ ret = request_firmware(&firmware, fw_name, dev); -+ if (ret) { -+ dev_err(dev, "Error loading firmware: %d\n", ret); -+ goto error_fw_load; -+ } -+ -+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; -+ -+ /* SBL size provided is maximum size, not necessarily the image size */ -+ if (size > firmware->size) -+ size = firmware->size; -+ -+ buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, -+ GFP_KERNEL); -+ if (!buf) { -+ release_firmware(firmware); -+ goto error_fw_load; -+ } -+ -+ /* Download image using BHI */ -+ memcpy(buf, firmware->data, size); -+ ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); -+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); -+ -+ /* Error or in EDL mode, we're done */ -+ if (ret) { -+ dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); -+ release_firmware(firmware); -+ goto error_fw_load; -+ } -+ -+ /* Wait for ready since EDL image was loaded */ -+ if (fw_name == mhi_cntrl->edl_image) { -+ release_firmware(firmware); -+ goto fw_load_ready_state; -+ } -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->dev_state = MHI_STATE_RESET; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ /* -+ * If we're doing fbc, populate vector tables while -+ * device transitioning into MHI READY state -+ */ -+ if (mhi_cntrl->fbc_download) { -+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, -+ firmware->size); -+ if (ret) { -+ release_firmware(firmware); -+ goto error_fw_load; -+ } -+ -+ /* Load the firmware into BHIE vec table */ -+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); -+ } -+ -+ release_firmware(firmware); -+ -+fw_load_ready_state: -+ /* Transitioning into MHI RESET->READY state */ -+ ret = mhi_ready_state_transition(mhi_cntrl); -+ if (ret) { -+ dev_err(dev, "MHI did not enter READY state\n"); -+ goto error_ready_state; -+ } -+ -+ dev_info(dev, "Wait for device to enter SBL or Mission mode\n"); -+ return; -+ -+error_ready_state: -+ if (mhi_cntrl->fbc_download) { -+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); -+ mhi_cntrl->fbc_image = NULL; -+ } -+ -+error_fw_load: -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (new_state == MHI_PM_FW_DL_ERR) -+ wake_up_all(&mhi_cntrl->state_event); -+} -+ -+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) -+{ -+ struct image_info *image_info = mhi_cntrl->fbc_image; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_pm_state new_state; -+ int ret; -+ -+ if (!image_info) -+ return -EIO; -+ -+ ret = mhi_fw_load_bhie(mhi_cntrl, -+ /* Vector table is the last entry */ -+ &image_info->mhi_buf[image_info->entries - 1]); -+ if (ret) { -+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (new_state == MHI_PM_FW_DL_ERR) -+ wake_up_all(&mhi_cntrl->state_event); -+ } -+ -+ return ret; -+} -diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c -new file mode 100644 -index 0000000000000..d818586c229d2 ---- /dev/null -+++ b/drivers/bus/mhi/host/debugfs.c -@@ -0,0 +1,413 @@ -+// 
SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (c) 2020, The Linux Foundation. All rights reserved. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+static int mhi_debugfs_states_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ -+ /* states */ -+ seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ TO_MHI_EXEC_STR(mhi_cntrl->ee), -+ mhi_cntrl->wake_set ? "true" : "false"); -+ -+ /* counters */ -+ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, -+ mhi_cntrl->M3); -+ -+ seq_printf(m, " device wake: %u pending packets: %u\n", -+ atomic_read(&mhi_cntrl->dev_wake), -+ atomic_read(&mhi_cntrl->pending_pkts)); -+ -+ return 0; -+} -+ -+static int mhi_debugfs_events_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ struct mhi_event *mhi_event; -+ struct mhi_event_ctxt *er_ctxt; -+ int i; -+ -+ if (!mhi_is_active(mhi_cntrl)) { -+ seq_puts(m, "Device not ready\n"); -+ return -ENODEV; -+ } -+ -+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; -+ i++, er_ctxt++, mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ if (mhi_event->offload_ev) { -+ seq_printf(m, "Index: %d is an offload event ring\n", -+ i); -+ continue; -+ } -+ -+ seq_printf(m, "Index: %d intmod count: %lu time: %lu", -+ i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >> -+ EV_CTX_INTMODC_SHIFT, -+ (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >> -+ EV_CTX_INTMODT_SHIFT); -+ -+ seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase), -+ le64_to_cpu(er_ctxt->rlen)); -+ -+ seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp), -+ le64_to_cpu(er_ctxt->wp)); -+ -+ seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp, -+ &mhi_event->db_cfg.db_val); -+ } -+ -+ return 0; -+} -+ -+static int mhi_debugfs_channels_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ struct mhi_chan *mhi_chan; -+ struct mhi_chan_ctxt *chan_ctxt; -+ int i; -+ -+ if (!mhi_is_active(mhi_cntrl)) { -+ seq_puts(m, "Device not ready\n"); -+ return -ENODEV; -+ } -+ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { -+ struct mhi_ring *ring = &mhi_chan->tre_ring; -+ -+ if (mhi_chan->offload_ch) { -+ seq_printf(m, "%s(%u) is an offload channel\n", -+ mhi_chan->name, mhi_chan->chan); -+ continue; -+ } -+ -+ if (!mhi_chan->mhi_dev) -+ continue; -+ -+ seq_printf(m, -+ "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx", -+ mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) & -+ CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT, -+ (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >> -+ CHAN_CTX_BRSTMODE_SHIFT, (le32_to_cpu(chan_ctxt->chcfg) & -+ CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT); -+ -+ seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype), -+ le32_to_cpu(chan_ctxt->erindex)); -+ -+ seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx", -+ le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen), -+ le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp)); -+ -+ seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 
0x%pad\n", -+ ring->rp, ring->wp, -+ &mhi_chan->db_cfg.db_val); -+ } -+ -+ return 0; -+} -+ -+static int mhi_device_info_show(struct device *dev, void *data) -+{ -+ struct mhi_device *mhi_dev; -+ -+ if (dev->bus != &mhi_bus_type) -+ return 0; -+ -+ mhi_dev = to_mhi_device(dev); -+ -+ seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u", -+ mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer", -+ mhi_dev->dev_wake); -+ -+ /* for transfer device types only */ -+ if (mhi_dev->dev_type == MHI_DEVICE_XFER) -+ seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)", -+ mhi_dev->ul_chan_id, mhi_dev->dl_chan_id); -+ -+ seq_puts((struct seq_file *)data, "\n"); -+ -+ return 0; -+} -+ -+static int mhi_debugfs_devices_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ -+ if (!mhi_is_active(mhi_cntrl)) { -+ seq_puts(m, "Device not ready\n"); -+ return -ENODEV; -+ } -+ -+ /* Show controller and client(s) info */ -+ mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); -+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); -+ -+ return 0; -+} -+ -+static int mhi_debugfs_regdump_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ enum mhi_state state; -+ enum mhi_ee_type ee; -+ int i, ret = -EIO; -+ u32 val; -+ void __iomem *mhi_base = mhi_cntrl->regs; -+ void __iomem *bhi_base = mhi_cntrl->bhi; -+ void __iomem *bhie_base = mhi_cntrl->bhie; -+ void __iomem *wake_db = mhi_cntrl->wake_db; -+ struct { -+ const char *name; -+ int offset; -+ void __iomem *base; -+ } regs[] = { -+ { "MHI_REGLEN", MHIREGLEN, mhi_base}, -+ { "MHI_VER", MHIVER, mhi_base}, -+ { "MHI_CFG", MHICFG, mhi_base}, -+ { "MHI_CTRL", MHICTRL, mhi_base}, -+ { "MHI_STATUS", MHISTATUS, mhi_base}, -+ { "MHI_WAKE_DB", 0, wake_db}, -+ { "BHI_EXECENV", BHI_EXECENV, bhi_base}, -+ { "BHI_STATUS", BHI_STATUS, bhi_base}, -+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, -+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, -+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, -+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, -+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, -+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, -+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, -+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, -+ { NULL }, -+ }; -+ -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -+ return ret; -+ -+ seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ TO_MHI_EXEC_STR(mhi_cntrl->ee)); -+ -+ state = mhi_get_mhi_state(mhi_cntrl); -+ ee = mhi_get_exec_env(mhi_cntrl); -+ seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee), -+ TO_MHI_STATE_STR(state)); -+ -+ for (i = 0; regs[i].name; i++) { -+ if (!regs[i].base) -+ continue; -+ ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset, -+ &val); -+ if (ret) -+ continue; -+ -+ seq_printf(m, "%s: 0x%x\n", regs[i].name, val); -+ } -+ -+ return 0; -+} -+ -+static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -+ -+ if (!mhi_is_active(mhi_cntrl)) { -+ seq_puts(m, "Device not ready\n"); -+ return -ENODEV; -+ } -+ -+ seq_printf(m, -+ "Wake count: %d\n%s\n", mhi_dev->dev_wake, -+ "Usage: echo get/put > device_wake to vote/unvote for M0"); -+ -+ return 0; -+} -+ -+static ssize_t mhi_debugfs_device_wake_write(struct file *file, -+ const char __user *ubuf, -+ 
size_t count, loff_t *ppos) -+{ -+ struct seq_file *m = file->private_data; -+ struct mhi_controller *mhi_cntrl = m->private; -+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -+ char buf[16]; -+ int ret = -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) -+ return -EFAULT; -+ -+ if (!strncmp(buf, "get", 3)) { -+ ret = mhi_device_get_sync(mhi_dev); -+ } else if (!strncmp(buf, "put", 3)) { -+ mhi_device_put(mhi_dev); -+ ret = 0; -+ } -+ -+ return ret ? ret : count; -+} -+ -+static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d) -+{ -+ struct mhi_controller *mhi_cntrl = m->private; -+ -+ seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms); -+ -+ return 0; -+} -+ -+static ssize_t mhi_debugfs_timeout_ms_write(struct file *file, -+ const char __user *ubuf, -+ size_t count, loff_t *ppos) -+{ -+ struct seq_file *m = file->private_data; -+ struct mhi_controller *mhi_cntrl = m->private; -+ u32 timeout_ms; -+ -+ if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms)) -+ return -EINVAL; -+ -+ mhi_cntrl->timeout_ms = timeout_ms; -+ -+ return count; -+} -+ -+static int mhi_debugfs_states_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_states_show, inode->i_private); -+} -+ -+static int mhi_debugfs_events_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_events_show, inode->i_private); -+} -+ -+static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_channels_show, inode->i_private); -+} -+ -+static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_devices_show, inode->i_private); -+} -+ -+static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_regdump_show, inode->i_private); -+} -+ -+static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private); -+} -+ -+static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp) -+{ -+ return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private); -+} -+ -+static const struct file_operations debugfs_states_fops = { -+ .open = mhi_debugfs_states_open, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_events_fops = { -+ .open = mhi_debugfs_events_open, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_channels_fops = { -+ .open = mhi_debugfs_channels_open, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_devices_fops = { -+ .open = mhi_debugfs_devices_open, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_regdump_fops = { -+ .open = mhi_debugfs_regdump_open, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_device_wake_fops = { -+ .open = mhi_debugfs_device_wake_open, -+ .write = mhi_debugfs_device_wake_write, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static const struct file_operations debugfs_timeout_ms_fops = { -+ .open = mhi_debugfs_timeout_ms_open, -+ .write = mhi_debugfs_timeout_ms_write, -+ .release = single_release, -+ .read = seq_read, -+}; -+ -+static struct dentry *mhi_debugfs_root; -+ -+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) -+{ -+ 
mhi_cntrl->debugfs_dentry = -+ debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev), -+ mhi_debugfs_root); -+ -+ debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_states_fops); -+ debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_events_fops); -+ debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_channels_fops); -+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_devices_fops); -+ debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_regdump_fops); -+ debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_device_wake_fops); -+ debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry, -+ mhi_cntrl, &debugfs_timeout_ms_fops); -+} -+ -+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) -+{ -+ debugfs_remove_recursive(mhi_cntrl->debugfs_dentry); -+ mhi_cntrl->debugfs_dentry = NULL; -+} -+ -+void mhi_debugfs_init(void) -+{ -+ mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL); -+} -+ -+void mhi_debugfs_exit(void) -+{ -+ debugfs_remove_recursive(mhi_debugfs_root); -+} -diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c -new file mode 100644 -index 0000000000000..829d4fca7ddc9 ---- /dev/null -+++ b/drivers/bus/mhi/host/init.c -@@ -0,0 +1,1443 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+static DEFINE_IDA(mhi_controller_ida); -+ -+const char * const mhi_ee_str[MHI_EE_MAX] = { -+ [MHI_EE_PBL] = "PRIMARY BOOTLOADER", -+ [MHI_EE_SBL] = "SECONDARY BOOTLOADER", -+ [MHI_EE_AMSS] = "MISSION MODE", -+ [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", -+ [MHI_EE_WFW] = "WLAN FIRMWARE", -+ [MHI_EE_PTHRU] = "PASS THROUGH", -+ [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", -+ [MHI_EE_FP] = "FLASH PROGRAMMER", -+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE", -+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", -+}; -+ -+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { -+ [DEV_ST_TRANSITION_PBL] = "PBL", -+ [DEV_ST_TRANSITION_READY] = "READY", -+ [DEV_ST_TRANSITION_SBL] = "SBL", -+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", -+ [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", -+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", -+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE", -+}; -+ -+const char * const mhi_state_str[MHI_STATE_MAX] = { -+ [MHI_STATE_RESET] = "RESET", -+ [MHI_STATE_READY] = "READY", -+ [MHI_STATE_M0] = "M0", -+ [MHI_STATE_M1] = "M1", -+ [MHI_STATE_M2] = "M2", -+ [MHI_STATE_M3] = "M3", -+ [MHI_STATE_M3_FAST] = "M3 FAST", -+ [MHI_STATE_BHI] = "BHI", -+ [MHI_STATE_SYS_ERR] = "SYS ERROR", -+}; -+ -+const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { -+ [MHI_CH_STATE_TYPE_RESET] = "RESET", -+ [MHI_CH_STATE_TYPE_STOP] = "STOP", -+ [MHI_CH_STATE_TYPE_START] = "START", -+}; -+ -+static const char * const mhi_pm_state_str[] = { -+ [MHI_PM_STATE_DISABLE] = "DISABLE", -+ [MHI_PM_STATE_POR] = "POWER ON RESET", -+ [MHI_PM_STATE_M0] = "M0", -+ [MHI_PM_STATE_M2] = "M2", -+ [MHI_PM_STATE_M3_ENTER] = "M?->M3", -+ [MHI_PM_STATE_M3] = "M3", -+ [MHI_PM_STATE_M3_EXIT] = "M3->M0", -+ [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", -+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", -+ 
[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", -+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", -+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", -+}; -+ -+const char *to_mhi_pm_state_str(u32 state) -+{ -+ int index; -+ -+ if (state) -+ index = __fls(state); -+ -+ if (!state || index >= ARRAY_SIZE(mhi_pm_state_str)) -+ return "Invalid State"; -+ -+ return mhi_pm_state_str[index]; -+} -+ -+static ssize_t serial_number_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ -+ return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", -+ mhi_cntrl->serial_number); -+} -+static DEVICE_ATTR_RO(serial_number); -+ -+static ssize_t oem_pk_hash_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ int i, cnt = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) -+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, -+ "OEMPKHASH[%d]: 0x%x\n", i, -+ mhi_cntrl->oem_pk_hash[i]); -+ -+ return cnt; -+} -+static DEVICE_ATTR_RO(oem_pk_hash); -+ -+static struct attribute *mhi_dev_attrs[] = { -+ &dev_attr_serial_number.attr, -+ &dev_attr_oem_pk_hash.attr, -+ NULL, -+}; -+ATTRIBUTE_GROUPS(mhi_dev); -+ -+/* MHI protocol requires the transfer ring to be aligned with ring length */ -+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring, -+ u64 len) -+{ -+ ring->alloc_size = len + (len - 1); -+ ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -+ &ring->dma_handle, GFP_KERNEL); -+ if (!ring->pre_aligned) -+ return -ENOMEM; -+ -+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); -+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); -+ -+ return 0; -+} -+ -+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) -+{ -+ int i; -+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -+ -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ -+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -+ } -+ -+ free_irq(mhi_cntrl->irq[0], mhi_cntrl); -+} -+ -+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; -+ int i, ret; -+ -+ /* if controller driver has set irq_flags, use it */ -+ if (mhi_cntrl->irq_flags) -+ irq_flags = mhi_cntrl->irq_flags; -+ -+ /* Setup BHI_INTVEC IRQ */ -+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, -+ mhi_intvec_threaded_handler, -+ irq_flags, -+ "bhi", mhi_cntrl); -+ if (ret) -+ return ret; -+ -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ -+ if (mhi_event->irq >= mhi_cntrl->nr_irqs) { -+ dev_err(dev, "irq %d not available for event ring\n", -+ mhi_event->irq); -+ ret = -EINVAL; -+ goto error_request; -+ } -+ -+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq], -+ mhi_irq_handler, -+ irq_flags, -+ "mhi", mhi_event); -+ if (ret) { -+ dev_err(dev, "Error requesting irq:%d for ev:%d\n", -+ mhi_cntrl->irq[mhi_event->irq], i); -+ goto error_request; -+ } -+ } -+ -+ return 0; -+ -+error_request: -+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) { -+ if (mhi_event->offload_ev) 
-+ continue; -+ -+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -+ } -+ free_irq(mhi_cntrl->irq[0], mhi_cntrl); -+ -+ return ret; -+} -+ -+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) -+{ -+ int i; -+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; -+ struct mhi_cmd *mhi_cmd; -+ struct mhi_event *mhi_event; -+ struct mhi_ring *ring; -+ -+ mhi_cmd = mhi_cntrl->mhi_cmd; -+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { -+ ring = &mhi_cmd->ring; -+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -+ ring->pre_aligned, ring->dma_handle); -+ ring->base = NULL; -+ ring->iommu_base = 0; -+ } -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, -+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, -+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); -+ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ -+ ring = &mhi_event->ring; -+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -+ ring->pre_aligned, ring->dma_handle); -+ ring->base = NULL; -+ ring->iommu_base = 0; -+ } -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * -+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, -+ mhi_ctxt->er_ctxt_addr); -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * -+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, -+ mhi_ctxt->chan_ctxt_addr); -+ -+ kfree(mhi_ctxt); -+ mhi_cntrl->mhi_ctxt = NULL; -+} -+ -+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_ctxt *mhi_ctxt; -+ struct mhi_chan_ctxt *chan_ctxt; -+ struct mhi_event_ctxt *er_ctxt; -+ struct mhi_cmd_ctxt *cmd_ctxt; -+ struct mhi_chan *mhi_chan; -+ struct mhi_event *mhi_event; -+ struct mhi_cmd *mhi_cmd; -+ u32 tmp; -+ int ret = -ENOMEM, i; -+ -+ atomic_set(&mhi_cntrl->dev_wake, 0); -+ atomic_set(&mhi_cntrl->pending_pkts, 0); -+ -+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); -+ if (!mhi_ctxt) -+ return -ENOMEM; -+ -+ /* Setup channel ctxt */ -+ mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -+ sizeof(*mhi_ctxt->chan_ctxt) * -+ mhi_cntrl->max_chan, -+ &mhi_ctxt->chan_ctxt_addr, -+ GFP_KERNEL); -+ if (!mhi_ctxt->chan_ctxt) -+ goto error_alloc_chan_ctxt; -+ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ chan_ctxt = mhi_ctxt->chan_ctxt; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { -+ /* Skip if it is an offload channel */ -+ if (mhi_chan->offload_ch) -+ continue; -+ -+ tmp = le32_to_cpu(chan_ctxt->chcfg); -+ tmp &= ~CHAN_CTX_CHSTATE_MASK; -+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); -+ tmp &= ~CHAN_CTX_BRSTMODE_MASK; -+ tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT); -+ tmp &= ~CHAN_CTX_POLLCFG_MASK; -+ tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT); -+ chan_ctxt->chcfg = cpu_to_le32(tmp); -+ -+ chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); -+ chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); -+ -+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; -+ } -+ -+ /* Setup event context */ -+ mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -+ sizeof(*mhi_ctxt->er_ctxt) * -+ mhi_cntrl->total_ev_rings, -+ &mhi_ctxt->er_ctxt_addr, -+ GFP_KERNEL); -+ if (!mhi_ctxt->er_ctxt) -+ goto error_alloc_er_ctxt; -+ -+ er_ctxt = mhi_ctxt->er_ctxt; -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -+ mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ /* Skip if it is 
an offload event */ -+ if (mhi_event->offload_ev) -+ continue; -+ -+ tmp = le32_to_cpu(er_ctxt->intmod); -+ tmp &= ~EV_CTX_INTMODC_MASK; -+ tmp &= ~EV_CTX_INTMODT_MASK; -+ tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT); -+ er_ctxt->intmod = cpu_to_le32(tmp); -+ -+ er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); -+ er_ctxt->msivec = cpu_to_le32(mhi_event->irq); -+ mhi_event->db_cfg.db_mode = true; -+ -+ ring->el_size = sizeof(struct mhi_tre); -+ ring->len = ring->el_size * ring->elements; -+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); -+ if (ret) -+ goto error_alloc_er; -+ -+ /* -+ * If the read pointer equals to the write pointer, then the -+ * ring is empty -+ */ -+ ring->rp = ring->wp = ring->base; -+ er_ctxt->rbase = cpu_to_le64(ring->iommu_base); -+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; -+ er_ctxt->rlen = cpu_to_le64(ring->len); -+ ring->ctxt_wp = &er_ctxt->wp; -+ } -+ -+ /* Setup cmd context */ -+ ret = -ENOMEM; -+ mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, -+ sizeof(*mhi_ctxt->cmd_ctxt) * -+ NR_OF_CMD_RINGS, -+ &mhi_ctxt->cmd_ctxt_addr, -+ GFP_KERNEL); -+ if (!mhi_ctxt->cmd_ctxt) -+ goto error_alloc_er; -+ -+ mhi_cmd = mhi_cntrl->mhi_cmd; -+ cmd_ctxt = mhi_ctxt->cmd_ctxt; -+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ -+ ring->el_size = sizeof(struct mhi_tre); -+ ring->elements = CMD_EL_PER_RING; -+ ring->len = ring->el_size * ring->elements; -+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); -+ if (ret) -+ goto error_alloc_cmd; -+ -+ ring->rp = ring->wp = ring->base; -+ cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); -+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; -+ cmd_ctxt->rlen = cpu_to_le64(ring->len); -+ ring->ctxt_wp = &cmd_ctxt->wp; -+ } -+ -+ mhi_cntrl->mhi_ctxt = mhi_ctxt; -+ -+ return 0; -+ -+error_alloc_cmd: -+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -+ ring->pre_aligned, ring->dma_handle); -+ } -+ dma_free_coherent(mhi_cntrl->cntrl_dev, -+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, -+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); -+ i = mhi_cntrl->total_ev_rings; -+ mhi_event = mhi_cntrl->mhi_event + i; -+ -+error_alloc_er: -+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ if (mhi_event->offload_ev) -+ continue; -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, -+ ring->pre_aligned, ring->dma_handle); -+ } -+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * -+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, -+ mhi_ctxt->er_ctxt_addr); -+ -+error_alloc_er_ctxt: -+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * -+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, -+ mhi_ctxt->chan_ctxt_addr); -+ -+error_alloc_chan_ctxt: -+ kfree(mhi_ctxt); -+ -+ return ret; -+} -+ -+int mhi_init_mmio(struct mhi_controller *mhi_cntrl) -+{ -+ u32 val; -+ int i, ret; -+ struct mhi_chan *mhi_chan; -+ struct mhi_event *mhi_event; -+ void __iomem *base = mhi_cntrl->regs; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ struct { -+ u32 offset; -+ u32 mask; -+ u32 shift; -+ u32 val; -+ } reg_info[] = { -+ { -+ CCABAP_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), -+ }, -+ { -+ CCABAP_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), -+ }, -+ { -+ ECABAP_HIGHER, U32_MAX, 0, -+ 
upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), -+ }, -+ { -+ ECABAP_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), -+ }, -+ { -+ CRCBAP_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), -+ }, -+ { -+ CRCBAP_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), -+ }, -+ { -+ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, -+ mhi_cntrl->total_ev_rings, -+ }, -+ { -+ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, -+ mhi_cntrl->hw_ev_rings, -+ }, -+ { -+ MHICTRLBASE_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->iova_start), -+ }, -+ { -+ MHICTRLBASE_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->iova_start), -+ }, -+ { -+ MHIDATABASE_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->iova_start), -+ }, -+ { -+ MHIDATABASE_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->iova_start), -+ }, -+ { -+ MHICTRLLIMIT_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->iova_stop), -+ }, -+ { -+ MHICTRLLIMIT_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->iova_stop), -+ }, -+ { -+ MHIDATALIMIT_HIGHER, U32_MAX, 0, -+ upper_32_bits(mhi_cntrl->iova_stop), -+ }, -+ { -+ MHIDATALIMIT_LOWER, U32_MAX, 0, -+ lower_32_bits(mhi_cntrl->iova_stop), -+ }, -+ { 0, 0, 0 } -+ }; -+ -+ dev_dbg(dev, "Initializing MHI registers\n"); -+ -+ /* Read channel db offset */ -+ ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, -+ CHDBOFF_CHDBOFF_SHIFT, &val); -+ if (ret) { -+ dev_err(dev, "Unable to read CHDBOFF register\n"); -+ return -EIO; -+ } -+ -+ if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { -+ dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", -+ val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); -+ return -ERANGE; -+ } -+ -+ /* Setup wake db */ -+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); -+ mhi_cntrl->wake_set = false; -+ -+ /* Setup channel db address for each channel in tre_ring */ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) -+ mhi_chan->tre_ring.db_addr = base + val; -+ -+ /* Read event ring db offset */ -+ ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, -+ ERDBOFF_ERDBOFF_SHIFT, &val); -+ if (ret) { -+ dev_err(dev, "Unable to read ERDBOFF register\n"); -+ return -EIO; -+ } -+ -+ if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { -+ dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n", -+ val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); -+ return -ERANGE; -+ } -+ -+ /* Setup event db address for each ev_ring */ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ -+ mhi_event->ring.db_addr = base + val; -+ } -+ -+ /* Setup DB register for primary CMD rings */ -+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; -+ -+ /* Write to MMIO registers */ -+ for (i = 0; reg_info[i].offset; i++) -+ mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, -+ reg_info[i].mask, reg_info[i].shift, -+ reg_info[i].val); -+ -+ return 0; -+} -+ -+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *buf_ring; -+ struct mhi_ring *tre_ring; -+ struct mhi_chan_ctxt *chan_ctxt; -+ u32 tmp; -+ -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; -+ -+ if (!chan_ctxt->rbase) /* Already uninitialized */ -+ return; -+ -+ 
dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, -+ tre_ring->pre_aligned, tre_ring->dma_handle); -+ vfree(buf_ring->base); -+ -+ buf_ring->base = tre_ring->base = NULL; -+ tre_ring->ctxt_wp = NULL; -+ chan_ctxt->rbase = 0; -+ chan_ctxt->rlen = 0; -+ chan_ctxt->rp = 0; -+ chan_ctxt->wp = 0; -+ -+ tmp = le32_to_cpu(chan_ctxt->chcfg); -+ tmp &= ~CHAN_CTX_CHSTATE_MASK; -+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); -+ chan_ctxt->chcfg = cpu_to_le32(tmp); -+ -+ /* Update to all cores */ -+ smp_wmb(); -+} -+ -+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *buf_ring; -+ struct mhi_ring *tre_ring; -+ struct mhi_chan_ctxt *chan_ctxt; -+ u32 tmp; -+ int ret; -+ -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ tre_ring->el_size = sizeof(struct mhi_tre); -+ tre_ring->len = tre_ring->el_size * tre_ring->elements; -+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; -+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); -+ if (ret) -+ return -ENOMEM; -+ -+ buf_ring->el_size = sizeof(struct mhi_buf_info); -+ buf_ring->len = buf_ring->el_size * buf_ring->elements; -+ buf_ring->base = vzalloc(buf_ring->len); -+ -+ if (!buf_ring->base) { -+ dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, -+ tre_ring->pre_aligned, tre_ring->dma_handle); -+ return -ENOMEM; -+ } -+ -+ tmp = le32_to_cpu(chan_ctxt->chcfg); -+ tmp &= ~CHAN_CTX_CHSTATE_MASK; -+ tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT); -+ chan_ctxt->chcfg = cpu_to_le32(tmp); -+ -+ chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); -+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; -+ chan_ctxt->rlen = cpu_to_le64(tre_ring->len); -+ tre_ring->ctxt_wp = &chan_ctxt->wp; -+ -+ tre_ring->rp = tre_ring->wp = tre_ring->base; -+ buf_ring->rp = buf_ring->wp = buf_ring->base; -+ mhi_chan->db_cfg.db_mode = 1; -+ -+ /* Update to all cores */ -+ smp_wmb(); -+ -+ return 0; -+} -+ -+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, -+ const struct mhi_controller_config *config) -+{ -+ struct mhi_event *mhi_event; -+ const struct mhi_event_config *event_cfg; -+ struct device *dev = mhi_cntrl->cntrl_dev; -+ int i, num; -+ -+ num = config->num_events; -+ mhi_cntrl->total_ev_rings = num; -+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), -+ GFP_KERNEL); -+ if (!mhi_cntrl->mhi_event) -+ return -ENOMEM; -+ -+ /* Populate event ring */ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < num; i++) { -+ event_cfg = &config->event_cfg[i]; -+ -+ mhi_event->er_index = i; -+ mhi_event->ring.elements = event_cfg->num_elements; -+ mhi_event->intmod = event_cfg->irq_moderation_ms; -+ mhi_event->irq = event_cfg->irq; -+ -+ if (event_cfg->channel != U32_MAX) { -+ /* This event ring has a dedicated channel */ -+ mhi_event->chan = event_cfg->channel; -+ if (mhi_event->chan >= mhi_cntrl->max_chan) { -+ dev_err(dev, -+ "Event Ring channel not available\n"); -+ goto error_ev_cfg; -+ } -+ -+ mhi_event->mhi_chan = -+ &mhi_cntrl->mhi_chan[mhi_event->chan]; -+ } -+ -+ /* Priority is fixed to 1 for now */ -+ mhi_event->priority = 1; -+ -+ mhi_event->db_cfg.brstmode = event_cfg->mode; -+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) -+ goto error_ev_cfg; -+ -+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) -+ mhi_event->db_cfg.process_db = mhi_db_brstmode; -+ else -+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; -+ -+ mhi_event->data_type = event_cfg->data_type; -+ -+ switch 
(mhi_event->data_type) { -+ case MHI_ER_DATA: -+ mhi_event->process_event = mhi_process_data_event_ring; -+ break; -+ case MHI_ER_CTRL: -+ mhi_event->process_event = mhi_process_ctrl_ev_ring; -+ break; -+ default: -+ dev_err(dev, "Event Ring type not supported\n"); -+ goto error_ev_cfg; -+ } -+ -+ mhi_event->hw_ring = event_cfg->hardware_event; -+ if (mhi_event->hw_ring) -+ mhi_cntrl->hw_ev_rings++; -+ else -+ mhi_cntrl->sw_ev_rings++; -+ -+ mhi_event->cl_manage = event_cfg->client_managed; -+ mhi_event->offload_ev = event_cfg->offload_channel; -+ mhi_event++; -+ } -+ -+ return 0; -+ -+error_ev_cfg: -+ -+ kfree(mhi_cntrl->mhi_event); -+ return -EINVAL; -+} -+ -+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, -+ const struct mhi_controller_config *config) -+{ -+ const struct mhi_channel_config *ch_cfg; -+ struct device *dev = mhi_cntrl->cntrl_dev; -+ int i; -+ u32 chan; -+ -+ mhi_cntrl->max_chan = config->max_channels; -+ -+ /* -+ * The allocation of MHI channels can exceed 32KB in some scenarios, -+ * so to avoid any memory possible allocation failures, vzalloc is -+ * used here -+ */ -+ mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * -+ sizeof(*mhi_cntrl->mhi_chan)); -+ if (!mhi_cntrl->mhi_chan) -+ return -ENOMEM; -+ -+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); -+ -+ /* Populate channel configurations */ -+ for (i = 0; i < config->num_channels; i++) { -+ struct mhi_chan *mhi_chan; -+ -+ ch_cfg = &config->ch_cfg[i]; -+ -+ chan = ch_cfg->num; -+ if (chan >= mhi_cntrl->max_chan) { -+ dev_err(dev, "Channel %d not available\n", chan); -+ goto error_chan_cfg; -+ } -+ -+ mhi_chan = &mhi_cntrl->mhi_chan[chan]; -+ mhi_chan->name = ch_cfg->name; -+ mhi_chan->chan = chan; -+ -+ mhi_chan->tre_ring.elements = ch_cfg->num_elements; -+ if (!mhi_chan->tre_ring.elements) -+ goto error_chan_cfg; -+ -+ /* -+ * For some channels, local ring length should be bigger than -+ * the transfer ring length due to internal logical channels -+ * in device. So host can queue much more buffers than transfer -+ * ring length. Example, RSC channels should have a larger local -+ * channel length than transfer ring length. -+ */ -+ mhi_chan->buf_ring.elements = ch_cfg->local_elements; -+ if (!mhi_chan->buf_ring.elements) -+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; -+ mhi_chan->er_index = ch_cfg->event_ring; -+ mhi_chan->dir = ch_cfg->dir; -+ -+ /* -+ * For most channels, chtype is identical to channel directions. 
-+ * So, if it is not defined then assign channel direction to -+ * chtype -+ */ -+ mhi_chan->type = ch_cfg->type; -+ if (!mhi_chan->type) -+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; -+ -+ mhi_chan->ee_mask = ch_cfg->ee_mask; -+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; -+ mhi_chan->lpm_notify = ch_cfg->lpm_notify; -+ mhi_chan->offload_ch = ch_cfg->offload_channel; -+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; -+ mhi_chan->pre_alloc = ch_cfg->auto_queue; -+ mhi_chan->wake_capable = ch_cfg->wake_capable; -+ -+ /* -+ * If MHI host allocates buffers, then the channel direction -+ * should be DMA_FROM_DEVICE -+ */ -+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { -+ dev_err(dev, "Invalid channel configuration\n"); -+ goto error_chan_cfg; -+ } -+ -+ /* -+ * Bi-directional and direction less channel must be an -+ * offload channel -+ */ -+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL || -+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { -+ dev_err(dev, "Invalid channel configuration\n"); -+ goto error_chan_cfg; -+ } -+ -+ if (!mhi_chan->offload_ch) { -+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; -+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { -+ dev_err(dev, "Invalid Door bell mode\n"); -+ goto error_chan_cfg; -+ } -+ } -+ -+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) -+ mhi_chan->db_cfg.process_db = mhi_db_brstmode; -+ else -+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; -+ -+ mhi_chan->configured = true; -+ -+ if (mhi_chan->lpm_notify) -+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); -+ } -+ -+ return 0; -+ -+error_chan_cfg: -+ vfree(mhi_cntrl->mhi_chan); -+ -+ return -EINVAL; -+} -+ -+static int parse_config(struct mhi_controller *mhi_cntrl, -+ const struct mhi_controller_config *config) -+{ -+ int ret; -+ -+ /* Parse MHI channel configuration */ -+ ret = parse_ch_cfg(mhi_cntrl, config); -+ if (ret) -+ return ret; -+ -+ /* Parse MHI event configuration */ -+ ret = parse_ev_cfg(mhi_cntrl, config); -+ if (ret) -+ goto error_ev_cfg; -+ -+ mhi_cntrl->timeout_ms = config->timeout_ms; -+ if (!mhi_cntrl->timeout_ms) -+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; -+ -+ mhi_cntrl->bounce_buf = config->use_bounce_buf; -+ mhi_cntrl->buffer_len = config->buf_len; -+ if (!mhi_cntrl->buffer_len) -+ mhi_cntrl->buffer_len = MHI_MAX_MTU; -+ -+ /* By default, host is allowed to ring DB in both M0 and M2 states */ -+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; -+ if (config->m2_no_db) -+ mhi_cntrl->db_access &= ~MHI_PM_M2; -+ -+ return 0; -+ -+error_ev_cfg: -+ vfree(mhi_cntrl->mhi_chan); -+ -+ return ret; -+} -+ -+int mhi_register_controller(struct mhi_controller *mhi_cntrl, -+ const struct mhi_controller_config *config) -+{ -+ struct mhi_event *mhi_event; -+ struct mhi_chan *mhi_chan; -+ struct mhi_cmd *mhi_cmd; -+ struct mhi_device *mhi_dev; -+ u32 soc_info; -+ int ret, i; -+ -+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || -+ !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || -+ !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || -+ !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || -+ !mhi_cntrl->irq || !mhi_cntrl->reg_len) -+ return -EINVAL; -+ -+ ret = parse_config(mhi_cntrl, config); -+ if (ret) -+ return -EINVAL; -+ -+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, -+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); -+ if (!mhi_cntrl->mhi_cmd) { -+ ret = -ENOMEM; -+ goto err_free_event; -+ } -+ -+ INIT_LIST_HEAD(&mhi_cntrl->transition_list); -+ mutex_init(&mhi_cntrl->pm_mutex); -+ rwlock_init(&mhi_cntrl->pm_lock); 
-+ spin_lock_init(&mhi_cntrl->transition_lock); -+ spin_lock_init(&mhi_cntrl->wlock); -+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); -+ init_waitqueue_head(&mhi_cntrl->state_event); -+ -+ mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); -+ if (!mhi_cntrl->hiprio_wq) { -+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); -+ ret = -ENOMEM; -+ goto err_free_cmd; -+ } -+ -+ mhi_cmd = mhi_cntrl->mhi_cmd; -+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) -+ spin_lock_init(&mhi_cmd->lock); -+ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ /* Skip for offload events */ -+ if (mhi_event->offload_ev) -+ continue; -+ -+ mhi_event->mhi_cntrl = mhi_cntrl; -+ spin_lock_init(&mhi_event->lock); -+ if (mhi_event->data_type == MHI_ER_CTRL) -+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, -+ (ulong)mhi_event); -+ else -+ tasklet_init(&mhi_event->task, mhi_ev_task, -+ (ulong)mhi_event); -+ } -+ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -+ mutex_init(&mhi_chan->mutex); -+ init_completion(&mhi_chan->completion); -+ rwlock_init(&mhi_chan->lock); -+ -+ /* used in setting bei field of TRE */ -+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -+ mhi_chan->intmod = mhi_event->intmod; -+ } -+ -+ if (mhi_cntrl->bounce_buf) { -+ mhi_cntrl->map_single = mhi_map_single_use_bb; -+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; -+ } else { -+ mhi_cntrl->map_single = mhi_map_single_no_bb; -+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; -+ } -+ -+ /* Read the MHI device info */ -+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, -+ SOC_HW_VERSION_OFFS, &soc_info); -+ if (ret) -+ goto err_destroy_wq; -+ -+ mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> -+ SOC_HW_VERSION_FAM_NUM_SHFT; -+ mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >> -+ SOC_HW_VERSION_DEV_NUM_SHFT; -+ mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >> -+ SOC_HW_VERSION_MAJOR_VER_SHFT; -+ mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> -+ SOC_HW_VERSION_MINOR_VER_SHFT; -+ -+ mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); -+ if (mhi_cntrl->index < 0) { -+ ret = mhi_cntrl->index; -+ goto err_destroy_wq; -+ } -+ -+ /* Register controller with MHI bus */ -+ mhi_dev = mhi_alloc_device(mhi_cntrl); -+ if (IS_ERR(mhi_dev)) { -+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); -+ ret = PTR_ERR(mhi_dev); -+ goto err_ida_free; -+ } -+ -+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; -+ mhi_dev->mhi_cntrl = mhi_cntrl; -+ dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); -+ mhi_dev->name = dev_name(&mhi_dev->dev); -+ -+ /* Init wakeup source */ -+ device_init_wakeup(&mhi_dev->dev, true); -+ -+ ret = device_add(&mhi_dev->dev); -+ if (ret) -+ goto err_release_dev; -+ -+ mhi_cntrl->mhi_dev = mhi_dev; -+ -+ mhi_create_debugfs(mhi_cntrl); -+ -+ return 0; -+ -+err_release_dev: -+ put_device(&mhi_dev->dev); -+err_ida_free: -+ ida_free(&mhi_controller_ida, mhi_cntrl->index); -+err_destroy_wq: -+ destroy_workqueue(mhi_cntrl->hiprio_wq); -+err_free_cmd: -+ kfree(mhi_cntrl->mhi_cmd); -+err_free_event: -+ kfree(mhi_cntrl->mhi_event); -+ vfree(mhi_cntrl->mhi_chan); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_register_controller); -+ -+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; -+ struct mhi_chan 
*mhi_chan = mhi_cntrl->mhi_chan; -+ unsigned int i; -+ -+ mhi_destroy_debugfs(mhi_cntrl); -+ -+ destroy_workqueue(mhi_cntrl->hiprio_wq); -+ kfree(mhi_cntrl->mhi_cmd); -+ kfree(mhi_cntrl->mhi_event); -+ -+ /* Drop the references to MHI devices created for channels */ -+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -+ if (!mhi_chan->mhi_dev) -+ continue; -+ -+ put_device(&mhi_chan->mhi_dev->dev); -+ } -+ vfree(mhi_cntrl->mhi_chan); -+ -+ device_del(&mhi_dev->dev); -+ put_device(&mhi_dev->dev); -+ -+ ida_free(&mhi_controller_ida, mhi_cntrl->index); -+} -+EXPORT_SYMBOL_GPL(mhi_unregister_controller); -+ -+struct mhi_controller *mhi_alloc_controller(void) -+{ -+ struct mhi_controller *mhi_cntrl; -+ -+ mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL); -+ -+ return mhi_cntrl; -+} -+EXPORT_SYMBOL_GPL(mhi_alloc_controller); -+ -+void mhi_free_controller(struct mhi_controller *mhi_cntrl) -+{ -+ kfree(mhi_cntrl); -+} -+EXPORT_SYMBOL_GPL(mhi_free_controller); -+ -+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) -+{ -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 bhi_off, bhie_off; -+ int ret; -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ -+ ret = mhi_init_dev_ctxt(mhi_cntrl); -+ if (ret) -+ goto error_dev_ctxt; -+ -+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); -+ if (ret) { -+ dev_err(dev, "Error getting BHI offset\n"); -+ goto error_reg_offset; -+ } -+ -+ if (bhi_off >= mhi_cntrl->reg_len) { -+ dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", -+ bhi_off, mhi_cntrl->reg_len); -+ ret = -EINVAL; -+ goto error_reg_offset; -+ } -+ mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; -+ -+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { -+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, -+ &bhie_off); -+ if (ret) { -+ dev_err(dev, "Error getting BHIE offset\n"); -+ goto error_reg_offset; -+ } -+ -+ if (bhie_off >= mhi_cntrl->reg_len) { -+ dev_err(dev, -+ "BHIe offset: 0x%x is out of range: 0x%zx\n", -+ bhie_off, mhi_cntrl->reg_len); -+ ret = -EINVAL; -+ goto error_reg_offset; -+ } -+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; -+ } -+ -+ if (mhi_cntrl->rddm_size) { -+ /* -+ * This controller supports RDDM, so we need to manually clear -+ * BHIE RX registers since POR values are undefined. 
-+ */ -+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, -+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + -+ 4); -+ /* -+ * Allocate RDDM table for debugging purpose if specified -+ */ -+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, -+ mhi_cntrl->rddm_size); -+ if (mhi_cntrl->rddm_image) -+ mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); -+ } -+ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ -+ return 0; -+ -+error_reg_offset: -+ mhi_deinit_dev_ctxt(mhi_cntrl); -+ -+error_dev_ctxt: -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up); -+ -+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) -+{ -+ if (mhi_cntrl->fbc_image) { -+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); -+ mhi_cntrl->fbc_image = NULL; -+ } -+ -+ if (mhi_cntrl->rddm_image) { -+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); -+ mhi_cntrl->rddm_image = NULL; -+ } -+ -+ mhi_cntrl->bhi = NULL; -+ mhi_cntrl->bhie = NULL; -+ -+ mhi_deinit_dev_ctxt(mhi_cntrl); -+} -+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down); -+ -+static void mhi_release_device(struct device *dev) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ -+ /* -+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI -+ * devices for the channels will only get created if the mhi_dev -+ * associated with it is NULL. This scenario will happen during the -+ * controller suspend and resume. -+ */ -+ if (mhi_dev->ul_chan) -+ mhi_dev->ul_chan->mhi_dev = NULL; -+ -+ if (mhi_dev->dl_chan) -+ mhi_dev->dl_chan->mhi_dev = NULL; -+ -+ kfree(mhi_dev); -+} -+ -+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_device *mhi_dev; -+ struct device *dev; -+ -+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); -+ if (!mhi_dev) -+ return ERR_PTR(-ENOMEM); -+ -+ dev = &mhi_dev->dev; -+ device_initialize(dev); -+ dev->bus = &mhi_bus_type; -+ dev->release = mhi_release_device; -+ -+ if (mhi_cntrl->mhi_dev) { -+ /* for MHI client devices, parent is the MHI controller device */ -+ dev->parent = &mhi_cntrl->mhi_dev->dev; -+ } else { -+ /* for MHI controller device, parent is the bus device (e.g. 
pci device) */ -+ dev->parent = mhi_cntrl->cntrl_dev; -+ } -+ -+ mhi_dev->mhi_cntrl = mhi_cntrl; -+ mhi_dev->dev_wake = 0; -+ -+ return mhi_dev; -+} -+ -+static int mhi_driver_probe(struct device *dev) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct device_driver *drv = dev->driver; -+ struct mhi_driver *mhi_drv = to_mhi_driver(drv); -+ struct mhi_event *mhi_event; -+ struct mhi_chan *ul_chan = mhi_dev->ul_chan; -+ struct mhi_chan *dl_chan = mhi_dev->dl_chan; -+ int ret; -+ -+ /* Bring device out of LPM */ -+ ret = mhi_device_get_sync(mhi_dev); -+ if (ret) -+ return ret; -+ -+ ret = -EINVAL; -+ -+ if (ul_chan) { -+ /* -+ * If channel supports LPM notifications then status_cb should -+ * be provided -+ */ -+ if (ul_chan->lpm_notify && !mhi_drv->status_cb) -+ goto exit_probe; -+ -+ /* For non-offload channels then xfer_cb should be provided */ -+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) -+ goto exit_probe; -+ -+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; -+ } -+ -+ ret = -EINVAL; -+ if (dl_chan) { -+ /* -+ * If channel supports LPM notifications then status_cb should -+ * be provided -+ */ -+ if (dl_chan->lpm_notify && !mhi_drv->status_cb) -+ goto exit_probe; -+ -+ /* For non-offload channels then xfer_cb should be provided */ -+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) -+ goto exit_probe; -+ -+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; -+ -+ /* -+ * If the channel event ring is managed by client, then -+ * status_cb must be provided so that the framework can -+ * notify pending data -+ */ -+ if (mhi_event->cl_manage && !mhi_drv->status_cb) -+ goto exit_probe; -+ -+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; -+ } -+ -+ /* Call the user provided probe function */ -+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id); -+ if (ret) -+ goto exit_probe; -+ -+ mhi_device_put(mhi_dev); -+ -+ return ret; -+ -+exit_probe: -+ mhi_unprepare_from_transfer(mhi_dev); -+ -+ mhi_device_put(mhi_dev); -+ -+ return ret; -+} -+ -+static int mhi_driver_remove(struct device *dev) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan; -+ enum mhi_ch_state ch_state[] = { -+ MHI_CH_STATE_DISABLED, -+ MHI_CH_STATE_DISABLED -+ }; -+ int dir; -+ -+ /* Skip if it is a controller device */ -+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -+ return 0; -+ -+ /* Reset both channels */ -+ for (dir = 0; dir < 2; dir++) { -+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; -+ -+ if (!mhi_chan) -+ continue; -+ -+ /* Wake all threads waiting for completion */ -+ write_lock_irq(&mhi_chan->lock); -+ mhi_chan->ccs = MHI_EV_CC_INVALID; -+ complete_all(&mhi_chan->completion); -+ write_unlock_irq(&mhi_chan->lock); -+ -+ /* Set the channel state to disabled */ -+ mutex_lock(&mhi_chan->mutex); -+ write_lock_irq(&mhi_chan->lock); -+ ch_state[dir] = mhi_chan->ch_state; -+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; -+ write_unlock_irq(&mhi_chan->lock); -+ -+ /* Reset the non-offload channel */ -+ if (!mhi_chan->offload_ch) -+ mhi_reset_chan(mhi_cntrl, mhi_chan); -+ -+ mutex_unlock(&mhi_chan->mutex); -+ } -+ -+ mhi_drv->remove(mhi_dev); -+ -+ /* De-init channel if it was enabled */ -+ for (dir = 0; dir < 2; dir++) { -+ mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; -+ -+ if (!mhi_chan) -+ continue; -+ -+ mutex_lock(&mhi_chan->mutex); -+ -+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED || -+ ch_state[dir] == MHI_CH_STATE_STOP) && -+ !mhi_chan->offload_ch) -+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -+ -+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -+ -+ mutex_unlock(&mhi_chan->mutex); -+ } -+ -+ while (mhi_dev->dev_wake) -+ mhi_device_put(mhi_dev); -+ -+ return 0; -+} -+ -+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner) -+{ -+ struct device_driver *driver = &mhi_drv->driver; -+ -+ if (!mhi_drv->probe || !mhi_drv->remove) -+ return -EINVAL; -+ -+ driver->bus = &mhi_bus_type; -+ driver->owner = owner; -+ driver->probe = mhi_driver_probe; -+ driver->remove = mhi_driver_remove; -+ -+ return driver_register(driver); -+} -+EXPORT_SYMBOL_GPL(__mhi_driver_register); -+ -+void mhi_driver_unregister(struct mhi_driver *mhi_drv) -+{ -+ driver_unregister(&mhi_drv->driver); -+} -+EXPORT_SYMBOL_GPL(mhi_driver_unregister); -+ -+static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ -+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, -+ mhi_dev->name); -+} -+ -+static int mhi_match(struct device *dev, struct device_driver *drv) -+{ -+ struct mhi_device *mhi_dev = to_mhi_device(dev); -+ struct mhi_driver *mhi_drv = to_mhi_driver(drv); -+ const struct mhi_device_id *id; -+ -+ /* -+ * If the device is a controller type then there is no client driver -+ * associated with it -+ */ -+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -+ return 0; -+ -+ for (id = mhi_drv->id_table; id->chan[0]; id++) -+ if (!strcmp(mhi_dev->name, id->chan)) { -+ mhi_dev->id = id; -+ return 1; -+ } -+ -+ return 0; -+}; -+ -+struct bus_type mhi_bus_type = { -+ .name = "mhi", -+ .dev_name = "mhi", -+ .match = mhi_match, -+ .uevent = mhi_uevent, -+ .dev_groups = mhi_dev_groups, -+}; -+ -+static int __init mhi_init(void) -+{ -+ mhi_debugfs_init(); -+ return bus_register(&mhi_bus_type); -+} -+ -+static void __exit mhi_exit(void) -+{ -+ mhi_debugfs_exit(); -+ bus_unregister(&mhi_bus_type); -+} -+ -+postcore_initcall(mhi_init); -+module_exit(mhi_exit); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("MHI Host Interface"); -diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h -new file mode 100644 -index 0000000000000..71f181402be98 ---- /dev/null -+++ b/drivers/bus/mhi/host/internal.h -@@ -0,0 +1,718 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
-+ * -+ */ -+ -+#ifndef _MHI_INT_H -+#define _MHI_INT_H -+ -+#include -+ -+extern struct bus_type mhi_bus_type; -+ -+#define MHIREGLEN (0x0) -+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) -+#define MHIREGLEN_MHIREGLEN_SHIFT (0) -+ -+#define MHIVER (0x8) -+#define MHIVER_MHIVER_MASK (0xFFFFFFFF) -+#define MHIVER_MHIVER_SHIFT (0) -+ -+#define MHICFG (0x10) -+#define MHICFG_NHWER_MASK (0xFF000000) -+#define MHICFG_NHWER_SHIFT (24) -+#define MHICFG_NER_MASK (0xFF0000) -+#define MHICFG_NER_SHIFT (16) -+#define MHICFG_NHWCH_MASK (0xFF00) -+#define MHICFG_NHWCH_SHIFT (8) -+#define MHICFG_NCH_MASK (0xFF) -+#define MHICFG_NCH_SHIFT (0) -+ -+#define CHDBOFF (0x18) -+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) -+#define CHDBOFF_CHDBOFF_SHIFT (0) -+ -+#define ERDBOFF (0x20) -+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) -+#define ERDBOFF_ERDBOFF_SHIFT (0) -+ -+#define BHIOFF (0x28) -+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) -+#define BHIOFF_BHIOFF_SHIFT (0) -+ -+#define BHIEOFF (0x2C) -+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) -+#define BHIEOFF_BHIEOFF_SHIFT (0) -+ -+#define DEBUGOFF (0x30) -+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) -+#define DEBUGOFF_DEBUGOFF_SHIFT (0) -+ -+#define MHICTRL (0x38) -+#define MHICTRL_MHISTATE_MASK (0x0000FF00) -+#define MHICTRL_MHISTATE_SHIFT (8) -+#define MHICTRL_RESET_MASK (0x2) -+#define MHICTRL_RESET_SHIFT (1) -+ -+#define MHISTATUS (0x48) -+#define MHISTATUS_MHISTATE_MASK (0x0000FF00) -+#define MHISTATUS_MHISTATE_SHIFT (8) -+#define MHISTATUS_SYSERR_MASK (0x4) -+#define MHISTATUS_SYSERR_SHIFT (2) -+#define MHISTATUS_READY_MASK (0x1) -+#define MHISTATUS_READY_SHIFT (0) -+ -+#define CCABAP_LOWER (0x58) -+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) -+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) -+ -+#define CCABAP_HIGHER (0x5C) -+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) -+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) -+ -+#define ECABAP_LOWER (0x60) -+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) -+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) -+ -+#define ECABAP_HIGHER (0x64) -+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) -+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) -+ -+#define CRCBAP_LOWER (0x68) -+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) -+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) -+ -+#define CRCBAP_HIGHER (0x6C) -+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) -+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) -+ -+#define CRDB_LOWER (0x70) -+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) -+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) -+ -+#define CRDB_HIGHER (0x74) -+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) -+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) -+ -+#define MHICTRLBASE_LOWER (0x80) -+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) -+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) -+ -+#define MHICTRLBASE_HIGHER (0x84) -+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) -+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) -+ -+#define MHICTRLLIMIT_LOWER (0x88) -+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) -+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) -+ -+#define MHICTRLLIMIT_HIGHER (0x8C) -+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) -+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) -+ -+#define MHIDATABASE_LOWER (0x98) -+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) -+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) -+ -+#define MHIDATABASE_HIGHER (0x9C) -+#define 
MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) -+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) -+ -+#define MHIDATALIMIT_LOWER (0xA0) -+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) -+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) -+ -+#define MHIDATALIMIT_HIGHER (0xA4) -+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) -+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) -+ -+/* Host request register */ -+#define MHI_SOC_RESET_REQ_OFFSET (0xB0) -+#define MHI_SOC_RESET_REQ BIT(0) -+ -+/* MHI BHI offfsets */ -+#define BHI_BHIVERSION_MINOR (0x00) -+#define BHI_BHIVERSION_MAJOR (0x04) -+#define BHI_IMGADDR_LOW (0x08) -+#define BHI_IMGADDR_HIGH (0x0C) -+#define BHI_IMGSIZE (0x10) -+#define BHI_RSVD1 (0x14) -+#define BHI_IMGTXDB (0x18) -+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) -+#define BHI_TXDB_SEQNUM_SHFT (0) -+#define BHI_RSVD2 (0x1C) -+#define BHI_INTVEC (0x20) -+#define BHI_RSVD3 (0x24) -+#define BHI_EXECENV (0x28) -+#define BHI_STATUS (0x2C) -+#define BHI_ERRCODE (0x30) -+#define BHI_ERRDBG1 (0x34) -+#define BHI_ERRDBG2 (0x38) -+#define BHI_ERRDBG3 (0x3C) -+#define BHI_SERIALNU (0x40) -+#define BHI_SBLANTIROLLVER (0x44) -+#define BHI_NUMSEG (0x48) -+#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) -+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) -+#define BHI_RSVD5 (0xC4) -+#define BHI_STATUS_MASK (0xC0000000) -+#define BHI_STATUS_SHIFT (30) -+#define BHI_STATUS_ERROR (3) -+#define BHI_STATUS_SUCCESS (2) -+#define BHI_STATUS_RESET (0) -+ -+/* MHI BHIE offsets */ -+#define BHIE_MSMSOCID_OFFS (0x0000) -+#define BHIE_TXVECADDR_LOW_OFFS (0x002C) -+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) -+#define BHIE_TXVECSIZE_OFFS (0x0034) -+#define BHIE_TXVECDB_OFFS (0x003C) -+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -+#define BHIE_TXVECDB_SEQNUM_SHFT (0) -+#define BHIE_TXVECSTATUS_OFFS (0x0044) -+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) -+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) -+#define BHIE_TXVECSTATUS_STATUS_SHFT (30) -+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) -+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) -+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) -+#define BHIE_RXVECADDR_LOW_OFFS (0x0060) -+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) -+#define BHIE_RXVECSIZE_OFFS (0x0068) -+#define BHIE_RXVECDB_OFFS (0x0070) -+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) -+#define BHIE_RXVECDB_SEQNUM_SHFT (0) -+#define BHIE_RXVECSTATUS_OFFS (0x0078) -+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) -+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) -+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) -+#define BHIE_RXVECSTATUS_STATUS_SHFT (30) -+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) -+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) -+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) -+ -+#define SOC_HW_VERSION_OFFS (0x224) -+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000) -+#define SOC_HW_VERSION_FAM_NUM_SHFT (28) -+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000) -+#define SOC_HW_VERSION_DEV_NUM_SHFT (16) -+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00) -+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8) -+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF) -+#define SOC_HW_VERSION_MINOR_VER_SHFT (0) -+ -+#define EV_CTX_RESERVED_MASK GENMASK(7, 0) -+#define EV_CTX_INTMODC_MASK GENMASK(15, 8) -+#define EV_CTX_INTMODC_SHIFT 8 -+#define EV_CTX_INTMODT_MASK GENMASK(31, 16) -+#define EV_CTX_INTMODT_SHIFT 16 -+struct mhi_event_ctxt { -+ __le32 intmod; -+ __le32 ertype; -+ __le32 
msivec; -+ -+ __le64 rbase __packed __aligned(4); -+ __le64 rlen __packed __aligned(4); -+ __le64 rp __packed __aligned(4); -+ __le64 wp __packed __aligned(4); -+}; -+ -+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) -+#define CHAN_CTX_CHSTATE_SHIFT 0 -+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) -+#define CHAN_CTX_BRSTMODE_SHIFT 8 -+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) -+#define CHAN_CTX_POLLCFG_SHIFT 10 -+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) -+struct mhi_chan_ctxt { -+ __le32 chcfg; -+ __le32 chtype; -+ __le32 erindex; -+ -+ __le64 rbase __packed __aligned(4); -+ __le64 rlen __packed __aligned(4); -+ __le64 rp __packed __aligned(4); -+ __le64 wp __packed __aligned(4); -+}; -+ -+struct mhi_cmd_ctxt { -+ __le32 reserved0; -+ __le32 reserved1; -+ __le32 reserved2; -+ -+ __le64 rbase __packed __aligned(4); -+ __le64 rlen __packed __aligned(4); -+ __le64 rp __packed __aligned(4); -+ __le64 wp __packed __aligned(4); -+}; -+ -+struct mhi_ctxt { -+ struct mhi_event_ctxt *er_ctxt; -+ struct mhi_chan_ctxt *chan_ctxt; -+ struct mhi_cmd_ctxt *cmd_ctxt; -+ dma_addr_t er_ctxt_addr; -+ dma_addr_t chan_ctxt_addr; -+ dma_addr_t cmd_ctxt_addr; -+}; -+ -+struct mhi_tre { -+ __le64 ptr; -+ __le32 dword[2]; -+}; -+ -+struct bhi_vec_entry { -+ u64 dma_addr; -+ u64 size; -+}; -+ -+enum mhi_cmd_type { -+ MHI_CMD_NOP = 1, -+ MHI_CMD_RESET_CHAN = 16, -+ MHI_CMD_STOP_CHAN = 17, -+ MHI_CMD_START_CHAN = 18, -+}; -+ -+/* No operation command */ -+#define MHI_TRE_CMD_NOOP_PTR (0) -+#define MHI_TRE_CMD_NOOP_DWORD0 (0) -+#define MHI_TRE_CMD_NOOP_DWORD1 (cpu_to_le32(MHI_CMD_NOP << 16)) -+ -+/* Channel reset command */ -+#define MHI_TRE_CMD_RESET_PTR (0) -+#define MHI_TRE_CMD_RESET_DWORD0 (0) -+#define MHI_TRE_CMD_RESET_DWORD1(chid) (cpu_to_le32((chid << 24) | \ -+ (MHI_CMD_RESET_CHAN << 16))) -+ -+/* Channel stop command */ -+#define MHI_TRE_CMD_STOP_PTR (0) -+#define MHI_TRE_CMD_STOP_DWORD0 (0) -+#define MHI_TRE_CMD_STOP_DWORD1(chid) (cpu_to_le32((chid << 24) | \ -+ (MHI_CMD_STOP_CHAN << 16))) -+ -+/* Channel start command */ -+#define MHI_TRE_CMD_START_PTR (0) -+#define MHI_TRE_CMD_START_DWORD0 (0) -+#define MHI_TRE_CMD_START_DWORD1(chid) (cpu_to_le32((chid << 24) | \ -+ (MHI_CMD_START_CHAN << 16))) -+ -+#define MHI_TRE_GET_DWORD(tre, word) (le32_to_cpu((tre)->dword[(word)])) -+#define MHI_TRE_GET_CMD_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -+#define MHI_TRE_GET_CMD_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) -+ -+/* Event descriptor macros */ -+#define MHI_TRE_EV_PTR(ptr) (cpu_to_le64(ptr)) -+#define MHI_TRE_EV_DWORD0(code, len) (cpu_to_le32((code << 24) | len)) -+#define MHI_TRE_EV_DWORD1(chid, type) (cpu_to_le32((chid << 24) | (type << 16))) -+#define MHI_TRE_GET_EV_PTR(tre) (le64_to_cpu((tre)->ptr)) -+#define MHI_TRE_GET_EV_CODE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -+#define MHI_TRE_GET_EV_LEN(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFFFF) -+#define MHI_TRE_GET_EV_CHID(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -+#define MHI_TRE_GET_EV_TYPE(tre) ((MHI_TRE_GET_DWORD(tre, 1) >> 16) & 0xFF) -+#define MHI_TRE_GET_EV_STATE(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -+#define MHI_TRE_GET_EV_EXECENV(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 24) & 0xFF) -+#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0) -+#define MHI_TRE_GET_EV_TIME(tre) (MHI_TRE_GET_EV_PTR(tre)) -+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre)) -+#define MHI_TRE_GET_EV_VEID(tre) ((MHI_TRE_GET_DWORD(tre, 0) >> 16) & 0xFF) -+#define MHI_TRE_GET_EV_LINKSPEED(tre) 
((MHI_TRE_GET_DWORD(tre, 1) >> 24) & 0xFF) -+#define MHI_TRE_GET_EV_LINKWIDTH(tre) (MHI_TRE_GET_DWORD(tre, 0) & 0xFF) -+ -+/* Transfer descriptor macros */ -+#define MHI_TRE_DATA_PTR(ptr) (cpu_to_le64(ptr)) -+#define MHI_TRE_DATA_DWORD0(len) (cpu_to_le32(len & MHI_MAX_MTU)) -+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) (cpu_to_le32((2 << 16) | (bei << 10) \ -+ | (ieot << 9) | (ieob << 8) | chain)) -+ -+/* RSC transfer descriptor macros */ -+#define MHI_RSCTRE_DATA_PTR(ptr, len) (cpu_to_le64(((u64)len << 48) | ptr)) -+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cpu_to_le32(cookie)) -+#define MHI_RSCTRE_DATA_DWORD1 (cpu_to_le32(MHI_PKT_TYPE_COALESCING << 16)) -+ -+enum mhi_pkt_type { -+ MHI_PKT_TYPE_INVALID = 0x0, -+ MHI_PKT_TYPE_NOOP_CMD = 0x1, -+ MHI_PKT_TYPE_TRANSFER = 0x2, -+ MHI_PKT_TYPE_COALESCING = 0x8, -+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, -+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, -+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12, -+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, -+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, -+ MHI_PKT_TYPE_TX_EVENT = 0x22, -+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, -+ MHI_PKT_TYPE_EE_EVENT = 0x40, -+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48, -+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, -+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */ -+}; -+ -+/* MHI transfer completion events */ -+enum mhi_ev_ccs { -+ MHI_EV_CC_INVALID = 0x0, -+ MHI_EV_CC_SUCCESS = 0x1, -+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */ -+ MHI_EV_CC_OVERFLOW = 0x3, -+ MHI_EV_CC_EOB = 0x4, /* End of block event */ -+ MHI_EV_CC_OOB = 0x5, /* Out of block event */ -+ MHI_EV_CC_DB_MODE = 0x6, -+ MHI_EV_CC_UNDEFINED_ERR = 0x10, -+ MHI_EV_CC_BAD_TRE = 0x11, -+}; -+ -+enum mhi_ch_state { -+ MHI_CH_STATE_DISABLED = 0x0, -+ MHI_CH_STATE_ENABLED = 0x1, -+ MHI_CH_STATE_RUNNING = 0x2, -+ MHI_CH_STATE_SUSPENDED = 0x3, -+ MHI_CH_STATE_STOP = 0x4, -+ MHI_CH_STATE_ERROR = 0x5, -+}; -+ -+enum mhi_ch_state_type { -+ MHI_CH_STATE_TYPE_RESET, -+ MHI_CH_STATE_TYPE_STOP, -+ MHI_CH_STATE_TYPE_START, -+ MHI_CH_STATE_TYPE_MAX, -+}; -+ -+extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; -+#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \ -+ "INVALID_STATE" : \ -+ mhi_ch_state_type_str[(state)]) -+ -+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ -+ mode != MHI_DB_BRST_ENABLE) -+ -+extern const char * const mhi_ee_str[MHI_EE_MAX]; -+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ -+ "INVALID_EE" : mhi_ee_str[ee]) -+ -+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ -+ ee == MHI_EE_EDL) -+ -+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \ -+ ee == MHI_EE_FP) -+ -+enum dev_st_transition { -+ DEV_ST_TRANSITION_PBL, -+ DEV_ST_TRANSITION_READY, -+ DEV_ST_TRANSITION_SBL, -+ DEV_ST_TRANSITION_MISSION_MODE, -+ DEV_ST_TRANSITION_FP, -+ DEV_ST_TRANSITION_SYS_ERR, -+ DEV_ST_TRANSITION_DISABLE, -+ DEV_ST_TRANSITION_MAX, -+}; -+ -+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; -+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ -+ "INVALID_STATE" : dev_state_tran_str[state]) -+ -+extern const char * const mhi_state_str[MHI_STATE_MAX]; -+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ -+ !mhi_state_str[state]) ? 
\ -+ "INVALID_STATE" : mhi_state_str[state]) -+ -+/* internal power states */ -+enum mhi_pm_state { -+ MHI_PM_STATE_DISABLE, -+ MHI_PM_STATE_POR, -+ MHI_PM_STATE_M0, -+ MHI_PM_STATE_M2, -+ MHI_PM_STATE_M3_ENTER, -+ MHI_PM_STATE_M3, -+ MHI_PM_STATE_M3_EXIT, -+ MHI_PM_STATE_FW_DL_ERR, -+ MHI_PM_STATE_SYS_ERR_DETECT, -+ MHI_PM_STATE_SYS_ERR_PROCESS, -+ MHI_PM_STATE_SHUTDOWN_PROCESS, -+ MHI_PM_STATE_LD_ERR_FATAL_DETECT, -+ MHI_PM_STATE_MAX -+}; -+ -+#define MHI_PM_DISABLE BIT(0) -+#define MHI_PM_POR BIT(1) -+#define MHI_PM_M0 BIT(2) -+#define MHI_PM_M2 BIT(3) -+#define MHI_PM_M3_ENTER BIT(4) -+#define MHI_PM_M3 BIT(5) -+#define MHI_PM_M3_EXIT BIT(6) -+/* firmware download failure state */ -+#define MHI_PM_FW_DL_ERR BIT(7) -+#define MHI_PM_SYS_ERR_DETECT BIT(8) -+#define MHI_PM_SYS_ERR_PROCESS BIT(9) -+#define MHI_PM_SHUTDOWN_PROCESS BIT(10) -+/* link not accessible */ -+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) -+ -+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ -+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ -+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ -+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) -+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) -+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) -+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \ -+ mhi_cntrl->db_access) -+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ -+ MHI_PM_M2 | MHI_PM_M3_EXIT)) -+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) -+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) -+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ -+ MHI_PM_IN_ERROR_STATE(pm_state)) -+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ -+ (MHI_PM_M3_ENTER | MHI_PM_M3)) -+ -+#define NR_OF_CMD_RINGS 1 -+#define CMD_EL_PER_RING 128 -+#define PRIMARY_CMD_RING 0 -+#define MHI_DEV_WAKE_DB 127 -+#define MHI_MAX_MTU 0xffff -+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) -+ -+enum mhi_er_type { -+ MHI_ER_TYPE_INVALID = 0x0, -+ MHI_ER_TYPE_VALID = 0x1, -+}; -+ -+struct db_cfg { -+ bool reset_req; -+ bool db_mode; -+ u32 pollcfg; -+ enum mhi_db_brst_mode brstmode; -+ dma_addr_t db_val; -+ void (*process_db)(struct mhi_controller *mhi_cntrl, -+ struct db_cfg *db_cfg, void __iomem *io_addr, -+ dma_addr_t db_val); -+}; -+ -+struct mhi_pm_transitions { -+ enum mhi_pm_state from_state; -+ u32 to_states; -+}; -+ -+struct state_transition { -+ struct list_head node; -+ enum dev_st_transition state; -+}; -+ -+struct mhi_ring { -+ dma_addr_t dma_handle; -+ dma_addr_t iommu_base; -+ __le64 *ctxt_wp; /* point to ctxt wp */ -+ void *pre_aligned; -+ void *base; -+ void *rp; -+ void *wp; -+ size_t el_size; -+ size_t len; -+ size_t elements; -+ size_t alloc_size; -+ void __iomem *db_addr; -+}; -+ -+struct mhi_cmd { -+ struct mhi_ring ring; -+ spinlock_t lock; -+}; -+ -+struct mhi_buf_info { -+ void *v_addr; -+ void *bb_addr; -+ void *wp; -+ void *cb_buf; -+ dma_addr_t p_addr; -+ size_t len; -+ enum dma_data_direction dir; -+ bool used; /* Indicates whether the buffer is used or not */ -+ bool pre_mapped; /* Already pre-mapped by client */ -+}; -+ -+struct mhi_event { -+ struct mhi_controller *mhi_cntrl; -+ struct mhi_chan *mhi_chan; /* dedicated to channel */ -+ u32 er_index; -+ u32 intmod; -+ u32 irq; -+ int chan; /* this event ring is dedicated to a channel (optional) */ -+ u32 priority; -+ enum mhi_er_data_type data_type; -+ 
struct mhi_ring ring; -+ struct db_cfg db_cfg; -+ struct tasklet_struct task; -+ spinlock_t lock; -+ int (*process_event)(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, -+ u32 event_quota); -+ bool hw_ring; -+ bool cl_manage; -+ bool offload_ev; /* managed by a device driver */ -+}; -+ -+struct mhi_chan { -+ const char *name; -+ /* -+ * Important: When consuming, increment tre_ring first and when -+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring -+ * is guranteed to have space so we do not need to check both rings. -+ */ -+ struct mhi_ring buf_ring; -+ struct mhi_ring tre_ring; -+ u32 chan; -+ u32 er_index; -+ u32 intmod; -+ enum mhi_ch_type type; -+ enum dma_data_direction dir; -+ struct db_cfg db_cfg; -+ enum mhi_ch_ee_mask ee_mask; -+ enum mhi_ch_state ch_state; -+ enum mhi_ev_ccs ccs; -+ struct mhi_device *mhi_dev; -+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); -+ struct mutex mutex; -+ struct completion completion; -+ rwlock_t lock; -+ struct list_head node; -+ bool lpm_notify; -+ bool configured; -+ bool offload_ch; -+ bool pre_alloc; -+ bool wake_capable; -+}; -+ -+/* Default MHI timeout */ -+#define MHI_TIMEOUT_MS (1000) -+ -+/* debugfs related functions */ -+#ifdef CONFIG_MHI_BUS_DEBUG -+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl); -+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl); -+void mhi_debugfs_init(void); -+void mhi_debugfs_exit(void); -+#else -+static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl) -+{ -+} -+ -+static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl) -+{ -+} -+ -+static inline void mhi_debugfs_init(void) -+{ -+} -+ -+static inline void mhi_debugfs_exit(void) -+{ -+} -+#endif -+ -+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); -+ -+int mhi_destroy_device(struct device *dev, void *data); -+void mhi_create_devices(struct mhi_controller *mhi_cntrl); -+ -+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, -+ struct image_info **image_info, size_t alloc_size); -+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, -+ struct image_info *image_info); -+ -+/* Power management APIs */ -+enum mhi_pm_state __must_check mhi_tryset_pm_state( -+ struct mhi_controller *mhi_cntrl, -+ enum mhi_pm_state state); -+const char *to_mhi_pm_state_str(u32 state); -+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, -+ enum dev_st_transition state); -+void mhi_pm_st_worker(struct work_struct *work); -+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); -+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); -+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); -+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); -+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); -+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); -+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -+ enum mhi_cmd_type cmd); -+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl); -+static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl) -+{ -+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 && -+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST); -+} -+ -+static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl) -+{ -+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); -+ mhi_cntrl->runtime_get(mhi_cntrl); -+ mhi_cntrl->runtime_put(mhi_cntrl); -+} -+ -+/* Register access methods */ -+void mhi_db_brstmode(struct mhi_controller 
*mhi_cntrl, struct db_cfg *db_cfg, -+ void __iomem *db_addr, dma_addr_t db_val); -+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, -+ struct db_cfg *db_mode, void __iomem *db_addr, -+ dma_addr_t db_val); -+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, u32 *out); -+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, u32 mask, -+ u32 shift, u32 *out); -+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, u32 mask, -+ u32 shift, u32 val, u32 delayus); -+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, -+ u32 offset, u32 val); -+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, -+ u32 offset, u32 mask, u32 shift, u32 val); -+void mhi_ring_er_db(struct mhi_event *mhi_event); -+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, -+ dma_addr_t db_val); -+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); -+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan); -+ -+/* Initialization methods */ -+int mhi_init_mmio(struct mhi_controller *mhi_cntrl); -+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); -+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); -+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); -+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); -+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, -+ struct image_info *img_info); -+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); -+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan); -+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan); -+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan); -+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan); -+ -+/* Event processing methods */ -+void mhi_ctrl_ev_task(unsigned long data); -+void mhi_ev_task(unsigned long data); -+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, u32 event_quota); -+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, u32 event_quota); -+ -+/* ISR handlers */ -+irqreturn_t mhi_irq_handler(int irq_number, void *dev); -+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev); -+irqreturn_t mhi_intvec_handler(int irq_number, void *dev); -+ -+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -+ struct mhi_buf_info *info, enum mhi_flags flags); -+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info); -+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info); -+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info); -+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info); -+ -+#endif /* _MHI_INT_H */ -diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c -new file mode 100644 -index 0000000000000..6b36689999427 ---- /dev/null -+++ b/drivers/bus/mhi/host/main.c -@@ -0,0 +1,1673 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, u32 *out) -+{ -+ return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); -+} -+ -+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, -+ u32 mask, u32 shift, u32 *out) -+{ -+ u32 tmp; -+ int ret; -+ -+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); -+ if (ret) -+ return ret; -+ -+ *out = (tmp & mask) >> shift; -+ -+ return 0; -+} -+ -+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, -+ void __iomem *base, u32 offset, -+ u32 mask, u32 shift, u32 val, u32 delayus) -+{ -+ int ret; -+ u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; -+ -+ while (retry--) { -+ ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, shift, -+ &out); -+ if (ret) -+ return ret; -+ -+ if (out == val) -+ return 0; -+ -+ fsleep(delayus); -+ } -+ -+ return -ETIMEDOUT; -+} -+ -+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, -+ u32 offset, u32 val) -+{ -+ mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); -+} -+ -+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, -+ u32 offset, u32 mask, u32 shift, u32 val) -+{ -+ int ret; -+ u32 tmp; -+ -+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); -+ if (ret) -+ return; -+ -+ tmp &= ~mask; -+ tmp |= (val << shift); -+ mhi_write_reg(mhi_cntrl, base, offset, tmp); -+} -+ -+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, -+ dma_addr_t db_val) -+{ -+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); -+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); -+} -+ -+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, -+ struct db_cfg *db_cfg, -+ void __iomem *db_addr, -+ dma_addr_t db_val) -+{ -+ if (db_cfg->db_mode) { -+ db_cfg->db_val = db_val; -+ mhi_write_db(mhi_cntrl, db_addr, db_val); -+ db_cfg->db_mode = 0; -+ } -+} -+ -+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, -+ struct db_cfg *db_cfg, -+ void __iomem *db_addr, -+ dma_addr_t db_val) -+{ -+ db_cfg->db_val = db_val; -+ mhi_write_db(mhi_cntrl, db_addr, db_val); -+} -+ -+void mhi_ring_er_db(struct mhi_event *mhi_event) -+{ -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, -+ ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); -+} -+ -+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) -+{ -+ dma_addr_t db; -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ -+ db = ring->iommu_base + (ring->wp - ring->base); -+ *ring->ctxt_wp = cpu_to_le64(db); -+ mhi_write_db(mhi_cntrl, ring->db_addr, db); -+} -+ -+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *ring = &mhi_chan->tre_ring; -+ dma_addr_t db; -+ -+ db = ring->iommu_base + (ring->wp - ring->base); -+ -+ /* -+ * Writes to the new ring element must be visible to the hardware -+ * before letting h/w know there is new element to fetch. -+ */ -+ dma_wmb(); -+ *ring->ctxt_wp = cpu_to_le64(db); -+ -+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, -+ ring->db_addr, db); -+} -+ -+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) -+{ -+ u32 exec; -+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); -+ -+ return (ret) ? 
MHI_EE_MAX : exec; -+} -+EXPORT_SYMBOL_GPL(mhi_get_exec_env); -+ -+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) -+{ -+ u32 state; -+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, -+ MHISTATUS_MHISTATE_MASK, -+ MHISTATUS_MHISTATE_SHIFT, &state); -+ return ret ? MHI_STATE_MAX : state; -+} -+EXPORT_SYMBOL_GPL(mhi_get_mhi_state); -+ -+void mhi_soc_reset(struct mhi_controller *mhi_cntrl) -+{ -+ if (mhi_cntrl->reset) { -+ mhi_cntrl->reset(mhi_cntrl); -+ return; -+ } -+ -+ /* Generic MHI SoC reset */ -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, -+ MHI_SOC_RESET_REQ); -+} -+EXPORT_SYMBOL_GPL(mhi_soc_reset); -+ -+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info) -+{ -+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, -+ buf_info->v_addr, buf_info->len, -+ buf_info->dir); -+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info) -+{ -+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, -+ &buf_info->p_addr, GFP_ATOMIC); -+ -+ if (!buf) -+ return -ENOMEM; -+ -+ if (buf_info->dir == DMA_TO_DEVICE) -+ memcpy(buf, buf_info->v_addr, buf_info->len); -+ -+ buf_info->bb_addr = buf; -+ -+ return 0; -+} -+ -+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info) -+{ -+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, -+ buf_info->dir); -+} -+ -+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, -+ struct mhi_buf_info *buf_info) -+{ -+ if (buf_info->dir == DMA_FROM_DEVICE) -+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); -+ -+ dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, -+ buf_info->bb_addr, buf_info->p_addr); -+} -+ -+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring) -+{ -+ int nr_el; -+ -+ if (ring->wp < ring->rp) { -+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; -+ } else { -+ nr_el = (ring->rp - ring->base) / ring->el_size; -+ nr_el += ((ring->base + ring->len - ring->wp) / -+ ring->el_size) - 1; -+ } -+ -+ return nr_el; -+} -+ -+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) -+{ -+ return (addr - ring->iommu_base) + ring->base; -+} -+ -+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring) -+{ -+ ring->wp += ring->el_size; -+ if (ring->wp >= (ring->base + ring->len)) -+ ring->wp = ring->base; -+ /* smp update */ -+ smp_wmb(); -+} -+ -+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring) -+{ -+ ring->rp += ring->el_size; -+ if (ring->rp >= (ring->base + ring->len)) -+ ring->rp = ring->base; -+ /* smp update */ -+ smp_wmb(); -+} -+ -+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) -+{ -+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; -+} -+ -+int mhi_destroy_device(struct device *dev, void *data) -+{ -+ struct mhi_chan *ul_chan, *dl_chan; -+ struct mhi_device *mhi_dev; -+ struct mhi_controller *mhi_cntrl; -+ enum mhi_ee_type ee = MHI_EE_MAX; -+ -+ if (dev->bus != &mhi_bus_type) -+ return 0; -+ -+ mhi_dev = to_mhi_device(dev); -+ mhi_cntrl = mhi_dev->mhi_cntrl; -+ -+ /* Only destroy virtual devices thats attached to bus */ -+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) -+ return 0; -+ -+ ul_chan = mhi_dev->ul_chan; -+ dl_chan = 
mhi_dev->dl_chan; -+ -+ /* -+ * If execution environment is specified, remove only those devices that -+ * started in them based on ee_mask for the channels as we move on to a -+ * different execution environment -+ */ -+ if (data) -+ ee = *(enum mhi_ee_type *)data; -+ -+ /* -+ * For the suspend and resume case, this function will get called -+ * without mhi_unregister_controller(). Hence, we need to drop the -+ * references to mhi_dev created for ul and dl channels. We can -+ * be sure that there will be no instances of mhi_dev left after -+ * this. -+ */ -+ if (ul_chan) { -+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) -+ return 0; -+ -+ put_device(&ul_chan->mhi_dev->dev); -+ } -+ -+ if (dl_chan) { -+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) -+ return 0; -+ -+ put_device(&dl_chan->mhi_dev->dev); -+ } -+ -+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", -+ mhi_dev->name); -+ -+ /* Notify the client and remove the device from MHI bus */ -+ device_del(dev); -+ put_device(dev); -+ -+ return 0; -+} -+ -+int mhi_get_free_desc_count(struct mhi_device *mhi_dev, -+ enum dma_data_direction dir) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? -+ mhi_dev->ul_chan : mhi_dev->dl_chan; -+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -+ -+ return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); -+} -+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count); -+ -+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) -+{ -+ struct mhi_driver *mhi_drv; -+ -+ if (!mhi_dev->dev.driver) -+ return; -+ -+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver); -+ -+ if (mhi_drv->status_cb) -+ mhi_drv->status_cb(mhi_dev, cb_reason); -+} -+EXPORT_SYMBOL_GPL(mhi_notify); -+ -+/* Bind MHI channels to MHI devices */ -+void mhi_create_devices(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_chan *mhi_chan; -+ struct mhi_device *mhi_dev; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int i, ret; -+ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -+ if (!mhi_chan->configured || mhi_chan->mhi_dev || -+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) -+ continue; -+ mhi_dev = mhi_alloc_device(mhi_cntrl); -+ if (IS_ERR(mhi_dev)) -+ return; -+ -+ mhi_dev->dev_type = MHI_DEVICE_XFER; -+ switch (mhi_chan->dir) { -+ case DMA_TO_DEVICE: -+ mhi_dev->ul_chan = mhi_chan; -+ mhi_dev->ul_chan_id = mhi_chan->chan; -+ break; -+ case DMA_FROM_DEVICE: -+ /* We use dl_chan as offload channels */ -+ mhi_dev->dl_chan = mhi_chan; -+ mhi_dev->dl_chan_id = mhi_chan->chan; -+ break; -+ default: -+ dev_err(dev, "Direction not supported\n"); -+ put_device(&mhi_dev->dev); -+ return; -+ } -+ -+ get_device(&mhi_dev->dev); -+ mhi_chan->mhi_dev = mhi_dev; -+ -+ /* Check next channel if it matches */ -+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { -+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { -+ i++; -+ mhi_chan++; -+ if (mhi_chan->dir == DMA_TO_DEVICE) { -+ mhi_dev->ul_chan = mhi_chan; -+ mhi_dev->ul_chan_id = mhi_chan->chan; -+ } else { -+ mhi_dev->dl_chan = mhi_chan; -+ mhi_dev->dl_chan_id = mhi_chan->chan; -+ } -+ get_device(&mhi_dev->dev); -+ mhi_chan->mhi_dev = mhi_dev; -+ } -+ } -+ -+ /* Channel name is same for both UL and DL */ -+ mhi_dev->name = mhi_chan->name; -+ dev_set_name(&mhi_dev->dev, "%s_%s", -+ dev_name(&mhi_cntrl->mhi_dev->dev), -+ mhi_dev->name); -+ -+ /* Init wakeup source if available */ -+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) 
-+ device_init_wakeup(&mhi_dev->dev, true); -+ -+ ret = device_add(&mhi_dev->dev); -+ if (ret) -+ put_device(&mhi_dev->dev); -+ } -+} -+ -+irqreturn_t mhi_irq_handler(int irq_number, void *dev) -+{ -+ struct mhi_event *mhi_event = dev; -+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -+ struct mhi_event_ctxt *er_ctxt = -+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -+ struct mhi_ring *ev_ring = &mhi_event->ring; -+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); -+ void *dev_rp; -+ -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ return IRQ_HANDLED; -+ } -+ -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ -+ /* Only proceed if event ring has pending events */ -+ if (ev_ring->rp == dev_rp) -+ return IRQ_HANDLED; -+ -+ /* For client managed event ring, notify pending data */ -+ if (mhi_event->cl_manage) { -+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan; -+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev; -+ -+ if (mhi_dev) -+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); -+ } else { -+ tasklet_schedule(&mhi_event->task); -+ } -+ -+ return IRQ_HANDLED; -+} -+ -+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) -+{ -+ struct mhi_controller *mhi_cntrl = priv; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_state state; -+ enum mhi_pm_state pm_state = 0; -+ enum mhi_ee_type ee; -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ goto exit_intvec; -+ } -+ -+ state = mhi_get_mhi_state(mhi_cntrl); -+ ee = mhi_get_exec_env(mhi_cntrl); -+ dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", -+ TO_MHI_EXEC_STR(mhi_cntrl->ee), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state)); -+ -+ if (state == MHI_STATE_SYS_ERR) { -+ dev_dbg(dev, "System error detected\n"); -+ pm_state = mhi_tryset_pm_state(mhi_cntrl, -+ MHI_PM_SYS_ERR_DETECT); -+ } -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ if (pm_state != MHI_PM_SYS_ERR_DETECT) -+ goto exit_intvec; -+ -+ switch (ee) { -+ case MHI_EE_RDDM: -+ /* proceed if power down is not already in progress */ -+ if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); -+ mhi_cntrl->ee = ee; -+ wake_up_all(&mhi_cntrl->state_event); -+ } -+ break; -+ case MHI_EE_PBL: -+ case MHI_EE_EDL: -+ case MHI_EE_PTHRU: -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); -+ mhi_cntrl->ee = ee; -+ wake_up_all(&mhi_cntrl->state_event); -+ mhi_pm_sys_err_handler(mhi_cntrl); -+ break; -+ default: -+ wake_up_all(&mhi_cntrl->state_event); -+ mhi_pm_sys_err_handler(mhi_cntrl); -+ break; -+ } -+ -+exit_intvec: -+ -+ return IRQ_HANDLED; -+} -+ -+irqreturn_t mhi_intvec_handler(int irq_number, void *dev) -+{ -+ struct mhi_controller *mhi_cntrl = dev; -+ -+ /* Wake up events waiting for state change */ -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ return IRQ_WAKE_THREAD; -+} -+ -+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring) -+{ -+ dma_addr_t ctxt_wp; -+ -+ /* Update the WP */ -+ ring->wp += ring->el_size; -+ ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; -+ -+ if (ring->wp >= (ring->base + ring->len)) { -+ ring->wp = ring->base; -+ ctxt_wp = ring->iommu_base; -+ } -+ -+ *ring->ctxt_wp = cpu_to_le64(ctxt_wp); -+ -+ /* Update the RP */ -+ ring->rp += ring->el_size; -+ if (ring->rp >= (ring->base + ring->len)) -+ 
ring->rp = ring->base; -+ -+ /* Update to all cores */ -+ smp_wmb(); -+} -+ -+static int parse_xfer_event(struct mhi_controller *mhi_cntrl, -+ struct mhi_tre *event, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *buf_ring, *tre_ring; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ struct mhi_result result; -+ unsigned long flags = 0; -+ u32 ev_code; -+ -+ ev_code = MHI_TRE_GET_EV_CODE(event); -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ -+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? -+ -EOVERFLOW : 0; -+ -+ /* -+ * If it's a DB Event then we need to grab the lock -+ * with preemption disabled and as a write because we -+ * have to update db register and there are chances that -+ * another thread could be doing the same. -+ */ -+ if (ev_code >= MHI_EV_CC_OOB) -+ write_lock_irqsave(&mhi_chan->lock, flags); -+ else -+ read_lock_bh(&mhi_chan->lock); -+ -+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -+ goto end_process_tx_event; -+ -+ switch (ev_code) { -+ case MHI_EV_CC_OVERFLOW: -+ case MHI_EV_CC_EOB: -+ case MHI_EV_CC_EOT: -+ { -+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); -+ struct mhi_tre *local_rp, *ev_tre; -+ void *dev_rp; -+ struct mhi_buf_info *buf_info; -+ u16 xfer_len; -+ -+ if (!is_valid_ring_ptr(tre_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event element points outside of the tre ring\n"); -+ break; -+ } -+ /* Get the TRB this event points to */ -+ ev_tre = mhi_to_virtual(tre_ring, ptr); -+ -+ dev_rp = ev_tre + 1; -+ if (dev_rp >= (tre_ring->base + tre_ring->len)) -+ dev_rp = tre_ring->base; -+ -+ result.dir = mhi_chan->dir; -+ -+ local_rp = tre_ring->rp; -+ while (local_rp != dev_rp) { -+ buf_info = buf_ring->rp; -+ /* If it's the last TRE, get length from the event */ -+ if (local_rp == ev_tre) -+ xfer_len = MHI_TRE_GET_EV_LEN(event); -+ else -+ xfer_len = buf_info->len; -+ -+ /* Unmap if it's not pre-mapped by client */ -+ if (likely(!buf_info->pre_mapped)) -+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info); -+ -+ result.buf_addr = buf_info->cb_buf; -+ -+ /* truncate to buf len if xfer_len is larger */ -+ result.bytes_xferd = -+ min_t(u16, xfer_len, buf_info->len); -+ mhi_del_ring_element(mhi_cntrl, buf_ring); -+ mhi_del_ring_element(mhi_cntrl, tre_ring); -+ local_rp = tre_ring->rp; -+ -+ /* notify client */ -+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -+ -+ if (mhi_chan->dir == DMA_TO_DEVICE) { -+ atomic_dec(&mhi_cntrl->pending_pkts); -+ /* Release the reference got from mhi_queue() */ -+ mhi_cntrl->runtime_put(mhi_cntrl); -+ } -+ -+ /* -+ * Recycle the buffer if buffer is pre-allocated, -+ * if there is an error, not much we can do apart -+ * from dropping the packet -+ */ -+ if (mhi_chan->pre_alloc) { -+ if (mhi_queue_buf(mhi_chan->mhi_dev, -+ mhi_chan->dir, -+ buf_info->cb_buf, -+ buf_info->len, MHI_EOT)) { -+ dev_err(dev, -+ "Error recycling buffer for chan:%d\n", -+ mhi_chan->chan); -+ kfree(buf_info->cb_buf); -+ } -+ } -+ } -+ break; -+ } /* CC_EOT */ -+ case MHI_EV_CC_OOB: -+ case MHI_EV_CC_DB_MODE: -+ { -+ unsigned long pm_lock_flags; -+ -+ mhi_chan->db_cfg.db_mode = 1; -+ read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); -+ if (tre_ring->wp != tre_ring->rp && -+ MHI_DB_ACCESS_VALID(mhi_cntrl)) { -+ mhi_ring_chan_db(mhi_cntrl, mhi_chan); -+ } -+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); -+ break; -+ } -+ case MHI_EV_CC_BAD_TRE: -+ default: -+ dev_err(dev, "Unknown event 0x%x\n", ev_code); -+ break; -+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ -+ 
-+end_process_tx_event: -+ if (ev_code >= MHI_EV_CC_OOB) -+ write_unlock_irqrestore(&mhi_chan->lock, flags); -+ else -+ read_unlock_bh(&mhi_chan->lock); -+ -+ return 0; -+} -+ -+static int parse_rsc_event(struct mhi_controller *mhi_cntrl, -+ struct mhi_tre *event, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *buf_ring, *tre_ring; -+ struct mhi_buf_info *buf_info; -+ struct mhi_result result; -+ int ev_code; -+ u32 cookie; /* offset to local descriptor */ -+ u16 xfer_len; -+ -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ -+ ev_code = MHI_TRE_GET_EV_CODE(event); -+ cookie = MHI_TRE_GET_EV_COOKIE(event); -+ xfer_len = MHI_TRE_GET_EV_LEN(event); -+ -+ /* Received out of bound cookie */ -+ WARN_ON(cookie >= buf_ring->len); -+ -+ buf_info = buf_ring->base + cookie; -+ -+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? -+ -EOVERFLOW : 0; -+ -+ /* truncate to buf len if xfer_len is larger */ -+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); -+ result.buf_addr = buf_info->cb_buf; -+ result.dir = mhi_chan->dir; -+ -+ read_lock_bh(&mhi_chan->lock); -+ -+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -+ goto end_process_rsc_event; -+ -+ WARN_ON(!buf_info->used); -+ -+ /* notify the client */ -+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -+ -+ /* -+ * Note: We're arbitrarily incrementing RP even though, completion -+ * packet we processed might not be the same one, reason we can do this -+ * is because device guaranteed to cache descriptors in order it -+ * receive, so even though completion event is different we can re-use -+ * all descriptors in between. -+ * Example: -+ * Transfer Ring has descriptors: A, B, C, D -+ * Last descriptor host queue is D (WP) and first descriptor -+ * host queue is A (RP). -+ * The completion event we just serviced is descriptor C. -+ * Then we can safely queue descriptors to replace A, B, and C -+ * even though host did not receive any completions. 
-+ */ -+ mhi_del_ring_element(mhi_cntrl, tre_ring); -+ buf_info->used = false; -+ -+end_process_rsc_event: -+ read_unlock_bh(&mhi_chan->lock); -+ -+ return 0; -+} -+ -+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, -+ struct mhi_tre *tre) -+{ -+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); -+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -+ struct mhi_ring *mhi_ring = &cmd_ring->ring; -+ struct mhi_tre *cmd_pkt; -+ struct mhi_chan *mhi_chan; -+ u32 chan; -+ -+ if (!is_valid_ring_ptr(mhi_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event element points outside of the cmd ring\n"); -+ return; -+ } -+ -+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr); -+ -+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); -+ -+ if (chan < mhi_cntrl->max_chan && -+ mhi_cntrl->mhi_chan[chan].configured) { -+ mhi_chan = &mhi_cntrl->mhi_chan[chan]; -+ write_lock_bh(&mhi_chan->lock); -+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); -+ complete(&mhi_chan->completion); -+ write_unlock_bh(&mhi_chan->lock); -+ } else { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Completion packet for invalid channel ID: %d\n", chan); -+ } -+ -+ mhi_del_ring_element(mhi_cntrl, mhi_ring); -+} -+ -+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, -+ u32 event_quota) -+{ -+ struct mhi_tre *dev_rp, *local_rp; -+ struct mhi_ring *ev_ring = &mhi_event->ring; -+ struct mhi_event_ctxt *er_ctxt = -+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -+ struct mhi_chan *mhi_chan; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 chan; -+ int count = 0; -+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); -+ -+ /* -+ * This is a quick check to avoid unnecessary event processing -+ * in case MHI is already in error state, but it's still possible -+ * to transition to error state while processing events -+ */ -+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) -+ return -EIO; -+ -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ return -EIO; -+ } -+ -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ local_rp = ev_ring->rp; -+ -+ while (dev_rp != local_rp) { -+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); -+ -+ switch (type) { -+ case MHI_PKT_TYPE_BW_REQ_EVENT: -+ { -+ struct mhi_link_info *link_info; -+ -+ link_info = &mhi_cntrl->mhi_link_info; -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ link_info->target_link_speed = -+ MHI_TRE_GET_EV_LINKSPEED(local_rp); -+ link_info->target_link_width = -+ MHI_TRE_GET_EV_LINKWIDTH(local_rp); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ dev_dbg(dev, "Received BW_REQ event\n"); -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); -+ break; -+ } -+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT: -+ { -+ enum mhi_state new_state; -+ -+ new_state = MHI_TRE_GET_EV_STATE(local_rp); -+ -+ dev_dbg(dev, "State change event to state: %s\n", -+ TO_MHI_STATE_STR(new_state)); -+ -+ switch (new_state) { -+ case MHI_STATE_M0: -+ mhi_pm_m0_transition(mhi_cntrl); -+ break; -+ case MHI_STATE_M1: -+ mhi_pm_m1_transition(mhi_cntrl); -+ break; -+ case MHI_STATE_M3: -+ mhi_pm_m3_transition(mhi_cntrl); -+ break; -+ case MHI_STATE_SYS_ERR: -+ { -+ enum mhi_pm_state pm_state; -+ -+ dev_dbg(dev, "System error detected\n"); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ pm_state = mhi_tryset_pm_state(mhi_cntrl, -+ MHI_PM_SYS_ERR_DETECT); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (pm_state == MHI_PM_SYS_ERR_DETECT) -+ mhi_pm_sys_err_handler(mhi_cntrl); -+ break; -+ } -+ 
default: -+ dev_err(dev, "Invalid state: %s\n", -+ TO_MHI_STATE_STR(new_state)); -+ } -+ -+ break; -+ } -+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: -+ mhi_process_cmd_completion(mhi_cntrl, local_rp); -+ break; -+ case MHI_PKT_TYPE_EE_EVENT: -+ { -+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX; -+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); -+ -+ dev_dbg(dev, "Received EE event: %s\n", -+ TO_MHI_EXEC_STR(event)); -+ switch (event) { -+ case MHI_EE_SBL: -+ st = DEV_ST_TRANSITION_SBL; -+ break; -+ case MHI_EE_WFW: -+ case MHI_EE_AMSS: -+ st = DEV_ST_TRANSITION_MISSION_MODE; -+ break; -+ case MHI_EE_FP: -+ st = DEV_ST_TRANSITION_FP; -+ break; -+ case MHI_EE_RDDM: -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->ee = event; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ wake_up_all(&mhi_cntrl->state_event); -+ break; -+ default: -+ dev_err(dev, -+ "Unhandled EE event: 0x%x\n", type); -+ } -+ if (st != DEV_ST_TRANSITION_MAX) -+ mhi_queue_state_transition(mhi_cntrl, st); -+ -+ break; -+ } -+ case MHI_PKT_TYPE_TX_EVENT: -+ chan = MHI_TRE_GET_EV_CHID(local_rp); -+ -+ WARN_ON(chan >= mhi_cntrl->max_chan); -+ -+ /* -+ * Only process the event ring elements whose channel -+ * ID is within the maximum supported range. -+ */ -+ if (chan < mhi_cntrl->max_chan) { -+ mhi_chan = &mhi_cntrl->mhi_chan[chan]; -+ if (!mhi_chan->configured) -+ break; -+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); -+ event_quota--; -+ } -+ break; -+ default: -+ dev_err(dev, "Unhandled event type: %d\n", type); -+ break; -+ } -+ -+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); -+ local_rp = ev_ring->rp; -+ -+ ptr = le64_to_cpu(er_ctxt->rp); -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ return -EIO; -+ } -+ -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ count++; -+ } -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -+ mhi_ring_er_db(mhi_event); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ return count; -+} -+ -+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, -+ u32 event_quota) -+{ -+ struct mhi_tre *dev_rp, *local_rp; -+ struct mhi_ring *ev_ring = &mhi_event->ring; -+ struct mhi_event_ctxt *er_ctxt = -+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; -+ int count = 0; -+ u32 chan; -+ struct mhi_chan *mhi_chan; -+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); -+ -+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) -+ return -EIO; -+ -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ return -EIO; -+ } -+ -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ local_rp = ev_ring->rp; -+ -+ while (dev_rp != local_rp && event_quota > 0) { -+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); -+ -+ chan = MHI_TRE_GET_EV_CHID(local_rp); -+ -+ WARN_ON(chan >= mhi_cntrl->max_chan); -+ -+ /* -+ * Only process the event ring elements whose channel -+ * ID is within the maximum supported range. 
-+ */ -+ if (chan < mhi_cntrl->max_chan && -+ mhi_cntrl->mhi_chan[chan].configured) { -+ mhi_chan = &mhi_cntrl->mhi_chan[chan]; -+ -+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { -+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); -+ event_quota--; -+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { -+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); -+ event_quota--; -+ } -+ } -+ -+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); -+ local_rp = ev_ring->rp; -+ -+ ptr = le64_to_cpu(er_ctxt->rp); -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ return -EIO; -+ } -+ -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ count++; -+ } -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -+ mhi_ring_er_db(mhi_event); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ return count; -+} -+ -+void mhi_ev_task(unsigned long data) -+{ -+ struct mhi_event *mhi_event = (struct mhi_event *)data; -+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -+ -+ /* process all pending events */ -+ spin_lock_bh(&mhi_event->lock); -+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); -+ spin_unlock_bh(&mhi_event->lock); -+} -+ -+void mhi_ctrl_ev_task(unsigned long data) -+{ -+ struct mhi_event *mhi_event = (struct mhi_event *)data; -+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_state state; -+ enum mhi_pm_state pm_state = 0; -+ int ret; -+ -+ /* -+ * We can check PM state w/o a lock here because there is no way -+ * PM state can change from reg access valid to no access while this -+ * thread being executed. -+ */ -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ /* -+ * We may have a pending event but not allowed to -+ * process it since we are probably in a suspended state, -+ * so trigger a resume. -+ */ -+ mhi_trigger_resume(mhi_cntrl); -+ -+ return; -+ } -+ -+ /* Process ctrl events events */ -+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); -+ -+ /* -+ * We received an IRQ but no events to process, maybe device went to -+ * SYS_ERR state? Check the state to confirm. -+ */ -+ if (!ret) { -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ state = mhi_get_mhi_state(mhi_cntrl); -+ if (state == MHI_STATE_SYS_ERR) { -+ dev_dbg(dev, "System error detected\n"); -+ pm_state = mhi_tryset_pm_state(mhi_cntrl, -+ MHI_PM_SYS_ERR_DETECT); -+ } -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (pm_state == MHI_PM_SYS_ERR_DETECT) -+ mhi_pm_sys_err_handler(mhi_cntrl); -+ } -+} -+ -+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, -+ struct mhi_ring *ring) -+{ -+ void *tmp = ring->wp + ring->el_size; -+ -+ if (tmp >= (ring->base + ring->len)) -+ tmp = ring->base; -+ -+ return (tmp == ring->rp); -+} -+ -+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, -+ enum dma_data_direction dir, enum mhi_flags mflags) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : -+ mhi_dev->dl_chan; -+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -+ unsigned long flags; -+ int ret; -+ -+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) -+ return -EIO; -+ -+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags); -+ -+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring); -+ if (unlikely(ret)) { -+ ret = -EAGAIN; -+ goto exit_unlock; -+ } -+ -+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); -+ if (unlikely(ret)) -+ goto exit_unlock; -+ -+ /* Packet is queued, take a usage ref to exit M3 if necessary -+ * for host->device buffer, balanced put is done on buffer completion -+ * for device->host buffer, balanced put is after ringing the DB -+ */ -+ mhi_cntrl->runtime_get(mhi_cntrl); -+ -+ /* Assert dev_wake (to exit/prevent M1/M2)*/ -+ mhi_cntrl->wake_toggle(mhi_cntrl); -+ -+ if (mhi_chan->dir == DMA_TO_DEVICE) -+ atomic_inc(&mhi_cntrl->pending_pkts); -+ -+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -+ mhi_ring_chan_db(mhi_cntrl, mhi_chan); -+ -+ if (dir == DMA_FROM_DEVICE) -+ mhi_cntrl->runtime_put(mhi_cntrl); -+ -+exit_unlock: -+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); -+ -+ return ret; -+} -+ -+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, -+ struct sk_buff *skb, size_t len, enum mhi_flags mflags) -+{ -+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : -+ mhi_dev->dl_chan; -+ struct mhi_buf_info buf_info = { }; -+ -+ buf_info.v_addr = skb->data; -+ buf_info.cb_buf = skb; -+ buf_info.len = len; -+ -+ if (unlikely(mhi_chan->pre_alloc)) -+ return -EINVAL; -+ -+ return mhi_queue(mhi_dev, &buf_info, dir, mflags); -+} -+EXPORT_SYMBOL_GPL(mhi_queue_skb); -+ -+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, -+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) -+{ -+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
mhi_dev->ul_chan : -+ mhi_dev->dl_chan; -+ struct mhi_buf_info buf_info = { }; -+ -+ buf_info.p_addr = mhi_buf->dma_addr; -+ buf_info.cb_buf = mhi_buf; -+ buf_info.pre_mapped = true; -+ buf_info.len = len; -+ -+ if (unlikely(mhi_chan->pre_alloc)) -+ return -EINVAL; -+ -+ return mhi_queue(mhi_dev, &buf_info, dir, mflags); -+} -+EXPORT_SYMBOL_GPL(mhi_queue_dma); -+ -+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, -+ struct mhi_buf_info *info, enum mhi_flags flags) -+{ -+ struct mhi_ring *buf_ring, *tre_ring; -+ struct mhi_tre *mhi_tre; -+ struct mhi_buf_info *buf_info; -+ int eot, eob, chain, bei; -+ int ret; -+ -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ -+ buf_info = buf_ring->wp; -+ WARN_ON(buf_info->used); -+ buf_info->pre_mapped = info->pre_mapped; -+ if (info->pre_mapped) -+ buf_info->p_addr = info->p_addr; -+ else -+ buf_info->v_addr = info->v_addr; -+ buf_info->cb_buf = info->cb_buf; -+ buf_info->wp = tre_ring->wp; -+ buf_info->dir = mhi_chan->dir; -+ buf_info->len = info->len; -+ -+ if (!info->pre_mapped) { -+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); -+ if (ret) -+ return ret; -+ } -+ -+ eob = !!(flags & MHI_EOB); -+ eot = !!(flags & MHI_EOT); -+ chain = !!(flags & MHI_CHAIN); -+ bei = !!(mhi_chan->intmod); -+ -+ mhi_tre = tre_ring->wp; -+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); -+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); -+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); -+ -+ /* increment WP */ -+ mhi_add_ring_element(mhi_cntrl, tre_ring); -+ mhi_add_ring_element(mhi_cntrl, buf_ring); -+ -+ return 0; -+} -+ -+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, -+ void *buf, size_t len, enum mhi_flags mflags) -+{ -+ struct mhi_buf_info buf_info = { }; -+ -+ buf_info.v_addr = buf; -+ buf_info.cb_buf = buf; -+ buf_info.len = len; -+ -+ return mhi_queue(mhi_dev, &buf_info, dir, mflags); -+} -+EXPORT_SYMBOL_GPL(mhi_queue_buf); -+ -+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 
-+ mhi_dev->ul_chan : mhi_dev->dl_chan; -+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -+ -+ return mhi_is_ring_full(mhi_cntrl, tre_ring); -+} -+EXPORT_SYMBOL_GPL(mhi_queue_is_full); -+ -+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan, -+ enum mhi_cmd_type cmd) -+{ -+ struct mhi_tre *cmd_tre = NULL; -+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int chan = 0; -+ -+ if (mhi_chan) -+ chan = mhi_chan->chan; -+ -+ spin_lock_bh(&mhi_cmd->lock); -+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { -+ spin_unlock_bh(&mhi_cmd->lock); -+ return -ENOMEM; -+ } -+ -+ /* prepare the cmd tre */ -+ cmd_tre = ring->wp; -+ switch (cmd) { -+ case MHI_CMD_RESET_CHAN: -+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; -+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; -+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); -+ break; -+ case MHI_CMD_STOP_CHAN: -+ cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; -+ cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; -+ cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); -+ break; -+ case MHI_CMD_START_CHAN: -+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR; -+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; -+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); -+ break; -+ default: -+ dev_err(dev, "Command not supported\n"); -+ break; -+ } -+ -+ /* queue to hardware */ -+ mhi_add_ring_element(mhi_cntrl, ring); -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) -+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ spin_unlock_bh(&mhi_cmd->lock); -+ -+ return 0; -+} -+ -+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan, -+ enum mhi_ch_state_type to_state) -+{ -+ struct device *dev = &mhi_chan->mhi_dev->dev; -+ enum mhi_cmd_type cmd = MHI_CMD_NOP; -+ int ret; -+ -+ dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, -+ TO_CH_STATE_TYPE_STR(to_state)); -+ -+ switch (to_state) { -+ case MHI_CH_STATE_TYPE_RESET: -+ write_lock_irq(&mhi_chan->lock); -+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP && -+ mhi_chan->ch_state != MHI_CH_STATE_ENABLED && -+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { -+ write_unlock_irq(&mhi_chan->lock); -+ return -EINVAL; -+ } -+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -+ write_unlock_irq(&mhi_chan->lock); -+ -+ cmd = MHI_CMD_RESET_CHAN; -+ break; -+ case MHI_CH_STATE_TYPE_STOP: -+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) -+ return -EINVAL; -+ -+ cmd = MHI_CMD_STOP_CHAN; -+ break; -+ case MHI_CH_STATE_TYPE_START: -+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP && -+ mhi_chan->ch_state != MHI_CH_STATE_DISABLED) -+ return -EINVAL; -+ -+ cmd = MHI_CMD_START_CHAN; -+ break; -+ default: -+ dev_err(dev, "%d: Channel state update to %s not allowed\n", -+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -+ return -EINVAL; -+ } -+ -+ /* bring host and device out of suspended states */ -+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); -+ if (ret) -+ return ret; -+ mhi_cntrl->runtime_get(mhi_cntrl); -+ -+ reinit_completion(&mhi_chan->completion); -+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); -+ if (ret) { -+ dev_err(dev, "%d: Failed to send %s channel command\n", -+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -+ goto exit_channel_update; -+ } -+ -+ ret = wait_for_completion_timeout(&mhi_chan->completion, -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ if (!ret || mhi_chan->ccs != 
MHI_EV_CC_SUCCESS) { -+ dev_err(dev, -+ "%d: Failed to receive %s channel command completion\n", -+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -+ ret = -EIO; -+ goto exit_channel_update; -+ } -+ -+ ret = 0; -+ -+ if (to_state != MHI_CH_STATE_TYPE_RESET) { -+ write_lock_irq(&mhi_chan->lock); -+ mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? -+ MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; -+ write_unlock_irq(&mhi_chan->lock); -+ } -+ -+ dev_dbg(dev, "%d: Channel state change to %s successful\n", -+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); -+ -+exit_channel_update: -+ mhi_cntrl->runtime_put(mhi_cntrl); -+ mhi_device_put(mhi_cntrl->mhi_dev); -+ -+ return ret; -+} -+ -+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ int ret; -+ struct device *dev = &mhi_chan->mhi_dev->dev; -+ -+ mutex_lock(&mhi_chan->mutex); -+ -+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { -+ dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n", -+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); -+ goto exit_unprepare_channel; -+ } -+ -+ /* no more processing events for this channel */ -+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, -+ MHI_CH_STATE_TYPE_RESET); -+ if (ret) -+ dev_err(dev, "%d: Failed to reset channel, still resetting\n", -+ mhi_chan->chan); -+ -+exit_unprepare_channel: -+ write_lock_irq(&mhi_chan->lock); -+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED; -+ write_unlock_irq(&mhi_chan->lock); -+ -+ if (!mhi_chan->offload_ch) { -+ mhi_reset_chan(mhi_cntrl, mhi_chan); -+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -+ } -+ dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); -+ -+ mutex_unlock(&mhi_chan->mutex); -+} -+ -+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ int ret = 0; -+ struct device *dev = &mhi_chan->mhi_dev->dev; -+ -+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { -+ dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n", -+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); -+ return -ENOTCONN; -+ } -+ -+ mutex_lock(&mhi_chan->mutex); -+ -+ /* Check of client manages channel context for offload channels */ -+ if (!mhi_chan->offload_ch) { -+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); -+ if (ret) -+ goto error_init_chan; -+ } -+ -+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, -+ MHI_CH_STATE_TYPE_START); -+ if (ret) -+ goto error_pm_state; -+ -+ /* Pre-allocate buffer for xfer ring */ -+ if (mhi_chan->pre_alloc) { -+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl, -+ &mhi_chan->tre_ring); -+ size_t len = mhi_cntrl->buffer_len; -+ -+ while (nr_el--) { -+ void *buf; -+ struct mhi_buf_info info = { }; -+ buf = kmalloc(len, GFP_KERNEL); -+ if (!buf) { -+ ret = -ENOMEM; -+ goto error_pre_alloc; -+ } -+ -+ /* Prepare transfer descriptors */ -+ info.v_addr = buf; -+ info.cb_buf = buf; -+ info.len = len; -+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); -+ if (ret) { -+ kfree(buf); -+ goto error_pre_alloc; -+ } -+ } -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { -+ read_lock_irq(&mhi_chan->lock); -+ mhi_ring_chan_db(mhi_cntrl, mhi_chan); -+ read_unlock_irq(&mhi_chan->lock); -+ } -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ } -+ -+ mutex_unlock(&mhi_chan->mutex); -+ -+ return 0; -+ -+error_pm_state: -+ if (!mhi_chan->offload_ch) -+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); -+ -+error_init_chan: -+ mutex_unlock(&mhi_chan->mutex); -+ -+ return ret; -+ -+error_pre_alloc: -+ mutex_unlock(&mhi_chan->mutex); -+ 
mhi_unprepare_channel(mhi_cntrl, mhi_chan); -+ -+ return ret; -+} -+ -+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, -+ struct mhi_event *mhi_event, -+ struct mhi_event_ctxt *er_ctxt, -+ int chan) -+ -+{ -+ struct mhi_tre *dev_rp, *local_rp; -+ struct mhi_ring *ev_ring; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ unsigned long flags; -+ dma_addr_t ptr; -+ -+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); -+ -+ ev_ring = &mhi_event->ring; -+ -+ /* mark all stale events related to channel as STALE event */ -+ spin_lock_irqsave(&mhi_event->lock, flags); -+ -+ ptr = le64_to_cpu(er_ctxt->rp); -+ if (!is_valid_ring_ptr(ev_ring, ptr)) { -+ dev_err(&mhi_cntrl->mhi_dev->dev, -+ "Event ring rp points outside of the event ring\n"); -+ dev_rp = ev_ring->rp; -+ } else { -+ dev_rp = mhi_to_virtual(ev_ring, ptr); -+ } -+ -+ local_rp = ev_ring->rp; -+ while (dev_rp != local_rp) { -+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && -+ chan == MHI_TRE_GET_EV_CHID(local_rp)) -+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, -+ MHI_PKT_TYPE_STALE_EVENT); -+ local_rp++; -+ if (local_rp == (ev_ring->base + ev_ring->len)) -+ local_rp = ev_ring->base; -+ } -+ -+ dev_dbg(dev, "Finished marking events as stale events\n"); -+ spin_unlock_irqrestore(&mhi_event->lock, flags); -+} -+ -+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, -+ struct mhi_chan *mhi_chan) -+{ -+ struct mhi_ring *buf_ring, *tre_ring; -+ struct mhi_result result; -+ -+ /* Reset any pending buffers */ -+ buf_ring = &mhi_chan->buf_ring; -+ tre_ring = &mhi_chan->tre_ring; -+ result.transaction_status = -ENOTCONN; -+ result.bytes_xferd = 0; -+ while (tre_ring->rp != tre_ring->wp) { -+ struct mhi_buf_info *buf_info = buf_ring->rp; -+ -+ if (mhi_chan->dir == DMA_TO_DEVICE) { -+ atomic_dec(&mhi_cntrl->pending_pkts); -+ /* Release the reference got from mhi_queue() */ -+ mhi_cntrl->runtime_put(mhi_cntrl); -+ } -+ -+ if (!buf_info->pre_mapped) -+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info); -+ -+ mhi_del_ring_element(mhi_cntrl, buf_ring); -+ mhi_del_ring_element(mhi_cntrl, tre_ring); -+ -+ if (mhi_chan->pre_alloc) { -+ kfree(buf_info->cb_buf); -+ } else { -+ result.buf_addr = buf_info->cb_buf; -+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); -+ } -+ } -+} -+ -+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) -+{ -+ struct mhi_event *mhi_event; -+ struct mhi_event_ctxt *er_ctxt; -+ int chan = mhi_chan->chan; -+ -+ /* Nothing to reset, client doesn't queue buffers */ -+ if (mhi_chan->offload_ch) -+ return; -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; -+ -+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); -+ -+ mhi_reset_data_chan(mhi_cntrl, mhi_chan); -+ -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+} -+ -+/* Move channel to start state */ -+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) -+{ -+ int ret, dir; -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan; -+ -+ for (dir = 0; dir < 2; dir++) { -+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; -+ if (!mhi_chan) -+ continue; -+ -+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); -+ if (ret) -+ goto error_open_chan; -+ } -+ -+ return 0; -+ -+error_open_chan: -+ for (--dir; dir >= 0; dir--) { -+ mhi_chan = dir ? 
mhi_dev->dl_chan : mhi_dev->ul_chan; -+ if (!mhi_chan) -+ continue; -+ -+ mhi_unprepare_channel(mhi_cntrl, mhi_chan); -+ } -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); -+ -+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan; -+ int dir; -+ -+ for (dir = 0; dir < 2; dir++) { -+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; -+ if (!mhi_chan) -+ continue; -+ -+ mhi_unprepare_channel(mhi_cntrl, mhi_chan); -+ } -+} -+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); -+ -+int mhi_poll(struct mhi_device *mhi_dev, u32 budget) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ struct mhi_chan *mhi_chan = mhi_dev->dl_chan; -+ struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; -+ int ret; -+ -+ spin_lock_bh(&mhi_event->lock); -+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); -+ spin_unlock_bh(&mhi_event->lock); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_poll); -diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c -new file mode 100644 -index 0000000000000..b780990faf806 ---- /dev/null -+++ b/drivers/bus/mhi/host/pci_generic.c -@@ -0,0 +1,1146 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * MHI PCI driver - MHI over PCI controller driver -+ * -+ * This module is a generic driver for registering MHI-over-PCI devices, -+ * such as PCIe QCOM modems. -+ * -+ * Copyright (C) 2020 Linaro Ltd -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MHI_PCI_DEFAULT_BAR_NUM 0 -+ -+#define MHI_POST_RESET_DELAY_MS 2000 -+ -+#define HEALTH_CHECK_PERIOD (HZ * 2) -+ -+/** -+ * struct mhi_pci_dev_info - MHI PCI device specific information -+ * @config: MHI controller configuration -+ * @name: name of the PCI module -+ * @fw: firmware path (if any) -+ * @edl: emergency download mode firmware path (if any) -+ * @bar_num: PCI base address register to use for MHI MMIO register space -+ * @dma_data_width: DMA transfer word size (32 or 64 bits) -+ * @mru_default: default MRU size for MBIM network packets -+ * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead -+ * of inband wake support (such as sdx24) -+ */ -+struct mhi_pci_dev_info { -+ const struct mhi_controller_config *config; -+ const char *name; -+ const char *fw; -+ const char *edl; -+ unsigned int bar_num; -+ unsigned int dma_data_width; -+ unsigned int mru_default; -+ bool sideband_wake; -+}; -+ -+#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_TO_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_AMSS), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } \ -+ -+#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_FROM_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_AMSS), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } -+ -+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = 
ev_ring, \ -+ .dir = DMA_FROM_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_AMSS), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ .auto_queue = true, \ -+ } -+ -+#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ -+ { \ -+ .num_elements = el_count, \ -+ .irq_moderation_ms = 0, \ -+ .irq = (ev_ring) + 1, \ -+ .priority = 1, \ -+ .mode = MHI_DB_BRST_DISABLE, \ -+ .data_type = MHI_ER_CTRL, \ -+ .hardware_event = false, \ -+ .client_managed = false, \ -+ .offload_channel = false, \ -+ } -+ -+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_TO_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_AMSS), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_ENABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = true, \ -+ } \ -+ -+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_FROM_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_AMSS), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_ENABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = true, \ -+ } -+ -+#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_TO_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_SBL), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } \ -+ -+#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_FROM_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_SBL), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } -+ -+#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_TO_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_FP), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } \ -+ -+#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \ -+ { \ -+ .num = ch_num, \ -+ .name = ch_name, \ -+ .num_elements = el_count, \ -+ .event_ring = ev_ring, \ -+ .dir = DMA_FROM_DEVICE, \ -+ .ee_mask = BIT(MHI_EE_FP), \ -+ .pollcfg = 0, \ -+ .doorbell = MHI_DB_BRST_DISABLE, \ -+ .lpm_notify = false, \ -+ .offload_channel = false, \ -+ .doorbell_mode_switch = false, \ -+ } -+ -+#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \ -+ { \ -+ .num_elements = el_count, \ -+ .irq_moderation_ms = 5, \ -+ .irq = (ev_ring) + 1, \ -+ .priority = 1, \ -+ .mode = MHI_DB_BRST_DISABLE, \ -+ .data_type = MHI_ER_DATA, \ -+ .hardware_event = false, \ -+ .client_managed = false, \ -+ .offload_channel = false, \ -+ } -+ -+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ -+ { \ -+ .num_elements = el_count, \ -+ .irq_moderation_ms = 1, \ -+ .irq = (ev_ring) + 1, \ -+ .priority = 1, \ -+ .mode = MHI_DB_BRST_DISABLE, \ -+ .data_type = 
MHI_ER_DATA, \ -+ .hardware_event = true, \ -+ .client_managed = false, \ -+ .offload_channel = false, \ -+ .channel = ch_num, \ -+ } -+ -+static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { -+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), -+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), -+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), -+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), -+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), -+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), -+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), -+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), -+}; -+ -+static struct mhi_event_config modem_qcom_v1_mhi_events[] = { -+ /* first ring is control+data ring */ -+ MHI_EVENT_CONFIG_CTRL(0, 64), -+ /* DIAG dedicated event ring */ -+ MHI_EVENT_CONFIG_DATA(1, 128), -+ /* Hardware channels request dedicated hardware event rings */ -+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) -+}; -+ -+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { -+ .max_channels = 128, -+ .timeout_ms = 8000, -+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), -+ .ch_cfg = modem_qcom_v1_mhi_channels, -+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), -+ .event_cfg = modem_qcom_v1_mhi_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { -+ .name = "qcom-sdx65m", -+ .fw = "qcom/sdx65m/xbl.elf", -+ .edl = "qcom/sdx65m/edl.mbn", -+ .config = &modem_qcom_v1_mhiv_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .sideband_wake = false, -+}; -+ -+static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { -+ .name = "qcom-sdx55m", -+ .fw = "qcom/sdx55m/sbl1.mbn", -+ .edl = "qcom/sdx55m/edl.mbn", -+ .config = &modem_qcom_v1_mhiv_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .mru_default = 32768, -+ .sideband_wake = false, -+}; -+ -+static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { -+ .name = "qcom-sdx24", -+ .edl = "qcom/prog_firehose_sdx24.mbn", -+ .config = &modem_qcom_v1_mhiv_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .sideband_wake = true, -+}; -+ -+static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { -+ MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0), -+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), -+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), -+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -+ /* The EDL firmware is a flash-programmer exposing firehose protocol */ -+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), -+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), -+}; -+ -+static struct mhi_event_config mhi_quectel_em1xx_events[] = { -+ MHI_EVENT_CONFIG_CTRL(0, 128), -+ MHI_EVENT_CONFIG_DATA(1, 128), -+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) -+}; -+ -+static const struct mhi_controller_config modem_quectel_em1xx_config = { -+ .max_channels = 128, -+ .timeout_ms = 20000, -+ 
.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels), -+ .ch_cfg = mhi_quectel_em1xx_channels, -+ .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events), -+ .event_cfg = mhi_quectel_em1xx_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { -+ .name = "quectel-em1xx", -+ .edl = "qcom/prog_firehose_sdx24.mbn", -+ .config = &modem_quectel_em1xx_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .mru_default = 32768, -+ .sideband_wake = true, -+}; -+ -+static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { -+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), -+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), -+}; -+ -+static struct mhi_event_config mhi_foxconn_sdx55_events[] = { -+ MHI_EVENT_CONFIG_CTRL(0, 128), -+ MHI_EVENT_CONFIG_DATA(1, 128), -+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) -+}; -+ -+static const struct mhi_controller_config modem_foxconn_sdx55_config = { -+ .max_channels = 128, -+ .timeout_ms = 20000, -+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), -+ .ch_cfg = mhi_foxconn_sdx55_channels, -+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), -+ .event_cfg = mhi_foxconn_sdx55_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { -+ .name = "foxconn-sdx55", -+ .fw = "qcom/sdx55m/sbl1.mbn", -+ .edl = "qcom/sdx55m/edl.mbn", -+ .config = &modem_foxconn_sdx55_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .mru_default = 32768, -+ .sideband_wake = false, -+}; -+ -+static const struct mhi_channel_config mhi_mv31_channels[] = { -+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), -+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), -+ /* MBIM Control Channel */ -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), -+ /* MBIM Data Channel */ -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), -+}; -+ -+static struct mhi_event_config mhi_mv31_events[] = { -+ MHI_EVENT_CONFIG_CTRL(0, 256), -+ MHI_EVENT_CONFIG_DATA(1, 256), -+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), -+}; -+ -+static const struct mhi_controller_config modem_mv31_config = { -+ .max_channels = 128, -+ .timeout_ms = 20000, -+ .num_channels = ARRAY_SIZE(mhi_mv31_channels), -+ .ch_cfg = mhi_mv31_channels, -+ .num_events = ARRAY_SIZE(mhi_mv31_events), -+ .event_cfg = mhi_mv31_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_mv31_info = { -+ .name = "cinterion-mv31", -+ .config = &modem_mv31_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .mru_default = 32768, -+}; -+ -+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { -+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), -+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), -+}; -+ -+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { -+ 
MHI_EVENT_CONFIG_CTRL(0, 128), -+ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) -+}; -+ -+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { -+ .max_channels = 128, -+ .timeout_ms = 20000, -+ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), -+ .ch_cfg = mhi_telit_fn980_hw_v1_channels, -+ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), -+ .event_cfg = mhi_telit_fn980_hw_v1_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { -+ .name = "telit-fn980-hwv1", -+ .fw = "qcom/sdx55m/sbl1.mbn", -+ .edl = "qcom/sdx55m/edl.mbn", -+ .config = &modem_telit_fn980_hw_v1_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .mru_default = 32768, -+ .sideband_wake = false, -+}; -+ -+static const struct mhi_channel_config mhi_telit_fn990_channels[] = { -+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), -+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), -+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), -+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), -+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), -+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), -+}; -+ -+static struct mhi_event_config mhi_telit_fn990_events[] = { -+ MHI_EVENT_CONFIG_CTRL(0, 128), -+ MHI_EVENT_CONFIG_DATA(1, 128), -+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) -+}; -+ -+static const struct mhi_controller_config modem_telit_fn990_config = { -+ .max_channels = 128, -+ .timeout_ms = 20000, -+ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), -+ .ch_cfg = mhi_telit_fn990_channels, -+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events), -+ .event_cfg = mhi_telit_fn990_events, -+}; -+ -+static const struct mhi_pci_dev_info mhi_telit_fn990_info = { -+ .name = "telit-fn990", -+ .config = &modem_telit_fn990_config, -+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -+ .dma_data_width = 32, -+ .sideband_wake = false, -+ .mru_default = 32768, -+}; -+ -+static const struct pci_device_id mhi_pci_id_table[] = { -+ /* Telit FN980 hardware revision v1 */ -+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), -+ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, -+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), -+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, -+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), -+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, -+ /* Telit FN990 */ -+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), -+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, -+ { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ -+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, -+ { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ -+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, -+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), -+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, -+ /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ -+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), -+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -+ /* DW5930e (sdx55), With eSIM, It's also T99W175 */ -+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), -+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -+ /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ -+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), -+ .driver_data = (kernel_ulong_t) 
&mhi_foxconn_sdx55_info }, -+ /* MV31-W (Cinterion) */ -+ { PCI_DEVICE(0x1269, 0x00b3), -+ .driver_data = (kernel_ulong_t) &mhi_mv31_info }, -+ { } -+}; -+MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); -+ -+enum mhi_pci_device_status { -+ MHI_PCI_DEV_STARTED, -+ MHI_PCI_DEV_SUSPENDED, -+}; -+ -+struct mhi_pci_device { -+ struct mhi_controller mhi_cntrl; -+ struct pci_saved_state *pci_state; -+ struct work_struct recovery_work; -+ struct timer_list health_check_timer; -+ unsigned long status; -+}; -+ -+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, -+ void __iomem *addr, u32 *out) -+{ -+ *out = readl(addr); -+ return 0; -+} -+ -+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, -+ void __iomem *addr, u32 val) -+{ -+ writel(val, addr); -+} -+ -+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, -+ enum mhi_callback cb) -+{ -+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -+ -+ /* Nothing to do for now */ -+ switch (cb) { -+ case MHI_CB_FATAL_ERROR: -+ case MHI_CB_SYS_ERROR: -+ dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb); -+ pm_runtime_forbid(&pdev->dev); -+ break; -+ case MHI_CB_EE_MISSION_MODE: -+ pm_runtime_allow(&pdev->dev); -+ break; -+ default: -+ break; -+ } -+} -+ -+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force) -+{ -+ /* no-op */ -+} -+ -+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override) -+{ -+ /* no-op */ -+} -+ -+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl) -+{ -+ /* no-op */ -+} -+ -+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl) -+{ -+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -+ u16 vendor = 0; -+ -+ if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) -+ return false; -+ -+ if (vendor == (u16) ~0 || vendor == 0) -+ return false; -+ -+ return true; -+} -+ -+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, -+ unsigned int bar_num, u64 dma_mask) -+{ -+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -+ int err; -+ -+ err = pci_assign_resource(pdev, bar_num); -+ if (err) -+ return err; -+ -+ err = pcim_enable_device(pdev); -+ if (err) { -+ dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); -+ return err; -+ } -+ -+ err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); -+ if (err) { -+ dev_err(&pdev->dev, "failed to map pci region: %d\n", err); -+ return err; -+ } -+ mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; -+ mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); -+ -+ err = pci_set_dma_mask(pdev, dma_mask); -+ if (err) { -+ dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); -+ return err; -+ } -+ -+ err = pci_set_consistent_dma_mask(pdev, dma_mask); -+ if (err) { -+ dev_err(&pdev->dev, "set consistent dma mask failed\n"); -+ return err; -+ } -+ -+ pci_set_master(pdev); -+ -+ return 0; -+} -+ -+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, -+ const struct mhi_controller_config *mhi_cntrl_config) -+{ -+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -+ int nr_vectors, i; -+ int *irq; -+ -+ /* -+ * Alloc one MSI vector for BHI + one vector per event ring, ideally... -+ * No explicit pci_free_irq_vectors required, done by pcim_release. 
-+ */ -+ mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; -+ -+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); -+ if (nr_vectors < 0) { -+ dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", -+ nr_vectors); -+ return nr_vectors; -+ } -+ -+ if (nr_vectors < mhi_cntrl->nr_irqs) { -+ dev_warn(&pdev->dev, "using shared MSI\n"); -+ -+ /* Patch msi vectors, use only one (shared) */ -+ for (i = 0; i < mhi_cntrl_config->num_events; i++) -+ mhi_cntrl_config->event_cfg[i].irq = 0; -+ mhi_cntrl->nr_irqs = 1; -+ } -+ -+ irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); -+ if (!irq) -+ return -ENOMEM; -+ -+ for (i = 0; i < mhi_cntrl->nr_irqs; i++) { -+ int vector = i >= nr_vectors ? (nr_vectors - 1) : i; -+ -+ irq[i] = pci_irq_vector(pdev, vector); -+ } -+ -+ mhi_cntrl->irq = irq; -+ -+ return 0; -+} -+ -+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) -+{ -+ /* The runtime_get() MHI callback means: -+ * Do whatever is requested to leave M3. -+ */ -+ return pm_runtime_get(mhi_cntrl->cntrl_dev); -+} -+ -+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) -+{ -+ /* The runtime_put() MHI callback means: -+ * Device can be moved in M3 state. -+ */ -+ pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); -+ pm_runtime_put(mhi_cntrl->cntrl_dev); -+} -+ -+static void mhi_pci_recovery_work(struct work_struct *work) -+{ -+ struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device, -+ recovery_work); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -+ int err; -+ -+ dev_warn(&pdev->dev, "device recovery started\n"); -+ -+ del_timer(&mhi_pdev->health_check_timer); -+ pm_runtime_forbid(&pdev->dev); -+ -+ /* Clean up MHI state */ -+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -+ mhi_power_down(mhi_cntrl, false); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ } -+ -+ pci_set_power_state(pdev, PCI_D0); -+ pci_load_saved_state(pdev, mhi_pdev->pci_state); -+ pci_restore_state(pdev); -+ -+ if (!mhi_pci_is_alive(mhi_cntrl)) -+ goto err_try_reset; -+ -+ err = mhi_prepare_for_power_up(mhi_cntrl); -+ if (err) -+ goto err_try_reset; -+ -+ err = mhi_sync_power_up(mhi_cntrl); -+ if (err) -+ goto err_unprepare; -+ -+ dev_dbg(&pdev->dev, "Recovery completed\n"); -+ -+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -+ return; -+ -+err_unprepare: -+ mhi_unprepare_after_power_down(mhi_cntrl); -+err_try_reset: -+ if (pci_reset_function(pdev)) -+ dev_err(&pdev->dev, "Recovery failed\n"); -+} -+ -+static void health_check(struct timer_list *t) -+{ -+ struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -+ test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -+ return; -+ -+ if (!mhi_pci_is_alive(mhi_cntrl)) { -+ dev_err(mhi_cntrl->cntrl_dev, "Device died\n"); -+ queue_work(system_long_wq, &mhi_pdev->recovery_work); -+ return; -+ } -+ -+ /* reschedule in two seconds */ -+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -+} -+ -+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -+{ -+ const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; -+ const struct mhi_controller_config *mhi_cntrl_config; -+ struct mhi_pci_device *mhi_pdev; 
-+ struct mhi_controller *mhi_cntrl; -+ int err; -+ -+ dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); -+ -+ /* mhi_pdev.mhi_cntrl must be zero-initialized */ -+ mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL); -+ if (!mhi_pdev) -+ return -ENOMEM; -+ -+ INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); -+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0); -+ -+ mhi_cntrl_config = info->config; -+ mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ mhi_cntrl->cntrl_dev = &pdev->dev; -+ mhi_cntrl->iova_start = 0; -+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); -+ mhi_cntrl->fw_image = info->fw; -+ mhi_cntrl->edl_image = info->edl; -+ -+ mhi_cntrl->read_reg = mhi_pci_read_reg; -+ mhi_cntrl->write_reg = mhi_pci_write_reg; -+ mhi_cntrl->status_cb = mhi_pci_status_cb; -+ mhi_cntrl->runtime_get = mhi_pci_runtime_get; -+ mhi_cntrl->runtime_put = mhi_pci_runtime_put; -+ mhi_cntrl->mru = info->mru_default; -+ -+ if (info->sideband_wake) { -+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop; -+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop; -+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; -+ } -+ -+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); -+ if (err) -+ return err; -+ -+ err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); -+ if (err) -+ return err; -+ -+ pci_set_drvdata(pdev, mhi_pdev); -+ -+ /* Have stored pci confspace at hand for restore in sudden PCI error. -+ * cache the state locally and discard the PCI core one. -+ */ -+ pci_save_state(pdev); -+ mhi_pdev->pci_state = pci_store_saved_state(pdev); -+ pci_load_saved_state(pdev, NULL); -+ -+ pci_enable_pcie_error_reporting(pdev); -+ -+ err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); -+ if (err) -+ goto err_disable_reporting; -+ -+ /* MHI bus does not power up the controller by default */ -+ err = mhi_prepare_for_power_up(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to prepare MHI controller\n"); -+ goto err_unregister; -+ } -+ -+ err = mhi_sync_power_up(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to power up MHI controller\n"); -+ goto err_unprepare; -+ } -+ -+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -+ -+ /* start health check */ -+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -+ -+ /* Only allow runtime-suspend if PME capable (for wakeup) */ -+ if (pci_pme_capable(pdev, PCI_D3hot)) { -+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); -+ pm_runtime_use_autosuspend(&pdev->dev); -+ pm_runtime_mark_last_busy(&pdev->dev); -+ pm_runtime_put_noidle(&pdev->dev); -+ } -+ -+ return 0; -+ -+err_unprepare: -+ mhi_unprepare_after_power_down(mhi_cntrl); -+err_unregister: -+ mhi_unregister_controller(mhi_cntrl); -+err_disable_reporting: -+ pci_disable_pcie_error_reporting(pdev); -+ -+ return err; -+} -+ -+static void mhi_pci_remove(struct pci_dev *pdev) -+{ -+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ del_timer_sync(&mhi_pdev->health_check_timer); -+ cancel_work_sync(&mhi_pdev->recovery_work); -+ -+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -+ mhi_power_down(mhi_cntrl, true); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ } -+ -+ /* balancing probe put_noidle */ -+ if (pci_pme_capable(pdev, PCI_D3hot)) -+ pm_runtime_get_noresume(&pdev->dev); -+ -+ mhi_unregister_controller(mhi_cntrl); -+ pci_disable_pcie_error_reporting(pdev); -+} -+ -+static void mhi_pci_shutdown(struct 
pci_dev *pdev) -+{ -+ mhi_pci_remove(pdev); -+ pci_set_power_state(pdev, PCI_D3hot); -+} -+ -+static void mhi_pci_reset_prepare(struct pci_dev *pdev) -+{ -+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ dev_info(&pdev->dev, "reset\n"); -+ -+ del_timer(&mhi_pdev->health_check_timer); -+ -+ /* Clean up MHI state */ -+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -+ mhi_power_down(mhi_cntrl, false); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ } -+ -+ /* cause internal device reset */ -+ mhi_soc_reset(mhi_cntrl); -+ -+ /* Be sure device reset has been executed */ -+ msleep(MHI_POST_RESET_DELAY_MS); -+} -+ -+static void mhi_pci_reset_done(struct pci_dev *pdev) -+{ -+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ int err; -+ -+ /* Restore initial known working PCI state */ -+ pci_load_saved_state(pdev, mhi_pdev->pci_state); -+ pci_restore_state(pdev); -+ -+ /* Is device status available ? */ -+ if (!mhi_pci_is_alive(mhi_cntrl)) { -+ dev_err(&pdev->dev, "reset failed\n"); -+ return; -+ } -+ -+ err = mhi_prepare_for_power_up(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to prepare MHI controller\n"); -+ return; -+ } -+ -+ err = mhi_sync_power_up(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to power up MHI controller\n"); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ return; -+ } -+ -+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -+} -+ -+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, -+ pci_channel_state_t state) -+{ -+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ dev_err(&pdev->dev, "PCI error detected, state = %u\n", state); -+ -+ if (state == pci_channel_io_perm_failure) -+ return PCI_ERS_RESULT_DISCONNECT; -+ -+ /* Clean up MHI state */ -+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -+ mhi_power_down(mhi_cntrl, false); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ } else { -+ /* Nothing to do */ -+ return PCI_ERS_RESULT_RECOVERED; -+ } -+ -+ pci_disable_device(pdev); -+ -+ return PCI_ERS_RESULT_NEED_RESET; -+} -+ -+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev) -+{ -+ if (pci_enable_device(pdev)) { -+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); -+ return PCI_ERS_RESULT_DISCONNECT; -+ } -+ -+ return PCI_ERS_RESULT_RECOVERED; -+} -+ -+static void mhi_pci_io_resume(struct pci_dev *pdev) -+{ -+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -+ -+ dev_err(&pdev->dev, "PCI slot reset done\n"); -+ -+ queue_work(system_long_wq, &mhi_pdev->recovery_work); -+} -+ -+static const struct pci_error_handlers mhi_pci_err_handler = { -+ .error_detected = mhi_pci_error_detected, -+ .slot_reset = mhi_pci_slot_reset, -+ .resume = mhi_pci_io_resume, -+ .reset_prepare = mhi_pci_reset_prepare, -+ .reset_done = mhi_pci_reset_done, -+}; -+ -+static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev) -+{ -+ struct pci_dev *pdev = to_pci_dev(dev); -+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ int err; -+ -+ if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -+ return 0; -+ -+ del_timer(&mhi_pdev->health_check_timer); -+ cancel_work_sync(&mhi_pdev->recovery_work); -+ -+ if 
(!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -+ mhi_cntrl->ee != MHI_EE_AMSS) -+ goto pci_suspend; /* Nothing to do at MHI level */ -+ -+ /* Transition to M3 state */ -+ err = mhi_pm_suspend(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to suspend device: %d\n", err); -+ clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status); -+ return -EBUSY; -+ } -+ -+pci_suspend: -+ pci_disable_device(pdev); -+ pci_wake_from_d3(pdev, true); -+ -+ return 0; -+} -+ -+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev) -+{ -+ struct pci_dev *pdev = to_pci_dev(dev); -+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ int err; -+ -+ if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -+ return 0; -+ -+ err = pci_enable_device(pdev); -+ if (err) -+ goto err_recovery; -+ -+ pci_set_master(pdev); -+ pci_wake_from_d3(pdev, false); -+ -+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -+ mhi_cntrl->ee != MHI_EE_AMSS) -+ return 0; /* Nothing to do at MHI level */ -+ -+ /* Exit M3, transition to M0 state */ -+ err = mhi_pm_resume(mhi_cntrl); -+ if (err) { -+ dev_err(&pdev->dev, "failed to resume device: %d\n", err); -+ goto err_recovery; -+ } -+ -+ /* Resume health check */ -+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -+ -+ /* It can be a remote wakeup (no mhi runtime_get), update access time */ -+ pm_runtime_mark_last_busy(dev); -+ -+ return 0; -+ -+err_recovery: -+ /* Do not fail to not mess up our PCI device state, the device likely -+ * lost power (d3cold) and we simply need to reset it from the recovery -+ * procedure, trigger the recovery asynchronously to prevent system -+ * suspend exit delaying. -+ */ -+ queue_work(system_long_wq, &mhi_pdev->recovery_work); -+ pm_runtime_mark_last_busy(dev); -+ -+ return 0; -+} -+ -+static int __maybe_unused mhi_pci_suspend(struct device *dev) -+{ -+ pm_runtime_disable(dev); -+ return mhi_pci_runtime_suspend(dev); -+} -+ -+static int __maybe_unused mhi_pci_resume(struct device *dev) -+{ -+ int ret; -+ -+ /* Depending the platform, device may have lost power (d3cold), we need -+ * to resume it now to check its state and recover when necessary. -+ */ -+ ret = mhi_pci_runtime_resume(dev); -+ pm_runtime_enable(dev); -+ -+ return ret; -+} -+ -+static int __maybe_unused mhi_pci_freeze(struct device *dev) -+{ -+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -+ -+ /* We want to stop all operations, hibernation does not guarantee that -+ * device will be in the same state as before freezing, especially if -+ * the intermediate restore kernel reinitializes MHI device with new -+ * context. 
-+ */ -+ flush_work(&mhi_pdev->recovery_work); -+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -+ mhi_power_down(mhi_cntrl, true); -+ mhi_unprepare_after_power_down(mhi_cntrl); -+ } -+ -+ return 0; -+} -+ -+static int __maybe_unused mhi_pci_restore(struct device *dev) -+{ -+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -+ -+ /* Reinitialize the device */ -+ queue_work(system_long_wq, &mhi_pdev->recovery_work); -+ -+ return 0; -+} -+ -+static const struct dev_pm_ops mhi_pci_pm_ops = { -+ SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL) -+#ifdef CONFIG_PM_SLEEP -+ .suspend = mhi_pci_suspend, -+ .resume = mhi_pci_resume, -+ .freeze = mhi_pci_freeze, -+ .thaw = mhi_pci_restore, -+ .poweroff = mhi_pci_freeze, -+ .restore = mhi_pci_restore, -+#endif -+}; -+ -+static struct pci_driver mhi_pci_driver = { -+ .name = "mhi-pci-generic", -+ .id_table = mhi_pci_id_table, -+ .probe = mhi_pci_probe, -+ .remove = mhi_pci_remove, -+ .shutdown = mhi_pci_shutdown, -+ .err_handler = &mhi_pci_err_handler, -+ .driver.pm = &mhi_pci_pm_ops -+}; -+module_pci_driver(mhi_pci_driver); -+ -+MODULE_AUTHOR("Loic Poulain "); -+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c -new file mode 100644 -index 0000000000000..470dddca025dc ---- /dev/null -+++ b/drivers/bus/mhi/host/pm.c -@@ -0,0 +1,1266 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "internal.h" -+ -+/* -+ * Not all MHI state transitions are synchronous. Transitions like Linkdown, -+ * SYS_ERR, and shutdown can happen anytime asynchronously. This function will -+ * transition to a new state only if we're allowed to. -+ * -+ * Priority increases as we go down. For instance, from any state in L0, the -+ * transition can be made to states in L1, L2 and L3. A notable exception to -+ * this rule is state DISABLE. From DISABLE state we can only transition to -+ * POR state. Also, while in L2 state, user cannot jump back to previous -+ * L1 or L0 states. 
-+ * -+ * Valid transitions: -+ * L0: DISABLE <--> POR -+ * POR <--> POR -+ * POR -> M0 -> M2 --> M0 -+ * POR -> FW_DL_ERR -+ * FW_DL_ERR <--> FW_DL_ERR -+ * M0 <--> M0 -+ * M0 -> FW_DL_ERR -+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 -+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR -+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT -+ * SHUTDOWN_PROCESS -> DISABLE -+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT -+ * LD_ERR_FATAL_DETECT -> DISABLE -+ */ -+static struct mhi_pm_transitions const dev_state_transitions[] = { -+ /* L0 States */ -+ { -+ MHI_PM_DISABLE, -+ MHI_PM_POR -+ }, -+ { -+ MHI_PM_POR, -+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | -+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR -+ }, -+ { -+ MHI_PM_M0, -+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER | -+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR -+ }, -+ { -+ MHI_PM_M2, -+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ { -+ MHI_PM_M3_ENTER, -+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ { -+ MHI_PM_M3, -+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ { -+ MHI_PM_M3_EXIT, -+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ { -+ MHI_PM_FW_DL_ERR, -+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | -+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ /* L1 States */ -+ { -+ MHI_PM_SYS_ERR_DETECT, -+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ { -+ MHI_PM_SYS_ERR_PROCESS, -+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | -+ MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ /* L2 States */ -+ { -+ MHI_PM_SHUTDOWN_PROCESS, -+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT -+ }, -+ /* L3 States */ -+ { -+ MHI_PM_LD_ERR_FATAL_DETECT, -+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE -+ }, -+}; -+ -+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl, -+ enum mhi_pm_state state) -+{ -+ unsigned long cur_state = mhi_cntrl->pm_state; -+ int index = find_last_bit(&cur_state, 32); -+ -+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions))) -+ return cur_state; -+ -+ if (unlikely(dev_state_transitions[index].from_state != cur_state)) -+ return cur_state; -+ -+ if (unlikely(!(dev_state_transitions[index].to_states & state))) -+ return cur_state; -+ -+ mhi_cntrl->pm_state = state; -+ return mhi_cntrl->pm_state; -+} -+ -+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) -+{ -+ if (state == MHI_STATE_RESET) { -+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); -+ } else { -+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -+ MHICTRL_MHISTATE_MASK, -+ MHICTRL_MHISTATE_SHIFT, state); -+ } -+} -+ -+/* NOP for backward compatibility, host allowed to ring DB in M2 state */ -+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl) -+{ -+} -+ -+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl) -+{ -+ mhi_cntrl->wake_get(mhi_cntrl, false); -+ mhi_cntrl->wake_put(mhi_cntrl, true); -+} -+ -+/* Handle device ready state transition */ -+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_event *mhi_event; -+ enum mhi_pm_state cur_state; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 interval_us = 25000; /* poll register field 
every 25 milliseconds */ -+ int ret, i; -+ -+ /* Check if device entered error state */ -+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { -+ dev_err(dev, "Device link is not accessible\n"); -+ return -EIO; -+ } -+ -+ /* Wait for RESET to be cleared and READY bit to be set by the device */ -+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, -+ interval_us); -+ if (ret) { -+ dev_err(dev, "Device failed to clear MHI Reset\n"); -+ return ret; -+ } -+ -+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, -+ MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1, -+ interval_us); -+ if (ret) { -+ dev_err(dev, "Device failed to enter MHI Ready\n"); -+ return ret; -+ } -+ -+ dev_dbg(dev, "Device in READY State\n"); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); -+ mhi_cntrl->dev_state = MHI_STATE_READY; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ if (cur_state != MHI_PM_POR) { -+ dev_err(dev, "Error moving to state %s from %s\n", -+ to_mhi_pm_state_str(MHI_PM_POR), -+ to_mhi_pm_state_str(cur_state)); -+ return -EIO; -+ } -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { -+ dev_err(dev, "Device registers not accessible\n"); -+ goto error_mmio; -+ } -+ -+ /* Configure MMIO registers */ -+ ret = mhi_init_mmio(mhi_cntrl); -+ if (ret) { -+ dev_err(dev, "Error configuring MMIO registers\n"); -+ goto error_mmio; -+ } -+ -+ /* Add elements to all SW event rings */ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ /* Skip if this is an offload or HW event */ -+ if (mhi_event->offload_ev || mhi_event->hw_ring) -+ continue; -+ -+ ring->wp = ring->base + ring->len - ring->el_size; -+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); -+ /* Update all cores */ -+ smp_wmb(); -+ -+ /* Ring the event ring db */ -+ spin_lock_irq(&mhi_event->lock); -+ mhi_ring_er_db(mhi_event); -+ spin_unlock_irq(&mhi_event->lock); -+ } -+ -+ /* Set MHI to M0 state */ -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ return 0; -+ -+error_mmio: -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ return -EIO; -+} -+ -+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_pm_state cur_state; -+ struct mhi_chan *mhi_chan; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int i; -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->dev_state = MHI_STATE_M0; -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (unlikely(cur_state != MHI_PM_M0)) { -+ dev_err(dev, "Unable to transition to M0 state\n"); -+ return -EIO; -+ } -+ mhi_cntrl->M0++; -+ -+ /* Wake up the device */ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_cntrl->wake_get(mhi_cntrl, true); -+ -+ /* Ring all event rings and CMD ring only if we're in mission mode */ -+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { -+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event; -+ struct mhi_cmd *mhi_cmd = -+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; -+ -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ -+ spin_lock_irq(&mhi_event->lock); -+ mhi_ring_er_db(mhi_event); -+ spin_unlock_irq(&mhi_event->lock); -+ } -+ -+ /* Only ring primary cmd ring if ring is not empty */ -+ spin_lock_irq(&mhi_cmd->lock); -+ if (mhi_cmd->ring.rp != 
mhi_cmd->ring.wp) -+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); -+ spin_unlock_irq(&mhi_cmd->lock); -+ } -+ -+ /* Ring channel DB registers */ -+ mhi_chan = mhi_cntrl->mhi_chan; -+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { -+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring; -+ -+ if (mhi_chan->db_cfg.reset_req) { -+ write_lock_irq(&mhi_chan->lock); -+ mhi_chan->db_cfg.db_mode = true; -+ write_unlock_irq(&mhi_chan->lock); -+ } -+ -+ read_lock_irq(&mhi_chan->lock); -+ -+ /* Only ring DB if ring is not empty */ -+ if (tre_ring->base && tre_ring->wp != tre_ring->rp && -+ mhi_chan->ch_state == MHI_CH_STATE_ENABLED) -+ mhi_ring_chan_db(mhi_cntrl, mhi_chan); -+ read_unlock_irq(&mhi_chan->lock); -+ } -+ -+ mhi_cntrl->wake_put(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ return 0; -+} -+ -+/* -+ * After receiving the MHI state change event from the device indicating the -+ * transition to M1 state, the host can transition the device to M2 state -+ * for keeping it in low power state. -+ */ -+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_pm_state state; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); -+ if (state == MHI_PM_M2) { -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); -+ mhi_cntrl->dev_state = MHI_STATE_M2; -+ -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ mhi_cntrl->M2++; -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ /* If there are any pending resources, exit M2 immediately */ -+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) || -+ atomic_read(&mhi_cntrl->dev_wake))) { -+ dev_dbg(dev, -+ "Exiting M2, pending_pkts: %d dev_wake: %d\n", -+ atomic_read(&mhi_cntrl->pending_pkts), -+ atomic_read(&mhi_cntrl->dev_wake)); -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_cntrl->wake_get(mhi_cntrl, true); -+ mhi_cntrl->wake_put(mhi_cntrl, true); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ } else { -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE); -+ } -+ } else { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ } -+} -+ -+/* MHI M3 completion handler */ -+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_pm_state state; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->dev_state = MHI_STATE_M3; -+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (state != MHI_PM_M3) { -+ dev_err(dev, "Unable to transition to M3 state\n"); -+ return -EIO; -+ } -+ -+ mhi_cntrl->M3++; -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ return 0; -+} -+ -+/* Handle device Mission Mode transition */ -+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_event *mhi_event; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee; -+ int i, ret; -+ -+ dev_dbg(dev, "Processing Mission Mode transition\n"); -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -+ ee = mhi_get_exec_env(mhi_cntrl); -+ -+ if (!MHI_IN_MISSION_MODE(ee)) { -+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ wake_up_all(&mhi_cntrl->state_event); -+ return -EIO; -+ } -+ mhi_cntrl->ee = ee; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, ¤t_ee, -+ mhi_destroy_device); 
-+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); -+ -+ /* Force MHI to be in M0 state before continuing */ -+ ret = __mhi_device_get_sync(mhi_cntrl); -+ if (ret) -+ return ret; -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ ret = -EIO; -+ goto error_mission_mode; -+ } -+ -+ /* Add elements to all HW event rings */ -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ if (mhi_event->offload_ev || !mhi_event->hw_ring) -+ continue; -+ -+ ring->wp = ring->base + ring->len - ring->el_size; -+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size); -+ /* Update to all cores */ -+ smp_wmb(); -+ -+ spin_lock_irq(&mhi_event->lock); -+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) -+ mhi_ring_er_db(mhi_event); -+ spin_unlock_irq(&mhi_event->lock); -+ } -+ -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ /* -+ * The MHI devices are only created when the client device switches its -+ * Execution Environment (EE) to either SBL or AMSS states -+ */ -+ mhi_create_devices(mhi_cntrl); -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ -+error_mission_mode: -+ mhi_cntrl->wake_put(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ return ret; -+} -+ -+/* Handle shutdown transitions */ -+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_pm_state cur_state; -+ struct mhi_event *mhi_event; -+ struct mhi_cmd_ctxt *cmd_ctxt; -+ struct mhi_cmd *mhi_cmd; -+ struct mhi_event_ctxt *er_ctxt; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int ret, i; -+ -+ dev_dbg(dev, "Processing disable transition with PM state: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ -+ /* Trigger MHI RESET so that the device will not access host memory */ -+ if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { -+ /* Skip MHI RESET if in RDDM state */ -+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) -+ goto skip_mhi_reset; -+ -+ dev_dbg(dev, "Triggering MHI Reset in device\n"); -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -+ -+ /* Wait for the reset bit to be cleared by the device */ -+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, -+ 25000); -+ if (ret) -+ dev_err(dev, "Device failed to clear MHI Reset\n"); -+ -+ /* -+ * Device will clear BHI_INTVEC as a part of RESET processing, -+ * hence re-program it -+ */ -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -+ } -+ -+skip_mhi_reset: -+ dev_dbg(dev, -+ "Waiting for all pending event ring processing to complete\n"); -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); -+ tasklet_kill(&mhi_event->task); -+ } -+ -+ /* Release lock and wait for all pending threads to complete */ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ dev_dbg(dev, "Waiting for all pending threads to complete\n"); -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); -+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ -+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); -+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); -+ -+ /* Reset the ev rings and cmd rings */ -+ dev_dbg(dev, "Resetting 
EV CTXT and CMD CTXT\n"); -+ mhi_cmd = mhi_cntrl->mhi_cmd; -+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; -+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ -+ ring->rp = ring->base; -+ ring->wp = ring->base; -+ cmd_ctxt->rp = cmd_ctxt->rbase; -+ cmd_ctxt->wp = cmd_ctxt->rbase; -+ } -+ -+ mhi_event = mhi_cntrl->mhi_event; -+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -+ mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ /* Skip offload events */ -+ if (mhi_event->offload_ev) -+ continue; -+ -+ ring->rp = ring->base; -+ ring->wp = ring->base; -+ er_ctxt->rp = er_ctxt->rbase; -+ er_ctxt->wp = er_ctxt->rbase; -+ } -+ -+ /* Move to disable state */ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (unlikely(cur_state != MHI_PM_DISABLE)) -+ dev_err(dev, "Error moving from PM state: %s to: %s\n", -+ to_mhi_pm_state_str(cur_state), -+ to_mhi_pm_state_str(MHI_PM_DISABLE)); -+ -+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -+ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+} -+ -+/* Handle system error transitions */ -+static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_pm_state cur_state, prev_state; -+ enum dev_st_transition next_state; -+ struct mhi_event *mhi_event; -+ struct mhi_cmd_ctxt *cmd_ctxt; -+ struct mhi_cmd *mhi_cmd; -+ struct mhi_event_ctxt *er_ctxt; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int ret, i; -+ -+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); -+ -+ /* We must notify MHI control driver so it can clean up first */ -+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ prev_state = mhi_cntrl->pm_state; -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ if (cur_state != MHI_PM_SYS_ERR_PROCESS) { -+ dev_err(dev, "Failed to transition from PM state: %s to: %s\n", -+ to_mhi_pm_state_str(cur_state), -+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); -+ goto exit_sys_error_transition; -+ } -+ -+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; -+ mhi_cntrl->dev_state = MHI_STATE_RESET; -+ -+ /* Wake up threads waiting for state transition */ -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ /* Trigger MHI RESET so that the device will not access host memory */ -+ if (MHI_REG_ACCESS_VALID(prev_state)) { -+ u32 in_reset = -1; -+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); -+ -+ dev_dbg(dev, "Triggering MHI Reset in device\n"); -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -+ -+ /* Wait for the reset bit to be cleared by the device */ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ mhi_read_reg_field(mhi_cntrl, -+ mhi_cntrl->regs, -+ MHICTRL, -+ MHICTRL_RESET_MASK, -+ MHICTRL_RESET_SHIFT, -+ &in_reset) || -+ !in_reset, timeout); -+ if (!ret || in_reset) { -+ dev_err(dev, "Device failed to exit MHI Reset state\n"); -+ goto exit_sys_error_transition; -+ } -+ -+ /* -+ * Device will clear BHI_INTVEC as a part of RESET processing, -+ * hence re-program it -+ */ -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -+ } -+ -+ dev_dbg(dev, 
-+ "Waiting for all pending event ring processing to complete\n"); -+ mhi_event = mhi_cntrl->mhi_event; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { -+ if (mhi_event->offload_ev) -+ continue; -+ tasklet_kill(&mhi_event->task); -+ } -+ -+ /* Release lock and wait for all pending threads to complete */ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ dev_dbg(dev, "Waiting for all pending threads to complete\n"); -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); -+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ -+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); -+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); -+ -+ /* Reset the ev rings and cmd rings */ -+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); -+ mhi_cmd = mhi_cntrl->mhi_cmd; -+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; -+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { -+ struct mhi_ring *ring = &mhi_cmd->ring; -+ -+ ring->rp = ring->base; -+ ring->wp = ring->base; -+ cmd_ctxt->rp = cmd_ctxt->rbase; -+ cmd_ctxt->wp = cmd_ctxt->rbase; -+ } -+ -+ mhi_event = mhi_cntrl->mhi_event; -+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; -+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, -+ mhi_event++) { -+ struct mhi_ring *ring = &mhi_event->ring; -+ -+ /* Skip offload events */ -+ if (mhi_event->offload_ev) -+ continue; -+ -+ ring->rp = ring->base; -+ ring->wp = ring->base; -+ er_ctxt->rp = er_ctxt->rbase; -+ er_ctxt->wp = er_ctxt->rbase; -+ } -+ -+ /* Transition to next state */ -+ if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ if (cur_state != MHI_PM_POR) { -+ dev_err(dev, "Error moving to state %s from %s\n", -+ to_mhi_pm_state_str(MHI_PM_POR), -+ to_mhi_pm_state_str(cur_state)); -+ goto exit_sys_error_transition; -+ } -+ next_state = DEV_ST_TRANSITION_PBL; -+ } else { -+ next_state = DEV_ST_TRANSITION_READY; -+ } -+ -+ mhi_queue_state_transition(mhi_cntrl, next_state); -+ -+exit_sys_error_transition: -+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -+ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+} -+ -+/* Queue a new work item and schedule work */ -+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, -+ enum dev_st_transition state) -+{ -+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); -+ unsigned long flags; -+ -+ if (!item) -+ return -ENOMEM; -+ -+ item->state = state; -+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); -+ list_add_tail(&item->node, &mhi_cntrl->transition_list); -+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); -+ -+ queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); -+ -+ return 0; -+} -+ -+/* SYS_ERR worker */ -+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl) -+{ -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ /* skip if controller supports RDDM */ -+ if (mhi_cntrl->rddm_image) { -+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n"); -+ return; -+ } -+ -+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR); -+} -+ -+/* Device State Transition worker */ -+void mhi_pm_st_worker(struct work_struct *work) -+{ -+ struct state_transition *itr, *tmp; -+ LIST_HEAD(head); -+ struct mhi_controller *mhi_cntrl = 
container_of(work, -+ struct mhi_controller, -+ st_worker); -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ spin_lock_irq(&mhi_cntrl->transition_lock); -+ list_splice_tail_init(&mhi_cntrl->transition_list, &head); -+ spin_unlock_irq(&mhi_cntrl->transition_lock); -+ -+ list_for_each_entry_safe(itr, tmp, &head, node) { -+ list_del(&itr->node); -+ dev_dbg(dev, "Handling state transition: %s\n", -+ TO_DEV_STATE_TRANS_STR(itr->state)); -+ -+ switch (itr->state) { -+ case DEV_ST_TRANSITION_PBL: -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) -+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ mhi_fw_load_handler(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_SBL: -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->ee = MHI_EE_SBL; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ /* -+ * The MHI devices are only created when the client -+ * device switches its Execution Environment (EE) to -+ * either SBL or AMSS states -+ */ -+ mhi_create_devices(mhi_cntrl); -+ if (mhi_cntrl->fbc_download) -+ mhi_download_amss_image(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_MISSION_MODE: -+ mhi_pm_mission_mode_transition(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_FP: -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_cntrl->ee = MHI_EE_FP; -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ mhi_create_devices(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_READY: -+ mhi_ready_state_transition(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_SYS_ERR: -+ mhi_pm_sys_error_transition(mhi_cntrl); -+ break; -+ case DEV_ST_TRANSITION_DISABLE: -+ mhi_pm_disable_transition(mhi_cntrl); -+ break; -+ default: -+ break; -+ } -+ kfree(itr); -+ } -+} -+ -+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) -+{ -+ struct mhi_chan *itr, *tmp; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_pm_state new_state; -+ int ret; -+ -+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE) -+ return -EINVAL; -+ -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -+ return -EIO; -+ -+ /* Return busy if there are any pending resources */ -+ if (atomic_read(&mhi_cntrl->dev_wake) || -+ atomic_read(&mhi_cntrl->pending_pkts)) -+ return -EBUSY; -+ -+ /* Take MHI out of M2 state */ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_cntrl->wake_get(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ mhi_cntrl->dev_state == MHI_STATE_M0 || -+ mhi_cntrl->dev_state == MHI_STATE_M1 || -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_cntrl->wake_put(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ dev_err(dev, -+ "Could not enter M0/M1 state"); -+ return -EIO; -+ } -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ -+ if (atomic_read(&mhi_cntrl->dev_wake) || -+ atomic_read(&mhi_cntrl->pending_pkts)) { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ return -EBUSY; -+ } -+ -+ dev_dbg(dev, "Allowing M3 transition\n"); -+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); -+ if (new_state != MHI_PM_M3_ENTER) { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ dev_err(dev, -+ "Error setting to PM state: %s from: %s\n", -+ to_mhi_pm_state_str(MHI_PM_M3_ENTER), -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ return -EIO; -+ } -+ -+ /* Set MHI to M3 and wait for completion */ -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); -+ 
write_unlock_irq(&mhi_cntrl->pm_lock); -+ dev_dbg(dev, "Waiting for M3 completion\n"); -+ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ mhi_cntrl->dev_state == MHI_STATE_M3 || -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ dev_err(dev, -+ "Did not enter M3 state, MHI state: %s, PM state: %s\n", -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ return -EIO; -+ } -+ -+ /* Notify clients about entering LPM */ -+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { -+ mutex_lock(&itr->mutex); -+ if (itr->mhi_dev) -+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); -+ mutex_unlock(&itr->mutex); -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(mhi_pm_suspend); -+ -+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) -+{ -+ struct mhi_chan *itr, *tmp; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ enum mhi_pm_state cur_state; -+ int ret; -+ -+ dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n", -+ to_mhi_pm_state_str(mhi_cntrl->pm_state), -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state)); -+ -+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE) -+ return 0; -+ -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) -+ return -EIO; -+ -+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { -+ dev_warn(dev, "Resuming from non M3 state (%s)\n", -+ TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); -+ if (!force) -+ return -EINVAL; -+ } -+ -+ /* Notify clients about exiting LPM */ -+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { -+ mutex_lock(&itr->mutex); -+ if (itr->mhi_dev) -+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT); -+ mutex_unlock(&itr->mutex); -+ } -+ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); -+ if (cur_state != MHI_PM_M3_EXIT) { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ dev_info(dev, -+ "Error setting to PM state: %s from: %s\n", -+ to_mhi_pm_state_str(MHI_PM_M3_EXIT), -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ return -EIO; -+ } -+ -+ /* Set MHI to M0 and wait for completion */ -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ mhi_cntrl->dev_state == MHI_STATE_M0 || -+ mhi_cntrl->dev_state == MHI_STATE_M2 || -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ dev_err(dev, -+ "Did not enter M0 state, MHI state: %s, PM state: %s\n", -+ TO_MHI_STATE_STR(mhi_cntrl->dev_state), -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int mhi_pm_resume(struct mhi_controller *mhi_cntrl) -+{ -+ return __mhi_pm_resume(mhi_cntrl, false); -+} -+EXPORT_SYMBOL_GPL(mhi_pm_resume); -+ -+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl) -+{ -+ return __mhi_pm_resume(mhi_cntrl, true); -+} -+EXPORT_SYMBOL_GPL(mhi_pm_resume_force); -+ -+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) -+{ -+ int ret; -+ -+ /* Wake up the device */ -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ return -EIO; -+ } -+ mhi_cntrl->wake_get(mhi_cntrl, true); -+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -+ mhi_trigger_resume(mhi_cntrl); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ -+ ret = 
wait_event_timeout(mhi_cntrl->state_event, -+ mhi_cntrl->pm_state == MHI_PM_M0 || -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ mhi_cntrl->wake_put(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+/* Assert device wake db */ -+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) -+{ -+ unsigned long flags; -+ -+ /* -+ * If force flag is set, then increment the wake count value and -+ * ring wake db -+ */ -+ if (unlikely(force)) { -+ spin_lock_irqsave(&mhi_cntrl->wlock, flags); -+ atomic_inc(&mhi_cntrl->dev_wake); -+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && -+ !mhi_cntrl->wake_set) { -+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); -+ mhi_cntrl->wake_set = true; -+ } -+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -+ } else { -+ /* -+ * If resources are already requested, then just increment -+ * the wake count value and return -+ */ -+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) -+ return; -+ -+ spin_lock_irqsave(&mhi_cntrl->wlock, flags); -+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && -+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && -+ !mhi_cntrl->wake_set) { -+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); -+ mhi_cntrl->wake_set = true; -+ } -+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -+ } -+} -+ -+/* De-assert device wake db */ -+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, -+ bool override) -+{ -+ unsigned long flags; -+ -+ /* -+ * Only continue if there is a single resource, else just decrement -+ * and return -+ */ -+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) -+ return; -+ -+ spin_lock_irqsave(&mhi_cntrl->wlock, flags); -+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && -+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && -+ mhi_cntrl->wake_set) { -+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); -+ mhi_cntrl->wake_set = false; -+ } -+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); -+} -+ -+int mhi_async_power_up(struct mhi_controller *mhi_cntrl) -+{ -+ enum mhi_state state; -+ enum mhi_ee_type current_ee; -+ enum dev_st_transition next_state; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */ -+ int ret; -+ -+ dev_info(dev, "Requested to power ON\n"); -+ -+ /* Supply default wake routines if not provided by controller driver */ -+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || -+ !mhi_cntrl->wake_toggle) { -+ mhi_cntrl->wake_get = mhi_assert_dev_wake; -+ mhi_cntrl->wake_put = mhi_deassert_dev_wake; -+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ? 
-+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake; -+ } -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ mhi_cntrl->pm_state = MHI_PM_DISABLE; -+ -+ /* Setup BHI INTVEC */ -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -+ mhi_cntrl->pm_state = MHI_PM_POR; -+ mhi_cntrl->ee = MHI_EE_MAX; -+ current_ee = mhi_get_exec_env(mhi_cntrl); -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ -+ /* Confirm that the device is in valid exec env */ -+ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { -+ dev_err(dev, "%s is not a valid EE for power on\n", -+ TO_MHI_EXEC_STR(current_ee)); -+ ret = -EIO; -+ goto error_exit; -+ } -+ -+ state = mhi_get_mhi_state(mhi_cntrl); -+ dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n", -+ TO_MHI_EXEC_STR(current_ee), TO_MHI_STATE_STR(state)); -+ -+ if (state == MHI_STATE_SYS_ERR) { -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); -+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, -+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0, -+ interval_us); -+ if (ret) { -+ dev_info(dev, "Failed to reset MHI due to syserr state\n"); -+ goto error_exit; -+ } -+ -+ /* -+ * device clears INTVEC as part of RESET processing, -+ * re-program it -+ */ -+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); -+ } -+ -+ ret = mhi_init_irq_setup(mhi_cntrl); -+ if (ret) -+ goto error_exit; -+ -+ /* Transition to next state */ -+ next_state = MHI_IN_PBL(current_ee) ? -+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY; -+ -+ mhi_queue_state_transition(mhi_cntrl, next_state); -+ -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ -+ dev_info(dev, "Power on setup success\n"); -+ -+ return 0; -+ -+error_exit: -+ mhi_cntrl->pm_state = MHI_PM_DISABLE; -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_async_power_up); -+ -+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) -+{ -+ enum mhi_pm_state cur_state, transition_state; -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ -+ mutex_lock(&mhi_cntrl->pm_mutex); -+ write_lock_irq(&mhi_cntrl->pm_lock); -+ cur_state = mhi_cntrl->pm_state; -+ if (cur_state == MHI_PM_DISABLE) { -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ return; /* Already powered down */ -+ } -+ -+ /* If it's not a graceful shutdown, force MHI to linkdown state */ -+ transition_state = (graceful) ?
MHI_PM_SHUTDOWN_PROCESS : -+ MHI_PM_LD_ERR_FATAL_DETECT; -+ -+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); -+ if (cur_state != transition_state) { -+ dev_err(dev, "Failed to move to state: %s from: %s\n", -+ to_mhi_pm_state_str(transition_state), -+ to_mhi_pm_state_str(mhi_cntrl->pm_state)); -+ /* Force link down or error fatal detected state */ -+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; -+ } -+ -+ /* mark device inactive to avoid any further host processing */ -+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; -+ mhi_cntrl->dev_state = MHI_STATE_RESET; -+ -+ wake_up_all(&mhi_cntrl->state_event); -+ -+ write_unlock_irq(&mhi_cntrl->pm_lock); -+ mutex_unlock(&mhi_cntrl->pm_mutex); -+ -+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); -+ -+ /* Wait for shutdown to complete */ -+ flush_work(&mhi_cntrl->st_worker); -+ -+ free_irq(mhi_cntrl->irq[0], mhi_cntrl); -+} -+EXPORT_SYMBOL_GPL(mhi_power_down); -+ -+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) -+{ -+ int ret = mhi_async_power_up(mhi_cntrl); -+ -+ if (ret) -+ return ret; -+ -+ wait_event_timeout(mhi_cntrl->state_event, -+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) || -+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ -+ ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT; -+ if (ret) -+ mhi_power_down(mhi_cntrl, false); -+ -+ return ret; -+} -+EXPORT_SYMBOL(mhi_sync_power_up); -+ -+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) -+{ -+ struct device *dev = &mhi_cntrl->mhi_dev->dev; -+ int ret; -+ -+ /* Check if device is already in RDDM */ -+ if (mhi_cntrl->ee == MHI_EE_RDDM) -+ return 0; -+ -+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n"); -+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); -+ -+ /* Wait for RDDM event */ -+ ret = wait_event_timeout(mhi_cntrl->state_event, -+ mhi_cntrl->ee == MHI_EE_RDDM, -+ msecs_to_jiffies(mhi_cntrl->timeout_ms)); -+ ret = ret ? 0 : -EIO; -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); -+ -+void mhi_device_get(struct mhi_device *mhi_dev) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ -+ mhi_dev->dev_wake++; -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -+ mhi_trigger_resume(mhi_cntrl); -+ -+ mhi_cntrl->wake_get(mhi_cntrl, true); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+} -+EXPORT_SYMBOL_GPL(mhi_device_get); -+ -+int mhi_device_get_sync(struct mhi_device *mhi_dev) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ int ret; -+ -+ ret = __mhi_device_get_sync(mhi_cntrl); -+ if (!ret) -+ mhi_dev->dev_wake++; -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(mhi_device_get_sync); -+ -+void mhi_device_put(struct mhi_device *mhi_dev) -+{ -+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; -+ -+ mhi_dev->dev_wake--; -+ read_lock_bh(&mhi_cntrl->pm_lock); -+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) -+ mhi_trigger_resume(mhi_cntrl); -+ -+ mhi_cntrl->wake_put(mhi_cntrl, false); -+ read_unlock_bh(&mhi_cntrl->pm_lock); -+} -+EXPORT_SYMBOL_GPL(mhi_device_put); -diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c -deleted file mode 100644 -index 59a4896a80309..0000000000000 ---- a/drivers/bus/mhi/pci_generic.c -+++ /dev/null -@@ -1,1062 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later --/* -- * MHI PCI driver - MHI over PCI controller driver -- * -- * This module is a generic driver for registering MHI-over-PCI devices, -- * such as PCIe QCOM modems. 
-- * -- * Copyright (C) 2020 Linaro Ltd -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#define MHI_PCI_DEFAULT_BAR_NUM 0 -- --#define MHI_POST_RESET_DELAY_MS 500 -- --#define HEALTH_CHECK_PERIOD (HZ * 2) -- --/** -- * struct mhi_pci_dev_info - MHI PCI device specific information -- * @config: MHI controller configuration -- * @name: name of the PCI module -- * @fw: firmware path (if any) -- * @edl: emergency download mode firmware path (if any) -- * @bar_num: PCI base address register to use for MHI MMIO register space -- * @dma_data_width: DMA transfer word size (32 or 64 bits) -- * @mru_default: default MRU size for MBIM network packets -- * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead -- * of inband wake support (such as sdx24) -- */ --struct mhi_pci_dev_info { -- const struct mhi_controller_config *config; -- const char *name; -- const char *fw; -- const char *edl; -- unsigned int bar_num; -- unsigned int dma_data_width; -- unsigned int mru_default; -- bool sideband_wake; --}; -- --#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_TO_DEVICE, \ -- .ee_mask = BIT(MHI_EE_AMSS), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } \ -- --#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_FROM_DEVICE, \ -- .ee_mask = BIT(MHI_EE_AMSS), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } -- --#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_FROM_DEVICE, \ -- .ee_mask = BIT(MHI_EE_AMSS), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- .auto_queue = true, \ -- } -- --#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ -- { \ -- .num_elements = el_count, \ -- .irq_moderation_ms = 0, \ -- .irq = (ev_ring) + 1, \ -- .priority = 1, \ -- .mode = MHI_DB_BRST_DISABLE, \ -- .data_type = MHI_ER_CTRL, \ -- .hardware_event = false, \ -- .client_managed = false, \ -- .offload_channel = false, \ -- } -- --#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_TO_DEVICE, \ -- .ee_mask = BIT(MHI_EE_AMSS), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_ENABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = true, \ -- } \ -- --#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_FROM_DEVICE, \ -- .ee_mask = BIT(MHI_EE_AMSS), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_ENABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = true, \ -- } -- --#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, 
\ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_TO_DEVICE, \ -- .ee_mask = BIT(MHI_EE_SBL), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } \ -- --#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_FROM_DEVICE, \ -- .ee_mask = BIT(MHI_EE_SBL), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } -- --#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_TO_DEVICE, \ -- .ee_mask = BIT(MHI_EE_FP), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } \ -- --#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \ -- { \ -- .num = ch_num, \ -- .name = ch_name, \ -- .num_elements = el_count, \ -- .event_ring = ev_ring, \ -- .dir = DMA_FROM_DEVICE, \ -- .ee_mask = BIT(MHI_EE_FP), \ -- .pollcfg = 0, \ -- .doorbell = MHI_DB_BRST_DISABLE, \ -- .lpm_notify = false, \ -- .offload_channel = false, \ -- .doorbell_mode_switch = false, \ -- } -- --#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \ -- { \ -- .num_elements = el_count, \ -- .irq_moderation_ms = 5, \ -- .irq = (ev_ring) + 1, \ -- .priority = 1, \ -- .mode = MHI_DB_BRST_DISABLE, \ -- .data_type = MHI_ER_DATA, \ -- .hardware_event = false, \ -- .client_managed = false, \ -- .offload_channel = false, \ -- } -- --#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ -- { \ -- .num_elements = el_count, \ -- .irq_moderation_ms = 1, \ -- .irq = (ev_ring) + 1, \ -- .priority = 1, \ -- .mode = MHI_DB_BRST_DISABLE, \ -- .data_type = MHI_ER_DATA, \ -- .hardware_event = true, \ -- .client_managed = false, \ -- .offload_channel = false, \ -- .channel = ch_num, \ -- } -- --static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { -- MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), -- MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), -- MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), -- MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), -- MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), -- MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), -- MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), -- MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), -- MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), -- MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), -- MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), -- MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), --}; -- --static struct mhi_event_config modem_qcom_v1_mhi_events[] = { -- /* first ring is control+data ring */ -- MHI_EVENT_CONFIG_CTRL(0, 64), -- /* DIAG dedicated event ring */ -- MHI_EVENT_CONFIG_DATA(1, 128), -- /* Hardware channels request dedicated hardware event rings */ -- MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -- MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) --}; -- --static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { -- .max_channels = 128, -- .timeout_ms = 8000, -- .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), -- .ch_cfg = modem_qcom_v1_mhi_channels, -- .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), -- .event_cfg = modem_qcom_v1_mhi_events, --}; -- --static 
const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { -- .name = "qcom-sdx65m", -- .fw = "qcom/sdx65m/xbl.elf", -- .edl = "qcom/sdx65m/edl.mbn", -- .config = &modem_qcom_v1_mhiv_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, -- .sideband_wake = false, --}; -- --static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { -- .name = "qcom-sdx55m", -- .fw = "qcom/sdx55m/sbl1.mbn", -- .edl = "qcom/sdx55m/edl.mbn", -- .config = &modem_qcom_v1_mhiv_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, -- .mru_default = 32768, -- .sideband_wake = false, --}; -- --static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { -- .name = "qcom-sdx24", -- .edl = "qcom/prog_firehose_sdx24.mbn", -- .config = &modem_qcom_v1_mhiv_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, -- .sideband_wake = true, --}; -- --static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { -- MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0), -- MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0), -- MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), -- MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), -- MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), -- MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), -- MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), -- MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), -- MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -- MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -- /* The EDL firmware is a flash-programmer exposing firehose protocol */ -- MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), -- MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), -- MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), -- MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), --}; -- --static struct mhi_event_config mhi_quectel_em1xx_events[] = { -- MHI_EVENT_CONFIG_CTRL(0, 128), -- MHI_EVENT_CONFIG_DATA(1, 128), -- MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -- MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) --}; -- --static const struct mhi_controller_config modem_quectel_em1xx_config = { -- .max_channels = 128, -- .timeout_ms = 20000, -- .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels), -- .ch_cfg = mhi_quectel_em1xx_channels, -- .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events), -- .event_cfg = mhi_quectel_em1xx_events, --}; -- --static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { -- .name = "quectel-em1xx", -- .edl = "qcom/prog_firehose_sdx24.mbn", -- .config = &modem_quectel_em1xx_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, -- .sideband_wake = true, --}; -- --static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { -- MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), -- MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), -- MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), -- MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), -- MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), -- MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), -- MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), -- MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), -- MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), -- MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), --}; -- --static struct mhi_event_config mhi_foxconn_sdx55_events[] = { -- MHI_EVENT_CONFIG_CTRL(0, 128), -- MHI_EVENT_CONFIG_DATA(1, 128), -- MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -- MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101) --}; -- --static const struct mhi_controller_config modem_foxconn_sdx55_config = { -- .max_channels = 128, -- .timeout_ms = 20000, -- .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), -- .ch_cfg = mhi_foxconn_sdx55_channels, -- .num_events = 
ARRAY_SIZE(mhi_foxconn_sdx55_events), -- .event_cfg = mhi_foxconn_sdx55_events, --}; -- --static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { -- .name = "foxconn-sdx55", -- .fw = "qcom/sdx55m/sbl1.mbn", -- .edl = "qcom/sdx55m/edl.mbn", -- .config = &modem_foxconn_sdx55_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, -- .sideband_wake = false, --}; -- --static const struct mhi_channel_config mhi_mv31_channels[] = { -- MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), -- MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), -- /* MBIM Control Channel */ -- MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0), -- MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0), -- /* MBIM Data Channel */ -- MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2), -- MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), --}; -- --static struct mhi_event_config mhi_mv31_events[] = { -- MHI_EVENT_CONFIG_CTRL(0, 256), -- MHI_EVENT_CONFIG_DATA(1, 256), -- MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), -- MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), --}; -- --static const struct mhi_controller_config modem_mv31_config = { -- .max_channels = 128, -- .timeout_ms = 20000, -- .num_channels = ARRAY_SIZE(mhi_mv31_channels), -- .ch_cfg = mhi_mv31_channels, -- .num_events = ARRAY_SIZE(mhi_mv31_events), -- .event_cfg = mhi_mv31_events, --}; -- --static const struct mhi_pci_dev_info mhi_mv31_info = { -- .name = "cinterion-mv31", -- .config = &modem_mv31_config, -- .bar_num = MHI_PCI_DEFAULT_BAR_NUM, -- .dma_data_width = 32, --}; -- --static const struct pci_device_id mhi_pci_id_table[] = { -- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), -- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, -- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), -- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, -- { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ -- .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, -- { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ -- .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, -- { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), -- .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, -- /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ -- { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), -- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -- /* DW5930e (sdx55), With eSIM, It's also T99W175 */ -- { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), -- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -- /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ -- { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), -- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -- /* MV31-W (Cinterion) */ -- { PCI_DEVICE(0x1269, 0x00b3), -- .driver_data = (kernel_ulong_t) &mhi_mv31_info }, -- { } --}; --MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); -- --enum mhi_pci_device_status { -- MHI_PCI_DEV_STARTED, -- MHI_PCI_DEV_SUSPENDED, --}; -- --struct mhi_pci_device { -- struct mhi_controller mhi_cntrl; -- struct pci_saved_state *pci_state; -- struct work_struct recovery_work; -- struct timer_list health_check_timer; -- unsigned long status; --}; -- --static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, -- void __iomem *addr, u32 *out) --{ -- *out = readl(addr); -- return 0; --} -- --static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, -- void __iomem *addr, u32 val) --{ -- writel(val, addr); --} -- --static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, -- enum mhi_callback cb) --{ -- struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -- -- /* Nothing to do for now */ -- 
switch (cb) { -- case MHI_CB_FATAL_ERROR: -- case MHI_CB_SYS_ERROR: -- dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb); -- pm_runtime_forbid(&pdev->dev); -- break; -- case MHI_CB_EE_MISSION_MODE: -- pm_runtime_allow(&pdev->dev); -- break; -- default: -- break; -- } --} -- --static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force) --{ -- /* no-op */ --} -- --static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override) --{ -- /* no-op */ --} -- --static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl) --{ -- /* no-op */ --} -- --static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl) --{ -- struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -- u16 vendor = 0; -- -- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) -- return false; -- -- if (vendor == (u16) ~0 || vendor == 0) -- return false; -- -- return true; --} -- --static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, -- unsigned int bar_num, u64 dma_mask) --{ -- struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -- int err; -- -- err = pci_assign_resource(pdev, bar_num); -- if (err) -- return err; -- -- err = pcim_enable_device(pdev); -- if (err) { -- dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); -- return err; -- } -- -- err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); -- if (err) { -- dev_err(&pdev->dev, "failed to map pci region: %d\n", err); -- return err; -- } -- mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; -- mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); -- -- err = pci_set_dma_mask(pdev, dma_mask); -- if (err) { -- dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); -- return err; -- } -- -- err = pci_set_consistent_dma_mask(pdev, dma_mask); -- if (err) { -- dev_err(&pdev->dev, "set consistent dma mask failed\n"); -- return err; -- } -- -- pci_set_master(pdev); -- -- return 0; --} -- --static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, -- const struct mhi_controller_config *mhi_cntrl_config) --{ -- struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -- int nr_vectors, i; -- int *irq; -- -- /* -- * Alloc one MSI vector for BHI + one vector per event ring, ideally... -- * No explicit pci_free_irq_vectors required, done by pcim_release. -- */ -- mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; -- -- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); -- if (nr_vectors < 0) { -- dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", -- nr_vectors); -- return nr_vectors; -- } -- -- if (nr_vectors < mhi_cntrl->nr_irqs) { -- dev_warn(&pdev->dev, "using shared MSI\n"); -- -- /* Patch msi vectors, use only one (shared) */ -- for (i = 0; i < mhi_cntrl_config->num_events; i++) -- mhi_cntrl_config->event_cfg[i].irq = 0; -- mhi_cntrl->nr_irqs = 1; -- } -- -- irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); -- if (!irq) -- return -ENOMEM; -- -- for (i = 0; i < mhi_cntrl->nr_irqs; i++) { -- int vector = i >= nr_vectors ? (nr_vectors - 1) : i; -- -- irq[i] = pci_irq_vector(pdev, vector); -- } -- -- mhi_cntrl->irq = irq; -- -- return 0; --} -- --static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) --{ -- /* The runtime_get() MHI callback means: -- * Do whatever is requested to leave M3. -- */ -- return pm_runtime_get(mhi_cntrl->cntrl_dev); --} -- --static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) --{ -- /* The runtime_put() MHI callback means: -- * Device can be moved in M3 state. 
-- */ -- pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev); -- pm_runtime_put(mhi_cntrl->cntrl_dev); --} -- --static void mhi_pci_recovery_work(struct work_struct *work) --{ -- struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device, -- recovery_work); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); -- int err; -- -- dev_warn(&pdev->dev, "device recovery started\n"); -- -- del_timer(&mhi_pdev->health_check_timer); -- pm_runtime_forbid(&pdev->dev); -- -- /* Clean up MHI state */ -- if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -- mhi_power_down(mhi_cntrl, false); -- mhi_unprepare_after_power_down(mhi_cntrl); -- } -- -- pci_set_power_state(pdev, PCI_D0); -- pci_load_saved_state(pdev, mhi_pdev->pci_state); -- pci_restore_state(pdev); -- -- if (!mhi_pci_is_alive(mhi_cntrl)) -- goto err_try_reset; -- -- err = mhi_prepare_for_power_up(mhi_cntrl); -- if (err) -- goto err_try_reset; -- -- err = mhi_sync_power_up(mhi_cntrl); -- if (err) -- goto err_unprepare; -- -- dev_dbg(&pdev->dev, "Recovery completed\n"); -- -- set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -- return; -- --err_unprepare: -- mhi_unprepare_after_power_down(mhi_cntrl); --err_try_reset: -- if (pci_reset_function(pdev)) -- dev_err(&pdev->dev, "Recovery failed\n"); --} -- --static void health_check(struct timer_list *t) --{ -- struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -- test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -- return; -- -- if (!mhi_pci_is_alive(mhi_cntrl)) { -- dev_err(mhi_cntrl->cntrl_dev, "Device died\n"); -- queue_work(system_long_wq, &mhi_pdev->recovery_work); -- return; -- } -- -- /* reschedule in two seconds */ -- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); --} -- --static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) --{ -- const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; -- const struct mhi_controller_config *mhi_cntrl_config; -- struct mhi_pci_device *mhi_pdev; -- struct mhi_controller *mhi_cntrl; -- int err; -- -- dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); -- -- /* mhi_pdev.mhi_cntrl must be zero-initialized */ -- mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL); -- if (!mhi_pdev) -- return -ENOMEM; -- -- INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); -- timer_setup(&mhi_pdev->health_check_timer, health_check, 0); -- -- mhi_cntrl_config = info->config; -- mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- mhi_cntrl->cntrl_dev = &pdev->dev; -- mhi_cntrl->iova_start = 0; -- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); -- mhi_cntrl->fw_image = info->fw; -- mhi_cntrl->edl_image = info->edl; -- -- mhi_cntrl->read_reg = mhi_pci_read_reg; -- mhi_cntrl->write_reg = mhi_pci_write_reg; -- mhi_cntrl->status_cb = mhi_pci_status_cb; -- mhi_cntrl->runtime_get = mhi_pci_runtime_get; -- mhi_cntrl->runtime_put = mhi_pci_runtime_put; -- mhi_cntrl->mru = info->mru_default; -- -- if (info->sideband_wake) { -- mhi_cntrl->wake_get = mhi_pci_wake_get_nop; -- mhi_cntrl->wake_put = mhi_pci_wake_put_nop; -- mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; -- } -- -- err = mhi_pci_claim(mhi_cntrl, info->bar_num, 
DMA_BIT_MASK(info->dma_data_width)); -- if (err) -- return err; -- -- err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); -- if (err) -- return err; -- -- pci_set_drvdata(pdev, mhi_pdev); -- -- /* Have stored pci confspace at hand for restore in sudden PCI error. -- * cache the state locally and discard the PCI core one. -- */ -- pci_save_state(pdev); -- mhi_pdev->pci_state = pci_store_saved_state(pdev); -- pci_load_saved_state(pdev, NULL); -- -- pci_enable_pcie_error_reporting(pdev); -- -- err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); -- if (err) -- goto err_disable_reporting; -- -- /* MHI bus does not power up the controller by default */ -- err = mhi_prepare_for_power_up(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to prepare MHI controller\n"); -- goto err_unregister; -- } -- -- err = mhi_sync_power_up(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to power up MHI controller\n"); -- goto err_unprepare; -- } -- -- set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -- -- /* start health check */ -- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -- -- /* Only allow runtime-suspend if PME capable (for wakeup) */ -- if (pci_pme_capable(pdev, PCI_D3hot)) { -- pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); -- pm_runtime_use_autosuspend(&pdev->dev); -- pm_runtime_mark_last_busy(&pdev->dev); -- pm_runtime_put_noidle(&pdev->dev); -- } -- -- return 0; -- --err_unprepare: -- mhi_unprepare_after_power_down(mhi_cntrl); --err_unregister: -- mhi_unregister_controller(mhi_cntrl); --err_disable_reporting: -- pci_disable_pcie_error_reporting(pdev); -- -- return err; --} -- --static void mhi_pci_remove(struct pci_dev *pdev) --{ -- struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- del_timer_sync(&mhi_pdev->health_check_timer); -- cancel_work_sync(&mhi_pdev->recovery_work); -- -- if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -- mhi_power_down(mhi_cntrl, true); -- mhi_unprepare_after_power_down(mhi_cntrl); -- } -- -- /* balancing probe put_noidle */ -- if (pci_pme_capable(pdev, PCI_D3hot)) -- pm_runtime_get_noresume(&pdev->dev); -- -- mhi_unregister_controller(mhi_cntrl); -- pci_disable_pcie_error_reporting(pdev); --} -- --static void mhi_pci_shutdown(struct pci_dev *pdev) --{ -- mhi_pci_remove(pdev); -- pci_set_power_state(pdev, PCI_D3hot); --} -- --static void mhi_pci_reset_prepare(struct pci_dev *pdev) --{ -- struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- dev_info(&pdev->dev, "reset\n"); -- -- del_timer(&mhi_pdev->health_check_timer); -- -- /* Clean up MHI state */ -- if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -- mhi_power_down(mhi_cntrl, false); -- mhi_unprepare_after_power_down(mhi_cntrl); -- } -- -- /* cause internal device reset */ -- mhi_soc_reset(mhi_cntrl); -- -- /* Be sure device reset has been executed */ -- msleep(MHI_POST_RESET_DELAY_MS); --} -- --static void mhi_pci_reset_done(struct pci_dev *pdev) --{ -- struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- int err; -- -- /* Restore initial known working PCI state */ -- pci_load_saved_state(pdev, mhi_pdev->pci_state); -- pci_restore_state(pdev); -- -- /* Is device status available ? 
*/ -- if (!mhi_pci_is_alive(mhi_cntrl)) { -- dev_err(&pdev->dev, "reset failed\n"); -- return; -- } -- -- err = mhi_prepare_for_power_up(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to prepare MHI controller\n"); -- return; -- } -- -- err = mhi_sync_power_up(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to power up MHI controller\n"); -- mhi_unprepare_after_power_down(mhi_cntrl); -- return; -- } -- -- set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); -- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); --} -- --static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, -- pci_channel_state_t state) --{ -- struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- dev_err(&pdev->dev, "PCI error detected, state = %u\n", state); -- -- if (state == pci_channel_io_perm_failure) -- return PCI_ERS_RESULT_DISCONNECT; -- -- /* Clean up MHI state */ -- if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -- mhi_power_down(mhi_cntrl, false); -- mhi_unprepare_after_power_down(mhi_cntrl); -- } else { -- /* Nothing to do */ -- return PCI_ERS_RESULT_RECOVERED; -- } -- -- pci_disable_device(pdev); -- -- return PCI_ERS_RESULT_NEED_RESET; --} -- --static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev) --{ -- if (pci_enable_device(pdev)) { -- dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); -- return PCI_ERS_RESULT_DISCONNECT; -- } -- -- return PCI_ERS_RESULT_RECOVERED; --} -- --static void mhi_pci_io_resume(struct pci_dev *pdev) --{ -- struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); -- -- dev_err(&pdev->dev, "PCI slot reset done\n"); -- -- queue_work(system_long_wq, &mhi_pdev->recovery_work); --} -- --static const struct pci_error_handlers mhi_pci_err_handler = { -- .error_detected = mhi_pci_error_detected, -- .slot_reset = mhi_pci_slot_reset, -- .resume = mhi_pci_io_resume, -- .reset_prepare = mhi_pci_reset_prepare, -- .reset_done = mhi_pci_reset_done, --}; -- --static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev) --{ -- struct pci_dev *pdev = to_pci_dev(dev); -- struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- int err; -- -- if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -- return 0; -- -- del_timer(&mhi_pdev->health_check_timer); -- cancel_work_sync(&mhi_pdev->recovery_work); -- -- if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -- mhi_cntrl->ee != MHI_EE_AMSS) -- goto pci_suspend; /* Nothing to do at MHI level */ -- -- /* Transition to M3 state */ -- err = mhi_pm_suspend(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to suspend device: %d\n", err); -- clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status); -- return -EBUSY; -- } -- --pci_suspend: -- pci_disable_device(pdev); -- pci_wake_from_d3(pdev, true); -- -- return 0; --} -- --static int __maybe_unused mhi_pci_runtime_resume(struct device *dev) --{ -- struct pci_dev *pdev = to_pci_dev(dev); -- struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- int err; -- -- if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) -- return 0; -- -- err = pci_enable_device(pdev); -- if (err) -- goto err_recovery; -- -- pci_set_master(pdev); -- pci_wake_from_d3(pdev, false); -- -- if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || -- mhi_cntrl->ee != MHI_EE_AMSS) -- return 0; /* Nothing to do at MHI 
level */ -- -- /* Exit M3, transition to M0 state */ -- err = mhi_pm_resume(mhi_cntrl); -- if (err) { -- dev_err(&pdev->dev, "failed to resume device: %d\n", err); -- goto err_recovery; -- } -- -- /* Resume health check */ -- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); -- -- /* It can be a remote wakeup (no mhi runtime_get), update access time */ -- pm_runtime_mark_last_busy(dev); -- -- return 0; -- --err_recovery: -- /* Do not fail to not mess up our PCI device state, the device likely -- * lost power (d3cold) and we simply need to reset it from the recovery -- * procedure, trigger the recovery asynchronously to prevent system -- * suspend exit delaying. -- */ -- queue_work(system_long_wq, &mhi_pdev->recovery_work); -- pm_runtime_mark_last_busy(dev); -- -- return 0; --} -- --static int __maybe_unused mhi_pci_suspend(struct device *dev) --{ -- pm_runtime_disable(dev); -- return mhi_pci_runtime_suspend(dev); --} -- --static int __maybe_unused mhi_pci_resume(struct device *dev) --{ -- int ret; -- -- /* Depending the platform, device may have lost power (d3cold), we need -- * to resume it now to check its state and recover when necessary. -- */ -- ret = mhi_pci_runtime_resume(dev); -- pm_runtime_enable(dev); -- -- return ret; --} -- --static int __maybe_unused mhi_pci_freeze(struct device *dev) --{ -- struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -- struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; -- -- /* We want to stop all operations, hibernation does not guarantee that -- * device will be in the same state as before freezing, especially if -- * the intermediate restore kernel reinitializes MHI device with new -- * context. -- */ -- if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { -- mhi_power_down(mhi_cntrl, false); -- mhi_unprepare_after_power_down(mhi_cntrl); -- } -- -- return 0; --} -- --static int __maybe_unused mhi_pci_restore(struct device *dev) --{ -- struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev); -- -- /* Reinitialize the device */ -- queue_work(system_long_wq, &mhi_pdev->recovery_work); -- -- return 0; --} -- --static const struct dev_pm_ops mhi_pci_pm_ops = { -- SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL) --#ifdef CONFIG_PM_SLEEP -- .suspend = mhi_pci_suspend, -- .resume = mhi_pci_resume, -- .freeze = mhi_pci_freeze, -- .thaw = mhi_pci_restore, -- .restore = mhi_pci_restore, --#endif --}; -- --static struct pci_driver mhi_pci_driver = { -- .name = "mhi-pci-generic", -- .id_table = mhi_pci_id_table, -- .probe = mhi_pci_probe, -- .remove = mhi_pci_remove, -- .shutdown = mhi_pci_shutdown, -- .err_handler = &mhi_pci_err_handler, -- .driver.pm = &mhi_pci_pm_ops --}; --module_pci_driver(mhi_pci_driver); -- --MODULE_AUTHOR("Loic Poulain "); --MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); --MODULE_LICENSE("GPL"); -diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c -index 626dedd110cbc..fca0d0669aa97 100644 ---- a/drivers/bus/mips_cdmm.c -+++ b/drivers/bus/mips_cdmm.c -@@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void) - np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm"); - if (np) { - err = of_address_to_resource(np, 0, &res); -+ of_node_put(np); - if (!err) - return res.start; - } -diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c -index 6f225dddc74f4..fac8627b04e34 100644 ---- a/drivers/bus/sunxi-rsb.c -+++ b/drivers/bus/sunxi-rsb.c -@@ -227,6 +227,8 @@ static struct sunxi_rsb_device 
*sunxi_rsb_device_create(struct sunxi_rsb *rsb, - - dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); - -+ return rdev; -+ - err_device_add: - put_device(&rdev->dev); - -@@ -269,6 +271,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); - /* common code that starts a transfer */ - static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) - { -+ u32 int_mask, status; -+ bool timeout; -+ - if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { - dev_dbg(rsb->dev, "RSB transfer still in progress\n"); - return -EBUSY; -@@ -276,13 +281,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) - - reinit_completion(&rsb->complete); - -- writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, -- rsb->regs + RSB_INTE); -+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER; -+ writel(int_mask, rsb->regs + RSB_INTE); - writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, - rsb->regs + RSB_CTRL); - -- if (!wait_for_completion_io_timeout(&rsb->complete, -- msecs_to_jiffies(100))) { -+ if (irqs_disabled()) { -+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS, -+ status, (status & int_mask), -+ 10, 100000); -+ writel(status, rsb->regs + RSB_INTS); -+ } else { -+ timeout = !wait_for_completion_io_timeout(&rsb->complete, -+ msecs_to_jiffies(100)); -+ status = rsb->status; -+ } -+ -+ if (timeout) { - dev_dbg(rsb->dev, "RSB timeout\n"); - - /* abort the transfer */ -@@ -294,18 +309,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) - return -ETIMEDOUT; - } - -- if (rsb->status & RSB_INTS_LOAD_BSY) { -+ if (status & RSB_INTS_LOAD_BSY) { - dev_dbg(rsb->dev, "RSB busy\n"); - return -EBUSY; - } - -- if (rsb->status & RSB_INTS_TRANS_ERR) { -- if (rsb->status & RSB_INTS_TRANS_ERR_ACK) { -+ if (status & RSB_INTS_TRANS_ERR) { -+ if (status & RSB_INTS_TRANS_ERR_ACK) { - dev_dbg(rsb->dev, "RSB slave nack\n"); - return -EINVAL; - } - -- if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { -+ if (status & RSB_INTS_TRANS_ERR_DATA) { - dev_dbg(rsb->dev, "RSB transfer data error\n"); - return -EIO; - } -@@ -687,11 +702,11 @@ err_clk_disable: - - static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb) - { -- /* Keep the clock and PM reference counts consistent. */ -- if (pm_runtime_status_suspended(rsb->dev)) -- pm_runtime_resume(rsb->dev); - reset_control_assert(rsb->rstc); -- clk_disable_unprepare(rsb->clk); -+ -+ /* Keep the clock and PM reference counts consistent. 
*/ -+ if (!pm_runtime_status_suspended(rsb->dev)) -+ clk_disable_unprepare(rsb->clk); - } - - static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev) -@@ -814,14 +829,6 @@ static int sunxi_rsb_remove(struct platform_device *pdev) - return 0; - } - --static void sunxi_rsb_shutdown(struct platform_device *pdev) --{ -- struct sunxi_rsb *rsb = platform_get_drvdata(pdev); -- -- pm_runtime_disable(&pdev->dev); -- sunxi_rsb_hw_exit(rsb); --} -- - static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = { - SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend, - sunxi_rsb_runtime_resume, NULL) -@@ -837,7 +844,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table); - static struct platform_driver sunxi_rsb_driver = { - .probe = sunxi_rsb_probe, - .remove = sunxi_rsb_remove, -- .shutdown = sunxi_rsb_shutdown, - .driver = { - .name = RSB_CTRL_NAME, - .of_match_table = sunxi_rsb_of_match_table, -@@ -855,7 +861,13 @@ static int __init sunxi_rsb_init(void) - return ret; - } - -- return platform_driver_register(&sunxi_rsb_driver); -+ ret = platform_driver_register(&sunxi_rsb_driver); -+ if (ret) { -+ bus_unregister(&sunxi_rsb_bus); -+ return ret; -+ } -+ -+ return 0; - } - module_init(sunxi_rsb_init); - -diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c -index 6a8b7fb5be58d..436c0f3563d79 100644 ---- a/drivers/bus/ti-sysc.c -+++ b/drivers/bus/ti-sysc.c -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -17,6 +18,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -51,11 +53,18 @@ struct sysc_address { - struct list_head node; - }; - -+struct sysc_module { -+ struct sysc *ddata; -+ struct list_head node; -+}; -+ - struct sysc_soc_info { - unsigned long general_purpose:1; - enum sysc_soc soc; -- struct mutex list_lock; /* disabled modules list lock */ -+ struct mutex list_lock; /* disabled and restored modules list lock */ - struct list_head disabled_modules; -+ struct list_head restored_modules; -+ struct notifier_block nb; - }; - - enum sysc_clocks { -@@ -223,37 +232,77 @@ static u32 sysc_read_sysstatus(struct sysc *ddata) - return sysc_read(ddata, offset); - } - --/* Poll on reset status */ --static int sysc_wait_softreset(struct sysc *ddata) -+static int sysc_poll_reset_sysstatus(struct sysc *ddata) - { -- u32 sysc_mask, syss_done, rstval; -- int syss_offset, error = 0; -- -- if (ddata->cap->regbits->srst_shift < 0) -- return 0; -- -- syss_offset = ddata->offsets[SYSC_SYSSTATUS]; -- sysc_mask = BIT(ddata->cap->regbits->srst_shift); -+ int error, retries; -+ u32 syss_done, rstval; - - if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) - syss_done = 0; - else - syss_done = ddata->cfg.syss_mask; - -- if (syss_offset >= 0) { -+ if (likely(!timekeeping_suspended)) { - error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, - rstval, (rstval & ddata->cfg.syss_mask) == - syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); -+ } else { -+ retries = MAX_MODULE_SOFTRESET_WAIT; -+ while (retries--) { -+ rstval = sysc_read_sysstatus(ddata); -+ if ((rstval & ddata->cfg.syss_mask) == syss_done) -+ return 0; -+ udelay(2); /* Account for udelay flakeyness */ -+ } -+ error = -ETIMEDOUT; -+ } -+ -+ return error; -+} -+ -+static int sysc_poll_reset_sysconfig(struct sysc *ddata) -+{ -+ int error, retries; -+ u32 sysc_mask, rstval; -+ -+ sysc_mask = BIT(ddata->cap->regbits->srst_shift); - -- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { -+ if (likely(!timekeeping_suspended)) { - error = 
readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, - rstval, !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); -+ } else { -+ retries = MAX_MODULE_SOFTRESET_WAIT; -+ while (retries--) { -+ rstval = sysc_read_sysconfig(ddata); -+ if (!(rstval & sysc_mask)) -+ return 0; -+ udelay(2); /* Account for udelay flakeyness */ -+ } -+ error = -ETIMEDOUT; - } - - return error; - } - -+/* Poll on reset status */ -+static int sysc_wait_softreset(struct sysc *ddata) -+{ -+ int syss_offset, error = 0; -+ -+ if (ddata->cap->regbits->srst_shift < 0) -+ return 0; -+ -+ syss_offset = ddata->offsets[SYSC_SYSSTATUS]; -+ -+ if (syss_offset >= 0) -+ error = sysc_poll_reset_sysstatus(ddata); -+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) -+ error = sysc_poll_reset_sysconfig(ddata); -+ -+ return error; -+} -+ - static int sysc_add_named_clock_from_child(struct sysc *ddata, - const char *name, - const char *optfck_name) -@@ -1518,7 +1567,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { - 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), - SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY | -- SYSC_QUIRK_REINIT_ON_RESUME), -+ SYSC_QUIRK_REINIT_ON_CTX_LOST), - SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, - SYSC_MODULE_QUIRK_WDT), - /* PRUSS on am3, am4 and am5 */ -@@ -1710,7 +1759,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, - if (!ddata->module_va) - return -EIO; - -- /* DISP_CONTROL */ -+ /* DISP_CONTROL, shut down lcd and digit on disable if enabled */ - val = sysc_read(ddata, dispc_offset + 0x40); - lcd_en = val & lcd_en_mask; - digit_en = val & digit_en_mask; -@@ -1722,7 +1771,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, - else - irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ - } -- if (disable & (lcd_en | digit_en)) -+ if (disable && (lcd_en || digit_en)) - sysc_write(ddata, dispc_offset + 0x40, - val & ~(lcd_en_mask | digit_en_mask)); - -@@ -2040,6 +2089,8 @@ static int sysc_reset(struct sysc *ddata) - sysc_val = sysc_read_sysconfig(ddata); - sysc_val |= sysc_mask; - sysc_write(ddata, sysc_offset, sysc_val); -+ /* Flush posted write */ -+ sysc_val = sysc_read_sysconfig(ddata); - } - - if (ddata->cfg.srst_udelay) -@@ -2401,6 +2452,78 @@ static struct dev_pm_domain sysc_child_pm_domain = { - } - }; - -+/* Caller needs to take list_lock if ever used outside of cpu_pm */ -+static void sysc_reinit_modules(struct sysc_soc_info *soc) -+{ -+ struct sysc_module *module; -+ struct list_head *pos; -+ struct sysc *ddata; -+ -+ list_for_each(pos, &sysc_soc->restored_modules) { -+ module = list_entry(pos, struct sysc_module, node); -+ ddata = module->ddata; -+ sysc_reinit_module(ddata, ddata->enabled); -+ } -+} -+ -+/** -+ * sysc_context_notifier - optionally reset and restore module after idle -+ * @nb: notifier block -+ * @cmd: unused -+ * @v: unused -+ * -+ * Some interconnect target modules need to be restored, or reset and restored -+ * on CPU_PM CPU_PM_CLUSTER_EXIT notifier. This is needed at least for am335x -+ * OTG and GPMC target modules even if the modules are unused. 
-+ */ -+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd, -+ void *v) -+{ -+ struct sysc_soc_info *soc; -+ -+ soc = container_of(nb, struct sysc_soc_info, nb); -+ -+ switch (cmd) { -+ case CPU_CLUSTER_PM_ENTER: -+ break; -+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */ -+ break; -+ case CPU_CLUSTER_PM_EXIT: -+ sysc_reinit_modules(soc); -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+/** -+ * sysc_add_restored - optionally add reset and restore quirk handling -+ * @ddata: device data -+ */ -+static void sysc_add_restored(struct sysc *ddata) -+{ -+ struct sysc_module *restored_module; -+ -+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL); -+ if (!restored_module) -+ return; -+ -+ restored_module->ddata = ddata; -+ -+ mutex_lock(&sysc_soc->list_lock); -+ -+ list_add(&restored_module->node, &sysc_soc->restored_modules); -+ -+ if (sysc_soc->nb.notifier_call) -+ goto out_unlock; -+ -+ sysc_soc->nb.notifier_call = sysc_context_notifier; -+ cpu_pm_register_notifier(&sysc_soc->nb); -+ -+out_unlock: -+ mutex_unlock(&sysc_soc->list_lock); -+} -+ - /** - * sysc_legacy_idle_quirk - handle children in omap_device compatible way - * @ddata: device driver data -@@ -2900,12 +3023,14 @@ static int sysc_add_disabled(unsigned long base) - } - - /* -- * One time init to detect the booted SoC and disable unavailable features. -+ * One time init to detect the booted SoC, disable unavailable features -+ * and initialize list for optional cpu_pm notifier. -+ * - * Note that we initialize static data shared across all ti-sysc instances - * so ddata is only used for SoC type. This can be called from module_init - * once we no longer need to rely on platform data. - */ --static int sysc_init_soc(struct sysc *ddata)
We now want to see -@@ -2985,15 +3111,24 @@ static int sysc_init_soc(struct sysc *ddata) - return 0; - } - --static void sysc_cleanup_soc(void) -+static void sysc_cleanup_static_data(void) - { -+ struct sysc_module *restored_module; - struct sysc_address *disabled_module; - struct list_head *pos, *tmp; - - if (!sysc_soc) - return; - -+ if (sysc_soc->nb.notifier_call) -+ cpu_pm_unregister_notifier(&sysc_soc->nb); -+ - mutex_lock(&sysc_soc->list_lock); -+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) { -+ restored_module = list_entry(pos, struct sysc_module, node); -+ list_del(pos); -+ kfree(restored_module); -+ } - list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) { - disabled_module = list_entry(pos, struct sysc_address, node); - list_del(pos); -@@ -3029,13 +3164,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata) - */ - static int sysc_check_active_timer(struct sysc *ddata) - { -+ int error; -+ - if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && - ddata->cap->type != TI_SYSC_OMAP4_TIMER) - return 0; - -+ /* -+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12. -+ * Revision C and later are fixed with commit 23885389dbbb ("ARM: -+ * dts: Fix timer regression for beagleboard revision c"). This all -+ * can be dropped if we stop supporting old beagleboard revisions -+ * A to B4 at some point. -+ */ -+ if (sysc_soc->soc == SOC_3430) -+ error = -ENXIO; -+ else -+ error = -EBUSY; -+ - if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && - (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) -- return -ENXIO; -+ return error; - - return 0; - } -@@ -3061,7 +3210,7 @@ static int sysc_probe(struct platform_device *pdev) - ddata->dev = &pdev->dev; - platform_set_drvdata(pdev, ddata); - -- error = sysc_init_soc(ddata); -+ error = sysc_init_static_data(ddata); - if (error) - return error; - -@@ -3159,6 +3308,9 @@ static int sysc_probe(struct platform_device *pdev) - pm_runtime_put(&pdev->dev); - } - -+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST) -+ sysc_add_restored(ddata); -+ - return 0; - - err: -@@ -3175,7 +3327,9 @@ static int sysc_remove(struct platform_device *pdev) - struct sysc *ddata = platform_get_drvdata(pdev); - int error; - -- cancel_delayed_work_sync(&ddata->idle_work); -+ /* Device can still be enabled, see deferred idle quirk in probe */ -+ if (cancel_delayed_work_sync(&ddata->idle_work)) -+ ti_sysc_idle(&ddata->idle_work.work); - - error = pm_runtime_resume_and_get(ddata->dev); - if (error < 0) { -@@ -3240,7 +3394,7 @@ static void __exit sysc_exit(void) - { - bus_unregister_notifier(&platform_bus_type, &sysc_nb); - platform_driver_unregister(&sysc_driver); -- sysc_cleanup_soc(); -+ sysc_cleanup_static_data(); - } - module_exit(sysc_exit); - -diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig -index 740811893c570..d454428f4981d 100644 ---- a/drivers/char/Kconfig -+++ b/drivers/char/Kconfig -@@ -428,27 +428,40 @@ config ADI - driver include crash and makedumpfile. - - config RANDOM_TRUST_CPU -- bool "Trust the CPU manufacturer to initialize Linux's CRNG" -+ bool "Initialize RNG using CPU RNG instructions" -+ default y - depends on ARCH_RANDOM -- default n - help -- Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or -- RDRAND, IBM for the S390 and Power PC architectures) is trustworthy -- for the purposes of initializing Linux's CRNG. 
Since this is not -- something that can be independently audited, this amounts to trusting -- that CPU manufacturer (perhaps with the insistence or mandate -- of a Nation State's intelligence or law enforcement agencies) -- has not installed a hidden back door to compromise the CPU's -- random number generation facilities. This can also be configured -- at boot with "random.trust_cpu=on/off". -+ Initialize the RNG using random numbers supplied by the CPU's -+ RNG instructions (e.g. RDRAND), if supported and available. These -+ random numbers are never used directly, but are rather hashed into -+ the main input pool, and this happens regardless of whether or not -+ this option is enabled. Instead, this option controls whether the -+ they are credited and hence can initialize the RNG. Additionally, -+ other sources of randomness are always used, regardless of this -+ setting. Enabling this implies trusting that the CPU can supply high -+ quality and non-backdoored random numbers. -+ -+ Say Y here unless you have reason to mistrust your CPU or believe -+ its RNG facilities may be faulty. This may also be configured at -+ boot time with "random.trust_cpu=on/off". - - config RANDOM_TRUST_BOOTLOADER -- bool "Trust the bootloader to initialize Linux's CRNG" -+ bool "Initialize RNG using bootloader-supplied seed" -+ default y - help -- Some bootloaders can provide entropy to increase the kernel's initial -- device randomness. Say Y here to assume the entropy provided by the -- booloader is trustworthy so it will be added to the kernel's entropy -- pool. Otherwise, say N here so it will be regarded as device input that -- only mixes the entropy pool. -+ Initialize the RNG using a seed supplied by the bootloader or boot -+ environment (e.g. EFI or a bootloader-generated device tree). This -+ seed is not used directly, but is rather hashed into the main input -+ pool, and this happens regardless of whether or not this option is -+ enabled. Instead, this option controls whether the seed is credited -+ and hence can initialize the RNG. Additionally, other sources of -+ randomness are always used, regardless of this setting. Enabling -+ this implies trusting that the bootloader can supply high quality and -+ non-backdoored seeds. -+ -+ Say Y here unless you have reason to mistrust your bootloader or -+ believe its RNG facilities may be faulty. This may also be configured -+ at boot time with "random.trust_bootloader=on/off". 
- - endmenu -diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c -index ed3c4c42fc23b..514f9f287a781 100644 ---- a/drivers/char/agp/parisc-agp.c -+++ b/drivers/char/agp/parisc-agp.c -@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem) - { - struct _parisc_agp_info *info = &parisc_agp_info; - -+ /* force fdc ops to be visible to IOMMU */ -+ asm_io_sync(); -+ - writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM); - readq(info->ioc_regs+IOC_PCOM); /* flush */ - } -@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) - info->gatt[j] = - parisc_agp_mask_memory(agp_bridge, - paddr, type); -+ asm_io_fdc(&info->gatt[j]); - } - } - -@@ -191,7 +195,16 @@ static unsigned long - parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr, - int type) - { -- return SBA_PDIR_VALID_BIT | addr; -+ unsigned ci; /* coherent index */ -+ dma_addr_t pa; -+ -+ pa = addr & IOVP_MASK; -+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa))); -+ -+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */ -+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ -+ -+ return cpu_to_le64(pa); - } - - static void -@@ -281,7 +294,7 @@ agp_ioc_init(void __iomem *ioc_regs) - return 0; - } - --static int -+static int __init - lba_find_capability(int cap) - { - struct _parisc_agp_info *info = &parisc_agp_info; -@@ -366,7 +379,7 @@ fail: - return error; - } - --static int -+static int __init - find_quicksilver(struct device *dev, void *data) - { - struct parisc_device **lba = data; -@@ -378,7 +391,7 @@ find_quicksilver(struct device *dev, void *data) - return 0; - } - --static int -+static int __init - parisc_agp_init(void) - { - extern struct sba_device *sba_list; -diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c -index deb85a334c937..260573c283209 100644 ---- a/drivers/char/applicom.c -+++ b/drivers/char/applicom.c -@@ -197,8 +197,10 @@ static int __init applicom_init(void) - if (!pci_match_id(applicom_pci_tbl, dev)) - continue; - -- if (pci_enable_device(dev)) -+ if (pci_enable_device(dev)) { -+ pci_dev_put(dev); - return -EIO; -+ } - - RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO); - -@@ -207,6 +209,7 @@ static int __init applicom_init(void) - "space at 0x%llx\n", - (unsigned long long)pci_resource_start(dev, 0)); - pci_disable_device(dev); -+ pci_dev_put(dev); - return -EIO; - } - -diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig -index 239eca4d68055..650c7d9180802 100644 ---- a/drivers/char/hw_random/Kconfig -+++ b/drivers/char/hw_random/Kconfig -@@ -414,7 +414,7 @@ config HW_RANDOM_MESON - - config HW_RANDOM_CAVIUM - tristate "Cavium ThunderX Random Number Generator support" -- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT)) -+ depends on HW_RANDOM && PCI && ARCH_THUNDER - default HW_RANDOM - help - This driver provides kernel-side support for the Random Number -diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c -index c22d4184bb612..0555e3838bce1 100644 ---- a/drivers/char/hw_random/amd-rng.c -+++ b/drivers/char/hw_random/amd-rng.c -@@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void) - found: - err = pci_read_config_dword(pdev, 0x58, &pmbase); - if (err) -- return err; -+ goto put_dev; - - pmbase &= 0x0000FF00; -- if (pmbase == 0) -- return -EIO; -+ if (pmbase == 0) { -+ err = -EIO; -+ goto put_dev; -+ } - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); -- if (!priv) -- return -ENOMEM; -+ if 
(!priv) { -+ err = -ENOMEM; -+ goto put_dev; -+ } - - if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { - dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", -@@ -185,6 +189,8 @@ err_iomap: - release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); - out: - kfree(priv); -+put_dev: -+ pci_dev_put(pdev); - return err; - } - -@@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void) - - release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); - -+ pci_dev_put(priv->pcidev); -+ - kfree(priv); - } - -diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c -index b24ac39a903b3..e34c3ea692b6c 100644 ---- a/drivers/char/hw_random/arm_smccc_trng.c -+++ b/drivers/char/hw_random/arm_smccc_trng.c -@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) - MAX_BITS_PER_CALL); - - arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res); -- if ((int)res.a0 < 0) -- return (int)res.a0; - - switch ((int)res.a0) { - case SMCCC_RET_SUCCESS: -@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) - return copied; - cond_resched(); - break; -+ default: -+ return -EIO; - } - } - -diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c -index ecb71c4317a50..8cf0ef501341e 100644 ---- a/drivers/char/hw_random/atmel-rng.c -+++ b/drivers/char/hw_random/atmel-rng.c -@@ -114,6 +114,7 @@ static int atmel_trng_probe(struct platform_device *pdev) - - err_register: - clk_disable_unprepare(trng->clk); -+ atmel_trng_disable(trng); - return ret; - } - -diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c -index 3de4a6a443ef9..6f66919652bf5 100644 ---- a/drivers/char/hw_random/cavium-rng-vf.c -+++ b/drivers/char/hw_random/cavium-rng-vf.c -@@ -1,10 +1,7 @@ -+// SPDX-License-Identifier: GPL-2.0 - /* -- * Hardware Random Number Generator support for Cavium, Inc. -- * Thunder processor family. -- * -- * This file is subject to the terms and conditions of the GNU General Public -- * License. See the file "COPYING" in the main directory of this archive -- * for more details. -+ * Hardware Random Number Generator support. -+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. - * - * Copyright (C) 2016 Cavium, Inc. 
- */ -@@ -15,16 +12,146 @@ - #include - #include - -+#include -+ -+/* PCI device IDs */ -+#define PCI_DEVID_CAVIUM_RNG_PF 0xA018 -+#define PCI_DEVID_CAVIUM_RNG_VF 0xA033 -+ -+#define HEALTH_STATUS_REG 0x38 -+ -+/* RST device info */ -+#define PCI_DEVICE_ID_RST_OTX2 0xA085 -+#define RST_BOOT_REG 0x1600ULL -+#define CLOCK_BASE_RATE 50000000ULL -+#define MSEC_TO_NSEC(x) (x * 1000000) -+ - struct cavium_rng { - struct hwrng ops; - void __iomem *result; -+ void __iomem *pf_regbase; -+ struct pci_dev *pdev; -+ u64 clock_rate; -+ u64 prev_error; -+ u64 prev_time; - }; - -+static inline bool is_octeontx(struct pci_dev *pdev) -+{ -+ if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX, -+ MIDR_CPU_VAR_REV(0, 0), -+ MIDR_CPU_VAR_REV(3, 0)) || -+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX, -+ MIDR_CPU_VAR_REV(0, 0), -+ MIDR_CPU_VAR_REV(3, 0)) || -+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX, -+ MIDR_CPU_VAR_REV(0, 0), -+ MIDR_CPU_VAR_REV(3, 0))) -+ return true; -+ -+ return false; -+} -+ -+static u64 rng_get_coprocessor_clkrate(void) -+{ -+ u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */ -+ struct pci_dev *pdev; -+ void __iomem *base; -+ -+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, -+ PCI_DEVICE_ID_RST_OTX2, NULL); -+ if (!pdev) -+ goto error; -+ -+ base = pci_ioremap_bar(pdev, 0); -+ if (!base) -+ goto error_put_pdev; -+ -+ /* RST: PNR_MUL * 50Mhz gives clockrate */ -+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F); -+ -+ iounmap(base); -+ -+error_put_pdev: -+ pci_dev_put(pdev); -+ -+error: -+ return ret; -+} -+ -+static int check_rng_health(struct cavium_rng *rng) -+{ -+ u64 cur_err, cur_time; -+ u64 status, cycles; -+ u64 time_elapsed; -+ -+ -+ /* Skip checking health for OcteonTx */ -+ if (!rng->pf_regbase) -+ return 0; -+ -+ status = readq(rng->pf_regbase + HEALTH_STATUS_REG); -+ if (status & BIT_ULL(0)) { -+ dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n"); -+ return -EIO; -+ } -+ -+ cycles = status >> 1; -+ if (!cycles) -+ return 0; -+ -+ cur_time = arch_timer_read_counter(); -+ -+ /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE] -+ * Number of coprocessor cycles times 2 since the last failure. -+ * This field doesn't get cleared/updated until another failure. -+ */ -+ cycles = cycles / 2; -+ cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */ -+ -+ /* Ignore errors that happenned a long time ago, these -+ * are most likely false positive errors. -+ */ -+ if (cur_err > MSEC_TO_NSEC(10)) { -+ rng->prev_error = 0; -+ rng->prev_time = 0; -+ return 0; -+ } -+ -+ if (rng->prev_error) { -+ /* Calculate time elapsed since last error -+ * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz. -+ */ -+ time_elapsed = (cur_time - rng->prev_time) * 10; -+ time_elapsed += rng->prev_error; -+ -+ /* Check if current error is a new one or the old one itself. -+ * If error is a new one then consider there is a persistent -+ * issue with entropy, declare hardware failure. 
-+ */ -+ if (cur_err < time_elapsed) { -+ dev_err(&rng->pdev->dev, "HWRNG failure detected\n"); -+ rng->prev_error = cur_err; -+ rng->prev_time = cur_time; -+ return -EIO; -+ } -+ } -+ -+ rng->prev_error = cur_err; -+ rng->prev_time = cur_time; -+ return 0; -+} -+ - /* Read data from the RNG unit */ - static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) - { - struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); - unsigned int size = max; -+ int err = 0; -+ -+ err = check_rng_health(p); -+ if (err) -+ return err; - - while (size >= 8) { - *((u64 *)dat) = readq(p->result); -@@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) - return max; - } - -+static int cavium_map_pf_regs(struct cavium_rng *rng) -+{ -+ struct pci_dev *pdev; -+ -+ /* Health status is not supported on 83xx, skip mapping PF CSRs */ -+ if (is_octeontx(rng->pdev)) { -+ rng->pf_regbase = NULL; -+ return 0; -+ } -+ -+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, -+ PCI_DEVID_CAVIUM_RNG_PF, NULL); -+ if (!pdev) { -+ dev_err(&pdev->dev, "Cannot find RNG PF device\n"); -+ return -EIO; -+ } -+ -+ rng->pf_regbase = ioremap(pci_resource_start(pdev, 0), -+ pci_resource_len(pdev, 0)); -+ if (!rng->pf_regbase) { -+ dev_err(&pdev->dev, "Failed to map PF CSR region\n"); -+ pci_dev_put(pdev); -+ return -ENOMEM; -+ } -+ -+ pci_dev_put(pdev); -+ -+ /* Get co-processor clock rate */ -+ rng->clock_rate = rng_get_coprocessor_clkrate(); -+ -+ return 0; -+} -+ - /* Map Cavium RNG to an HWRNG object */ - static int cavium_rng_probe_vf(struct pci_dev *pdev, - const struct pci_device_id *id) -@@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, - if (!rng) - return -ENOMEM; - -+ rng->pdev = pdev; -+ - /* Map the RNG result */ - rng->result = pcim_iomap(pdev, 0, 0); - if (!rng->result) { -@@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, - - pci_set_drvdata(pdev, rng); - -+ /* Health status is available only at PF, hence map PF registers. */ -+ ret = cavium_map_pf_regs(rng); -+ if (ret) -+ return ret; -+ - ret = devm_hwrng_register(&pdev->dev, &rng->ops); - if (ret) { - dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); -@@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, - return 0; - } - -+/* Remove the VF */ -+static void cavium_rng_remove_vf(struct pci_dev *pdev) -+{ -+ struct cavium_rng *rng; -+ -+ rng = pci_get_drvdata(pdev); -+ iounmap(rng->pf_regbase); -+} - - static const struct pci_device_id cavium_rng_vf_id_table[] = { -- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, -- {0,}, -+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) }, -+ { 0, } - }; - MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); - -@@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = { - .name = "cavium_rng_vf", - .id_table = cavium_rng_vf_id_table, - .probe = cavium_rng_probe_vf, -+ .remove = cavium_rng_remove_vf, - }; - module_pci_driver(cavium_rng_vf_driver); - - MODULE_AUTHOR("Omer Khaliq "); --MODULE_LICENSE("GPL"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c -index 63d6e68c24d2f..b96579222408b 100644 ---- a/drivers/char/hw_random/cavium-rng.c -+++ b/drivers/char/hw_random/cavium-rng.c -@@ -1,10 +1,7 @@ -+// SPDX-License-Identifier: GPL-2.0 - /* -- * Hardware Random Number Generator support for Cavium Inc. -- * Thunder processor family. 
-- * -- * This file is subject to the terms and conditions of the GNU General Public -- * License. See the file "COPYING" in the main directory of this archive -- * for more details. -+ * Hardware Random Number Generator support. -+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. - * - * Copyright (C) 2016 Cavium, Inc. - */ -@@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = { - - module_pci_driver(cavium_rng_pf_driver); - MODULE_AUTHOR("Omer Khaliq "); --MODULE_LICENSE("GPL"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c -index a3db27916256d..cfb085de876b7 100644 ---- a/drivers/char/hw_random/core.c -+++ b/drivers/char/hw_random/core.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - #include -diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c -index 138ce434f86b2..12fbe80918319 100644 ---- a/drivers/char/hw_random/geode-rng.c -+++ b/drivers/char/hw_random/geode-rng.c -@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = { - }; - MODULE_DEVICE_TABLE(pci, pci_tbl); - -+struct amd_geode_priv { -+ struct pci_dev *pcidev; -+ void __iomem *membase; -+}; - - static int geode_rng_data_read(struct hwrng *rng, u32 *data) - { -@@ -90,6 +94,7 @@ static int __init geode_rng_init(void) - const struct pci_device_id *ent; - void __iomem *mem; - unsigned long rng_base; -+ struct amd_geode_priv *priv; - - for_each_pci_dev(pdev) { - ent = pci_match_id(pci_tbl, pdev); -@@ -97,17 +102,26 @@ static int __init geode_rng_init(void) - goto found; - } - /* Device not found. */ -- goto out; -+ return err; - - found: -+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); -+ if (!priv) { -+ err = -ENOMEM; -+ goto put_dev; -+ } -+ - rng_base = pci_resource_start(pdev, 0); - if (rng_base == 0) -- goto out; -+ goto free_priv; - err = -ENOMEM; - mem = ioremap(rng_base, 0x58); - if (!mem) -- goto out; -- geode_rng.priv = (unsigned long)mem; -+ goto free_priv; -+ -+ geode_rng.priv = (unsigned long)priv; -+ priv->membase = mem; -+ priv->pcidev = pdev; - - pr_info("AMD Geode RNG detected\n"); - err = hwrng_register(&geode_rng); -@@ -116,20 +130,26 @@ found: - err); - goto err_unmap; - } --out: - return err; - - err_unmap: - iounmap(mem); -- goto out; -+free_priv: -+ kfree(priv); -+put_dev: -+ pci_dev_put(pdev); -+ return err; - } - - static void __exit geode_rng_exit(void) - { -- void __iomem *mem = (void __iomem *)geode_rng.priv; -+ struct amd_geode_priv *priv; - -+ priv = (struct amd_geode_priv *)geode_rng.priv; - hwrng_unregister(&geode_rng); -- iounmap(mem); -+ iounmap(priv->membase); -+ pci_dev_put(priv->pcidev); -+ kfree(priv); - } - - module_init(geode_rng_init); -diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c -index b05d676ca814c..c4ae72b6743ed 100644 ---- a/drivers/char/hw_random/imx-rngc.c -+++ b/drivers/char/hw_random/imx-rngc.c -@@ -110,7 +110,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc) - cmd = readl(rngc->base + RNGC_COMMAND); - writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND); - -- ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT); -+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); - imx_rngc_irq_mask_clear(rngc); - if (!ret) - return -ETIMEDOUT; -@@ -187,9 +187,7 @@ static int imx_rngc_init(struct hwrng *rng) - cmd = readl(rngc->base + RNGC_COMMAND); - writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND); - -- ret = 
wait_for_completion_timeout(&rngc->rng_op_done, -- RNGC_TIMEOUT); -- -+ ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT)); - if (!ret) { - ret = -ETIMEDOUT; - goto err; -@@ -270,13 +268,6 @@ static int imx_rngc_probe(struct platform_device *pdev) - goto err; - } - -- ret = devm_request_irq(&pdev->dev, -- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); -- if (ret) { -- dev_err(rngc->dev, "Can't get interrupt working.\n"); -- goto err; -- } -- - init_completion(&rngc->rng_op_done); - - rngc->rng.name = pdev->name; -@@ -290,6 +281,13 @@ static int imx_rngc_probe(struct platform_device *pdev) - - imx_rngc_irq_mask_clear(rngc); - -+ ret = devm_request_irq(&pdev->dev, -+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); -+ if (ret) { -+ dev_err(rngc->dev, "Can't get interrupt working.\n"); -+ return ret; -+ } -+ - if (self_test) { - ret = imx_rngc_self_test(rngc); - if (ret) { -diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c -index a43743887db19..9142a63b92b30 100644 ---- a/drivers/char/hw_random/iproc-rng200.c -+++ b/drivers/char/hw_random/iproc-rng200.c -@@ -189,6 +189,8 @@ static int iproc_rng200_probe(struct platform_device *pdev) - return PTR_ERR(priv->base); - } - -+ dev_set_drvdata(dev, priv); -+ - priv->rng.name = "iproc-rng200"; - priv->rng.read = iproc_rng200_read; - priv->rng.init = iproc_rng200_init; -@@ -206,6 +208,28 @@ static int iproc_rng200_probe(struct platform_device *pdev) - return 0; - } - -+static int __maybe_unused iproc_rng200_suspend(struct device *dev) -+{ -+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev); -+ -+ iproc_rng200_cleanup(&priv->rng); -+ -+ return 0; -+} -+ -+static int __maybe_unused iproc_rng200_resume(struct device *dev) -+{ -+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev); -+ -+ iproc_rng200_init(&priv->rng); -+ -+ return 0; -+} -+ -+static const struct dev_pm_ops iproc_rng200_pm_ops = { -+ SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume) -+}; -+ - static const struct of_device_id iproc_rng200_of_match[] = { - { .compatible = "brcm,bcm2711-rng200", }, - { .compatible = "brcm,bcm7211-rng200", }, -@@ -219,6 +243,7 @@ static struct platform_driver iproc_rng200_driver = { - .driver = { - .name = "iproc-rng200", - .of_match_table = iproc_rng200_of_match, -+ .pm = &iproc_rng200_pm_ops, - }, - .probe = iproc_rng200_probe, - }; -diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c -index 8ad7b515a51b8..6c00ea0085553 100644 ---- a/drivers/char/hw_random/mtk-rng.c -+++ b/drivers/char/hw_random/mtk-rng.c -@@ -166,8 +166,13 @@ static int mtk_rng_runtime_resume(struct device *dev) - return mtk_rng_init(&priv->rng); - } - --static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend, -- mtk_rng_runtime_resume, NULL); -+static const struct dev_pm_ops mtk_rng_pm_ops = { -+ SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend, -+ mtk_rng_runtime_resume, NULL) -+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, -+ pm_runtime_force_resume) -+}; -+ - #define MTK_RNG_PM_OPS (&mtk_rng_pm_ops) - #else /* CONFIG_PM */ - #define MTK_RNG_PM_OPS NULL -diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c -index 67947a19aa225..3774adf903a83 100644 ---- a/drivers/char/hw_random/nomadik-rng.c -+++ b/drivers/char/hw_random/nomadik-rng.c -@@ -13,8 +13,6 @@ - #include - #include - --static struct clk *rng_clk; -- - static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) - { - void __iomem *base = 
(void __iomem *)rng->priv; -@@ -36,21 +34,20 @@ static struct hwrng nmk_rng = { - - static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) - { -+ struct clk *rng_clk; - void __iomem *base; - int ret; - -- rng_clk = devm_clk_get(&dev->dev, NULL); -+ rng_clk = devm_clk_get_enabled(&dev->dev, NULL); - if (IS_ERR(rng_clk)) { - dev_err(&dev->dev, "could not get rng clock\n"); - ret = PTR_ERR(rng_clk); - return ret; - } - -- clk_prepare_enable(rng_clk); -- - ret = amba_request_regions(dev, dev->dev.init_name); - if (ret) -- goto out_clk; -+ return ret; - ret = -ENOMEM; - base = devm_ioremap(&dev->dev, dev->res.start, - resource_size(&dev->res)); -@@ -64,15 +61,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) - - out_release: - amba_release_regions(dev); --out_clk: -- clk_disable(rng_clk); - return ret; - } - - static void nmk_rng_remove(struct amba_device *dev) - { - amba_release_regions(dev); -- clk_disable(rng_clk); - } - - static const struct amba_id nmk_rng_ids[] = { -diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c -index e0d77fa048fb6..f06e4f95114f9 100644 ---- a/drivers/char/hw_random/omap3-rom-rng.c -+++ b/drivers/char/hw_random/omap3-rom-rng.c -@@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev) - - r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); - if (r != 0) { -- clk_disable(ddata->clk); -+ clk_disable_unprepare(ddata->clk); - dev_err(dev, "HW init failed: %d\n", r); - - return -EIO; -diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c -index 99c8bd0859a14..e04a054e89307 100644 ---- a/drivers/char/hw_random/pic32-rng.c -+++ b/drivers/char/hw_random/pic32-rng.c -@@ -36,7 +36,6 @@ - struct pic32_rng { - void __iomem *base; - struct hwrng rng; -- struct clk *clk; - }; - - /* -@@ -70,6 +69,7 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, - static int pic32_rng_probe(struct platform_device *pdev) - { - struct pic32_rng *priv; -+ struct clk *clk; - u32 v; - int ret; - -@@ -81,13 +81,9 @@ static int pic32_rng_probe(struct platform_device *pdev) - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - -- priv->clk = devm_clk_get(&pdev->dev, NULL); -- if (IS_ERR(priv->clk)) -- return PTR_ERR(priv->clk); -- -- ret = clk_prepare_enable(priv->clk); -- if (ret) -- return ret; -+ clk = devm_clk_get_enabled(&pdev->dev, NULL); -+ if (IS_ERR(clk)) -+ return PTR_ERR(clk); - - /* enable TRNG in enhanced mode */ - v = TRNGEN | TRNGMOD; -@@ -98,15 +94,11 @@ static int pic32_rng_probe(struct platform_device *pdev) - - ret = devm_hwrng_register(&pdev->dev, &priv->rng); - if (ret) -- goto err_register; -+ return ret; - - platform_set_drvdata(pdev, priv); - - return 0; -- --err_register: -- clk_disable_unprepare(priv->clk); -- return ret; - } - - static int pic32_rng_remove(struct platform_device *pdev) -@@ -114,7 +106,6 @@ static int pic32_rng_remove(struct platform_device *pdev) - struct pic32_rng *rng = platform_get_drvdata(pdev); - - writel(0, rng->base + RNGCON); -- clk_disable_unprepare(rng->clk); - return 0; - } - -diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c -index 15ba1e6fae4d2..6e9dfac9fc9f4 100644 ---- a/drivers/char/hw_random/st-rng.c -+++ b/drivers/char/hw_random/st-rng.c -@@ -42,7 +42,6 @@ - - struct st_rng_data { - void __iomem *base; -- struct clk *clk; - struct hwrng ops; - }; - -@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev) - if 
(IS_ERR(base)) - return PTR_ERR(base); - -- clk = devm_clk_get(&pdev->dev, NULL); -+ clk = devm_clk_get_enabled(&pdev->dev, NULL); - if (IS_ERR(clk)) - return PTR_ERR(clk); - -- ret = clk_prepare_enable(clk); -- if (ret) -- return ret; -- - ddata->ops.priv = (unsigned long)ddata; - ddata->ops.read = st_rng_read; - ddata->ops.name = pdev->name; - ddata->base = base; -- ddata->clk = clk; -- -- dev_set_drvdata(&pdev->dev, ddata); - - ret = devm_hwrng_register(&pdev->dev, &ddata->ops); - if (ret) { - dev_err(&pdev->dev, "Failed to register HW RNG\n"); -- clk_disable_unprepare(clk); - return ret; - } - -@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev) - return 0; - } - --static int st_rng_remove(struct platform_device *pdev) --{ -- struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev); -- -- clk_disable_unprepare(ddata->clk); -- -- return 0; --} -- - static const struct of_device_id st_rng_match[] __maybe_unused = { - { .compatible = "st,rng" }, - {}, -@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = { - .of_match_table = of_match_ptr(st_rng_match), - }, - .probe = st_rng_probe, -- .remove = st_rng_remove - }; - - module_platform_driver(st_rng_driver); -diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c -index a90001e02bf7a..3a194eb3ce8ad 100644 ---- a/drivers/char/hw_random/virtio-rng.c -+++ b/drivers/char/hw_random/virtio-rng.c -@@ -4,6 +4,7 @@ - * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation - */ - -+#include - #include - #include - #include -@@ -18,71 +19,111 @@ static DEFINE_IDA(rng_index_ida); - struct virtrng_info { - struct hwrng hwrng; - struct virtqueue *vq; -- struct completion have_data; - char name[25]; -- unsigned int data_avail; - int index; -- bool busy; - bool hwrng_register_done; - bool hwrng_removed; -+ /* data transfer */ -+ struct completion have_data; -+ unsigned int data_avail; -+ unsigned int data_idx; -+ /* minimal size returned by rng_buffer_size() */ -+#if SMP_CACHE_BYTES < 32 -+ u8 data[32]; -+#else -+ u8 data[SMP_CACHE_BYTES]; -+#endif - }; - - static void random_recv_done(struct virtqueue *vq) - { - struct virtrng_info *vi = vq->vdev->priv; -+ unsigned int len; - - /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ -- if (!virtqueue_get_buf(vi->vq, &vi->data_avail)) -+ if (!virtqueue_get_buf(vi->vq, &len)) - return; - -+ smp_store_release(&vi->data_avail, len); - complete(&vi->have_data); - } - --/* The host will fill any buffer we give it with sweet, sweet randomness. */ --static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size) -+static void request_entropy(struct virtrng_info *vi) - { - struct scatterlist sg; - -- sg_init_one(&sg, buf, size); -+ reinit_completion(&vi->have_data); -+ vi->data_idx = 0; -+ -+ sg_init_one(&sg, vi->data, sizeof(vi->data)); - - /* There should always be room for one buffer. 
*/ -- virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL); -+ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL); - - virtqueue_kick(vi->vq); - } - -+static unsigned int copy_data(struct virtrng_info *vi, void *buf, -+ unsigned int size) -+{ -+ size = min_t(unsigned int, size, vi->data_avail); -+ memcpy(buf, vi->data + vi->data_idx, size); -+ vi->data_idx += size; -+ vi->data_avail -= size; -+ if (vi->data_avail == 0) -+ request_entropy(vi); -+ return size; -+} -+ - static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) - { - int ret; - struct virtrng_info *vi = (struct virtrng_info *)rng->priv; -+ unsigned int chunk; -+ size_t read; - - if (vi->hwrng_removed) - return -ENODEV; - -- if (!vi->busy) { -- vi->busy = true; -- reinit_completion(&vi->have_data); -- register_buffer(vi, buf, size); -+ read = 0; -+ -+ /* copy available data */ -+ if (smp_load_acquire(&vi->data_avail)) { -+ chunk = copy_data(vi, buf, size); -+ size -= chunk; -+ read += chunk; - } - - if (!wait) -- return 0; -- -- ret = wait_for_completion_killable(&vi->have_data); -- if (ret < 0) -- return ret; -+ return read; -+ -+ /* We have already copied available entropy, -+ * so either size is 0 or data_avail is 0 -+ */ -+ while (size != 0) { -+ /* data_avail is 0 but a request is pending */ -+ ret = wait_for_completion_killable(&vi->have_data); -+ if (ret < 0) -+ return ret; -+ /* if vi->data_avail is 0, we have been interrupted -+ * by a cleanup, but buffer stays in the queue -+ */ -+ if (vi->data_avail == 0) -+ return read; - -- vi->busy = false; -+ chunk = copy_data(vi, buf + read, size); -+ size -= chunk; -+ read += chunk; -+ } - -- return vi->data_avail; -+ return read; - } - - static void virtio_cleanup(struct hwrng *rng) - { - struct virtrng_info *vi = (struct virtrng_info *)rng->priv; - -- if (vi->busy) -- wait_for_completion(&vi->have_data); -+ complete(&vi->have_data); - } - - static int probe_common(struct virtio_device *vdev) -@@ -118,6 +159,9 @@ static int probe_common(struct virtio_device *vdev) - goto err_find; - } - -+ /* we always have a pending entropy request */ -+ request_entropy(vi); -+ - return 0; - - err_find: -@@ -133,9 +177,9 @@ static void remove_common(struct virtio_device *vdev) - - vi->hwrng_removed = true; - vi->data_avail = 0; -+ vi->data_idx = 0; - complete(&vi->have_data); - vdev->config->reset(vdev); -- vi->busy = false; - if (vi->hwrng_register_done) - hwrng_unregister(&vi->hwrng); - vdev->config->del_vqs(vdev); -diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig -index 249b31197eeae..8298a4dd0de68 100644 ---- a/drivers/char/ipmi/Kconfig -+++ b/drivers/char/ipmi/Kconfig -@@ -153,7 +153,8 @@ config IPMI_KCS_BMC_SERIO - - config ASPEED_BT_IPMI_BMC - depends on ARCH_ASPEED || COMPILE_TEST -- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON -+ depends on MFD_SYSCON -+ select REGMAP_MMIO - tristate "BT IPMI bmc driver" - help - Provides a driver for the BT (Block Transfer) IPMI interface -diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c -index e96cb5c4f97a3..15c211c5d6f4e 100644 ---- a/drivers/char/ipmi/ipmi_msghandler.c -+++ b/drivers/char/ipmi/ipmi_msghandler.c -@@ -11,8 +11,8 @@ - * Copyright 2002 MontaVista Software Inc. 
- */ - --#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: " --#define dev_fmt pr_fmt -+#define pr_fmt(fmt) "IPMI message handler: " fmt -+#define dev_fmt(fmt) pr_fmt(fmt) - - #include - #include -@@ -191,6 +191,8 @@ struct ipmi_user { - struct work_struct remove_work; - }; - -+static struct workqueue_struct *remove_work_wq; -+ - static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) - __acquires(user->release_barrier) - { -@@ -1261,7 +1263,7 @@ static void free_user(struct kref *ref) - struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - - /* SRCU cleanup must happen in task context. */ -- schedule_work(&user->remove_work); -+ queue_work(remove_work_wq, &user->remove_work); - } - - static void _ipmi_destroy_user(struct ipmi_user *user) -@@ -1271,6 +1273,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) - unsigned long flags; - struct cmd_rcvr *rcvr; - struct cmd_rcvr *rcvrs = NULL; -+ struct module *owner; - - if (!acquire_ipmi_user(user, &i)) { - /* -@@ -1332,8 +1335,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user) - kfree(rcvr); - } - -+ owner = intf->owner; - kref_put(&intf->refcount, intf_free); -- module_put(intf->owner); -+ module_put(owner); - } - - int ipmi_destroy_user(struct ipmi_user *user) -@@ -2930,7 +2934,7 @@ cleanup_bmc_device(struct kref *ref) - * with removing the device attributes while reading a device - * attribute. - */ -- schedule_work(&bmc->remove_work); -+ queue_work(remove_work_wq, &bmc->remove_work); - } - - /* -@@ -3525,12 +3529,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf, - struct ipmi_smi_msg *msg, - unsigned char err) - { -+ int rv; - msg->rsp[0] = msg->data[0] | 4; - msg->rsp[1] = msg->data[1]; - msg->rsp[2] = err; - msg->rsp_size = 3; -- /* It's an error, so it will never requeue, no need to check return. */ -- handle_one_recv_msg(intf, msg); -+ -+ /* This will never requeue, but it may ask us to free the message. */ -+ rv = handle_one_recv_msg(intf, msg); -+ if (rv == 0) -+ ipmi_free_smi_msg(msg); - } - - static void cleanup_smi_msgs(struct ipmi_smi *intf) -@@ -4789,7 +4797,9 @@ static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); - static void free_smi_msg(struct ipmi_smi_msg *msg) - { - atomic_dec(&smi_msg_inuse_count); -- kfree(msg); -+ /* Try to keep as much stuff out of the panic path as possible. */ -+ if (!oops_in_progress) -+ kfree(msg); - } - - struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) -@@ -4808,7 +4818,9 @@ EXPORT_SYMBOL(ipmi_alloc_smi_msg); - static void free_recv_msg(struct ipmi_recv_msg *msg) - { - atomic_dec(&recv_msg_inuse_count); -- kfree(msg); -+ /* Try to keep as much stuff out of the panic path as possible. 
*/ -+ if (!oops_in_progress) -+ kfree(msg); - } - - static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) -@@ -4826,7 +4838,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) - - void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) - { -- if (msg->user) -+ if (msg->user && !oops_in_progress) - kref_put(&msg->user->refcount, free_user); - msg->done(msg); - } -@@ -5142,7 +5154,16 @@ static int ipmi_init_msghandler(void) - if (initialized) - goto out; - -- init_srcu_struct(&ipmi_interfaces_srcu); -+ rv = init_srcu_struct(&ipmi_interfaces_srcu); -+ if (rv) -+ goto out; -+ -+ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); -+ if (!remove_work_wq) { -+ pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); -+ rv = -ENOMEM; -+ goto out_wq; -+ } - - timer_setup(&ipmi_timer, ipmi_timeout, 0); - mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); -@@ -5151,6 +5172,9 @@ static int ipmi_init_msghandler(void) - - initialized = true; - -+out_wq: -+ if (rv) -+ cleanup_srcu_struct(&ipmi_interfaces_srcu); - out: - mutex_unlock(&ipmi_interfaces_mutex); - return rv; -@@ -5174,6 +5198,8 @@ static void __exit cleanup_ipmi(void) - int count; - - if (initialized) { -+ destroy_workqueue(remove_work_wq); -+ - atomic_notifier_chain_unregister(&panic_notifier_list, - &panic_block); - -diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c -index 6f3272b58ced3..f4360fbddbffe 100644 ---- a/drivers/char/ipmi/ipmi_si_intf.c -+++ b/drivers/char/ipmi/ipmi_si_intf.c -@@ -2081,6 +2081,11 @@ static int try_smi_init(struct smi_info *new_smi) - new_smi->io.io_cleanup = NULL; - } - -+ if (rv && new_smi->si_sm) { -+ kfree(new_smi->si_sm); -+ new_smi->si_sm = NULL; -+ } -+ - return rv; - } - -@@ -2152,6 +2157,20 @@ skip_fallback_noirq: - } - module_init(init_ipmi_si); - -+static void wait_msg_processed(struct smi_info *smi_info) -+{ -+ unsigned long jiffies_now; -+ long time_diff; -+ -+ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { -+ jiffies_now = jiffies; -+ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) -+ * SI_USEC_PER_JIFFY); -+ smi_event_handler(smi_info, time_diff); -+ schedule_timeout_uninterruptible(1); -+ } -+} -+ - static void shutdown_smi(void *send_info) - { - struct smi_info *smi_info = send_info; -@@ -2186,16 +2205,13 @@ static void shutdown_smi(void *send_info) - * in the BMC. Note that timers and CPU interrupts are off, - * so no need for locks. - */ -- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { -- poll(smi_info); -- schedule_timeout_uninterruptible(1); -- } -+ wait_msg_processed(smi_info); -+ - if (smi_info->handlers) - disable_si_irq(smi_info); -- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { -- poll(smi_info); -- schedule_timeout_uninterruptible(1); -- } -+ -+ wait_msg_processed(smi_info); -+ - if (smi_info->handlers) - smi_info->handlers->cleanup(smi_info->si_sm); - -diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c -index 20d5af92966d4..30f757249c5c0 100644 ---- a/drivers/char/ipmi/ipmi_ssif.c -+++ b/drivers/char/ipmi/ipmi_ssif.c -@@ -74,7 +74,8 @@ - /* - * Timer values - */ --#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */ -+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */ -+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */ - #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */ - - /* How many times to we retry sending/receiving the message. 
*/ -@@ -82,7 +83,9 @@ - #define SSIF_RECV_RETRIES 250 - - #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000) -+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000) - #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) -+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC) - #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) - - /* -@@ -92,7 +95,7 @@ - #define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250) - - enum ssif_intf_state { -- SSIF_NORMAL, -+ SSIF_IDLE, - SSIF_GETTING_FLAGS, - SSIF_GETTING_EVENTS, - SSIF_CLEARING_FLAGS, -@@ -100,8 +103,8 @@ enum ssif_intf_state { - /* FIXME - add watchdog stuff. */ - }; - --#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \ -- && (ssif)->curr_msg == NULL) -+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \ -+ && (ssif)->curr_msg == NULL) - - /* - * Indexes into stats[] in ssif_info below. -@@ -229,6 +232,9 @@ struct ssif_info { - bool got_alert; - bool waiting_alert; - -+ /* Used to inform the timeout that it should do a resend. */ -+ bool do_resend; -+ - /* - * If set to true, this will request events the next time the - * state machine is idle. -@@ -348,9 +354,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info, - - /* - * Must be called with the message lock held. This will release the -- * message lock. Note that the caller will check SSIF_IDLE and start a -- * new operation, so there is no need to check for new messages to -- * start in here. -+ * message lock. Note that the caller will check IS_SSIF_IDLE and -+ * start a new operation, so there is no need to check for new -+ * messages to start in here. - */ - static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) - { -@@ -367,7 +373,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) - - if (start_send(ssif_info, msg, 3) != 0) { - /* Error, just go to normal state. */ -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - } - } - -@@ -382,7 +388,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) - mb[0] = (IPMI_NETFN_APP_REQUEST << 2); - mb[1] = IPMI_GET_MSG_FLAGS_CMD; - if (start_send(ssif_info, mb, 2) != 0) -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - } - - static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, -@@ -393,7 +399,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, - - flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - ssif_info->curr_msg = NULL; -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - ipmi_free_smi_msg(msg); - } -@@ -407,7 +413,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) - - msg = ipmi_alloc_smi_msg(); - if (!msg) { -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - return; - } -@@ -430,7 +436,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, - - msg = ipmi_alloc_smi_msg(); - if (!msg) { -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - return; - } -@@ -448,9 +454,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, - - /* - * Must be called with the message lock held. This will release the -- * message lock. 
Note that the caller will check SSIF_IDLE and start a -- * new operation, so there is no need to check for new messages to -- * start in here. -+ * message lock. Note that the caller will check IS_SSIF_IDLE and -+ * start a new operation, so there is no need to check for new -+ * messages to start in here. - */ - static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) - { -@@ -466,7 +472,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) - /* Events available. */ - start_event_fetch(ssif_info, flags); - else { -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - } - } -@@ -538,22 +544,30 @@ static void start_get(struct ssif_info *ssif_info) - ssif_info->recv, I2C_SMBUS_BLOCK_DATA); - } - -+static void start_resend(struct ssif_info *ssif_info); -+ - static void retry_timeout(struct timer_list *t) - { - struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer); - unsigned long oflags, *flags; -- bool waiting; -+ bool waiting, resend; - - if (ssif_info->stopping) - return; - - flags = ipmi_ssif_lock_cond(ssif_info, &oflags); -+ resend = ssif_info->do_resend; -+ ssif_info->do_resend = false; - waiting = ssif_info->waiting_alert; - ssif_info->waiting_alert = false; - ipmi_ssif_unlock_cond(ssif_info, flags); - - if (waiting) - start_get(ssif_info); -+ if (resend) { -+ start_resend(ssif_info); -+ ssif_inc_stat(ssif_info, send_retries); -+ } - } - - static void watch_timeout(struct timer_list *t) -@@ -568,7 +582,7 @@ static void watch_timeout(struct timer_list *t) - if (ssif_info->watch_timeout) { - mod_timer(&ssif_info->watch_timer, - jiffies + ssif_info->watch_timeout); -- if (SSIF_IDLE(ssif_info)) { -+ if (IS_SSIF_IDLE(ssif_info)) { - start_flag_fetch(ssif_info, flags); /* Releases lock */ - return; - } -@@ -602,8 +616,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, - start_get(ssif_info); - } - --static int start_resend(struct ssif_info *ssif_info); -- - static void msg_done_handler(struct ssif_info *ssif_info, int result, - unsigned char *data, unsigned int len) - { -@@ -756,7 +768,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - } - - switch (ssif_info->ssif_state) { -- case SSIF_NORMAL: -+ case SSIF_IDLE: - ipmi_ssif_unlock_cond(ssif_info, flags); - if (!msg) - break; -@@ -774,7 +786,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - * Error fetching flags, or invalid length, - * just give up for now. - */ -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - dev_warn(&ssif_info->client->dev, - "Error getting flags: %d %d, %x\n", -@@ -782,9 +794,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 - || data[1] != IPMI_GET_MSG_FLAGS_CMD) { - /* -- * Don't abort here, maybe it was a queued -- * response to a previous command. -+ * Recv error response, give up. 
- */ -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - dev_warn(&ssif_info->client->dev, - "Invalid response getting flags: %x %x\n", -@@ -809,11 +821,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - "Invalid response clearing flags: %x %x\n", - data[0], data[1]); - } -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - ipmi_ssif_unlock_cond(ssif_info, flags); - break; - - case SSIF_GETTING_EVENTS: -+ if (!msg) { -+ /* Should never happen, but just in case. */ -+ dev_warn(&ssif_info->client->dev, -+ "No message set while getting events\n"); -+ ipmi_ssif_unlock_cond(ssif_info, flags); -+ break; -+ } -+ - if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { - /* Error getting event, probably done. */ - msg->done(msg); -@@ -838,6 +858,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - break; - - case SSIF_GETTING_MESSAGES: -+ if (!msg) { -+ /* Should never happen, but just in case. */ -+ dev_warn(&ssif_info->client->dev, -+ "No message set while getting messages\n"); -+ ipmi_ssif_unlock_cond(ssif_info, flags); -+ break; -+ } -+ - if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) { - /* Error getting event, probably done. */ - msg->done(msg); -@@ -861,10 +889,17 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, - deliver_recv_msg(ssif_info, msg); - } - break; -+ -+ default: -+ /* Should never happen, but just in case. */ -+ dev_warn(&ssif_info->client->dev, -+ "Invalid state in message done handling: %d\n", -+ ssif_info->ssif_state); -+ ipmi_ssif_unlock_cond(ssif_info, flags); - } - - flags = ipmi_ssif_lock_cond(ssif_info, &oflags); -- if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { -+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) { - if (ssif_info->req_events) - start_event_fetch(ssif_info, flags); - else if (ssif_info->req_flags) -@@ -886,31 +921,23 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, - if (result < 0) { - ssif_info->retries_left--; - if (ssif_info->retries_left > 0) { -- if (!start_resend(ssif_info)) { -- ssif_inc_stat(ssif_info, send_retries); -- return; -- } -- /* request failed, just return the error. */ -- ssif_inc_stat(ssif_info, send_errors); -- -- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) -- dev_dbg(&ssif_info->client->dev, -- "%s: Out of retries\n", __func__); -- msg_done_handler(ssif_info, -EIO, NULL, 0); -+ /* -+ * Wait the retry timeout time per the spec, -+ * then redo the send. -+ */ -+ ssif_info->do_resend = true; -+ mod_timer(&ssif_info->retry_timer, -+ jiffies + SSIF_REQ_RETRY_JIFFIES); - return; - } - - ssif_inc_stat(ssif_info, send_errors); - -- /* -- * Got an error on transmit, let the done routine -- * handle it. 
-- */ - if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - dev_dbg(&ssif_info->client->dev, -- "%s: Error %d\n", __func__, result); -+ "%s: Out of retries\n", __func__); - -- msg_done_handler(ssif_info, result, NULL, 0); -+ msg_done_handler(ssif_info, -EIO, NULL, 0); - return; - } - -@@ -973,7 +1000,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, - } - } - --static int start_resend(struct ssif_info *ssif_info) -+static void start_resend(struct ssif_info *ssif_info) - { - int command; - -@@ -998,7 +1025,6 @@ static int start_resend(struct ssif_info *ssif_info) - - ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, - command, ssif_info->data, I2C_SMBUS_BLOCK_DATA); -- return 0; - } - - static int start_send(struct ssif_info *ssif_info, -@@ -1013,7 +1039,8 @@ static int start_send(struct ssif_info *ssif_info, - ssif_info->retries_left = SSIF_SEND_RETRIES; - memcpy(ssif_info->data + 1, data, len); - ssif_info->data_len = len; -- return start_resend(ssif_info); -+ start_resend(ssif_info); -+ return 0; - } - - /* Must be called with the message lock held. */ -@@ -1023,7 +1050,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) - unsigned long oflags; - - restart: -- if (!SSIF_IDLE(ssif_info)) { -+ if (!IS_SSIF_IDLE(ssif_info)) { - ipmi_ssif_unlock_cond(ssif_info, flags); - return; - } -@@ -1246,7 +1273,7 @@ static void shutdown_ssif(void *send_info) - dev_set_drvdata(&ssif_info->client->dev, NULL); - - /* make sure the driver is not looking for flags any more. */ -- while (ssif_info->ssif_state != SSIF_NORMAL) -+ while (ssif_info->ssif_state != SSIF_IDLE) - schedule_timeout(1); - - ssif_info->stopping = true; -@@ -1313,8 +1340,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, - ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); - if (ret) { - retry_cnt--; -- if (retry_cnt > 0) -+ if (retry_cnt > 0) { -+ msleep(SSIF_REQ_RETRY_MSEC); - goto retry1; -+ } - return -ENODEV; - } - -@@ -1385,7 +1414,7 @@ static struct ssif_addr_info *ssif_info_find(unsigned short addr, - restart: - list_for_each_entry(info, &ssif_infos, link) { - if (info->binfo.addr == addr) { -- if (info->addr_src == SI_SMBIOS) -+ if (info->addr_src == SI_SMBIOS && !info->adapter_name) - info->adapter_name = kstrdup(adapter_name, - GFP_KERNEL); - -@@ -1455,8 +1484,10 @@ retry_write: - 32, msg); - if (ret) { - retry_cnt--; -- if (retry_cnt > 0) -+ if (retry_cnt > 0) { -+ msleep(SSIF_REQ_RETRY_MSEC); - goto retry_write; -+ } - dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. 
Just limit sends to one part.\n"); - return ret; - } -@@ -1583,6 +1614,11 @@ static int ssif_add_infos(struct i2c_client *client) - info->addr_src = SI_ACPI; - info->client = client; - info->adapter_name = kstrdup(client->adapter->name, GFP_KERNEL); -+ if (!info->adapter_name) { -+ kfree(info); -+ return -ENOMEM; -+ } -+ - info->binfo.addr = client->addr; - list_add_tail(&info->link, &ssif_infos); - return 0; -@@ -1659,6 +1695,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) - } - } - -+ ssif_info->client = client; -+ i2c_set_clientdata(client, ssif_info); -+ - rv = ssif_check_and_remove(client, ssif_info); - /* If rv is 0 and addr source is not SI_ACPI, continue probing */ - if (!rv && ssif_info->addr_source == SI_ACPI) { -@@ -1679,9 +1718,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) - ipmi_addr_src_to_str(ssif_info->addr_source), - client->addr, client->adapter->name, slave_addr); - -- ssif_info->client = client; -- i2c_set_clientdata(client, ssif_info); -- - /* Now check for system interface capabilities */ - msg[0] = IPMI_NETFN_APP_REQUEST << 2; - msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD; -@@ -1818,7 +1854,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) - } - - spin_lock_init(&ssif_info->lock); -- ssif_info->ssif_state = SSIF_NORMAL; -+ ssif_info->ssif_state = SSIF_IDLE; - timer_setup(&ssif_info->retry_timer, retry_timeout, 0); - timer_setup(&ssif_info->watch_timer, watch_timeout, 0); - -@@ -1881,6 +1917,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) - - dev_err(&ssif_info->client->dev, - "Unable to start IPMI SSIF: %d\n", rv); -+ i2c_set_clientdata(client, NULL); - kfree(ssif_info); - } - kfree(resp); -diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c -index e4ff3b50de7f3..883b4a3410122 100644 ---- a/drivers/char/ipmi/ipmi_watchdog.c -+++ b/drivers/char/ipmi/ipmi_watchdog.c -@@ -342,13 +342,17 @@ static atomic_t msg_tofree = ATOMIC_INIT(0); - static DECLARE_COMPLETION(msg_wait); - static void msg_free_smi(struct ipmi_smi_msg *msg) - { -- if (atomic_dec_and_test(&msg_tofree)) -- complete(&msg_wait); -+ if (atomic_dec_and_test(&msg_tofree)) { -+ if (!oops_in_progress) -+ complete(&msg_wait); -+ } - } - static void msg_free_recv(struct ipmi_recv_msg *msg) - { -- if (atomic_dec_and_test(&msg_tofree)) -- complete(&msg_wait); -+ if (atomic_dec_and_test(&msg_tofree)) { -+ if (!oops_in_progress) -+ complete(&msg_wait); -+ } - } - static struct ipmi_smi_msg smi_msg = { - .done = msg_free_smi -@@ -434,8 +438,10 @@ static int _ipmi_set_timeout(int do_heartbeat) - rv = __ipmi_set_timeout(&smi_msg, - &recv_msg, - &send_heartbeat_now); -- if (rv) -+ if (rv) { -+ atomic_set(&msg_tofree, 0); - return rv; -+ } - - wait_for_completion(&msg_wait); - -@@ -497,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void) - msg.cmd = IPMI_WDOG_RESET_TIMER; - msg.data = NULL; - msg.data_len = 0; -- atomic_inc(&panic_done_count); -+ atomic_add(2, &panic_done_count); - rv = ipmi_request_supply_msgs(watchdog_user, - (struct ipmi_addr *) &addr, - 0, -@@ -507,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void) - &panic_halt_heartbeat_recv_msg, - 1); - if (rv) -- atomic_dec(&panic_done_count); -+ atomic_sub(2, &panic_done_count); - } - - static struct ipmi_smi_msg panic_halt_smi_msg = { -@@ -531,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void) - /* Wait for the messages to be free. 
*/ - while (atomic_read(&panic_done_count) != 0) - ipmi_poll_interface(watchdog_user); -- atomic_inc(&panic_done_count); -+ atomic_add(2, &panic_done_count); - rv = __ipmi_set_timeout(&panic_halt_smi_msg, - &panic_halt_recv_msg, - &send_heartbeat_now); - if (rv) { -- atomic_dec(&panic_done_count); -+ atomic_sub(2, &panic_done_count); - pr_warn("Unable to extend the watchdog timeout\n"); - } else { - if (send_heartbeat_now) -@@ -580,6 +586,7 @@ restart: - &recv_msg, - 1); - if (rv) { -+ atomic_set(&msg_tofree, 0); - pr_warn("heartbeat send failure: %d\n", rv); - return rv; - } -diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c -index 92a37b33494cb..f23c146bb740c 100644 ---- a/drivers/char/ipmi/kcs_bmc_aspeed.c -+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c -@@ -404,13 +404,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer) - static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) - { - struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); -+ int rc; -+ u8 str; - - /* We don't have an OBE IRQ, emulate it */ - if (mask & KCS_BMC_EVENT_TYPE_OBE) { -- if (KCS_BMC_EVENT_TYPE_OBE & state) -- mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); -- else -+ if (KCS_BMC_EVENT_TYPE_OBE & state) { -+ /* -+ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can -+ * observe such an event before returning to the caller. This is not -+ * incorrect because OBF may have already become clear before enabling the -+ * IRQ if we had one, under which circumstance no event will be propagated -+ * anyway. -+ * -+ * The onus is on the client to perform a race-free check that it hasn't -+ * missed the event. -+ */ -+ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, -+ !(str & KCS_BMC_STR_OBF), 1, 100, false, -+ &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); -+ /* Time for the slow path? 
*/ -+ if (rc == -ETIMEDOUT) -+ mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); -+ } else { - del_timer(&priv->obe.timer); -+ } - } - - if (mask & KCS_BMC_EVENT_TYPE_IBF) { -diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c -index 7948cabde50b4..7e2067628a6ce 100644 ---- a/drivers/char/ipmi/kcs_bmc_serio.c -+++ b/drivers/char/ipmi/kcs_bmc_serio.c -@@ -73,10 +73,12 @@ static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc) - struct serio *port; - - priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; - - /* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */ - port = kzalloc(sizeof(*port), GFP_KERNEL); -- if (!(priv && port)) -+ if (!port) - return -ENOMEM; - - port->id.type = SERIO_8042; -diff --git a/drivers/char/mem.c b/drivers/char/mem.c -index 1c596b5cdb279..d8e3b547e0ae7 100644 ---- a/drivers/char/mem.c -+++ b/drivers/char/mem.c -@@ -702,8 +702,8 @@ static const struct memdev { - #endif - [5] = { "zero", 0666, &zero_fops, 0 }, - [7] = { "full", 0666, &full_fops, 0 }, -- [8] = { "random", 0666, &random_fops, 0 }, -- [9] = { "urandom", 0666, &urandom_fops, 0 }, -+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT }, -+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT }, - #ifdef CONFIG_PRINTK - [11] = { "kmsg", 0644, &kmsg_fops, 0 }, - #endif -diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h -index 9ccb6b270b071..95164246afd1a 100644 ---- a/drivers/char/mwave/3780i.h -+++ b/drivers/char/mwave/3780i.h -@@ -68,7 +68,7 @@ typedef struct { - unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */ - unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */ - unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */ -- unsigned char Reserved:5; /* 0: Reserved */ -+ unsigned short Reserved:13; /* 0: Reserved */ - } DSP_ISA_SLAVE_CONTROL; - - -diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c -index 8f1bce0b4fe50..7057b7bacc8cf 100644 ---- a/drivers/char/pcmcia/cm4000_cs.c -+++ b/drivers/char/pcmcia/cm4000_cs.c -@@ -530,7 +530,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) - DEBUGP(5, dev, "NumRecBytes is valid\n"); - break; - } -- usleep_range(10000, 11000); -+ /* can not sleep as this is in atomic context */ -+ mdelay(10); - } - if (i == 100) { - DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting " -@@ -550,7 +551,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) - } - break; - } -- usleep_range(10000, 11000); -+ /* can not sleep as this is in atomic context */ -+ mdelay(10); - } - - /* check whether it is a short PTS reply? */ -diff --git a/drivers/char/random.c b/drivers/char/random.c -index 605969ed0f965..8642326de6e1c 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -1,310 +1,26 @@ -+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) - /* -- * random.c -- A strong random number generator -- * -- * Copyright (C) 2017 Jason A. Donenfeld . All -- * Rights Reserved. -- * -+ * Copyright (C) 2017-2022 Jason A. Donenfeld . All Rights Reserved. - * Copyright Matt Mackall , 2003, 2004, 2005 -- * -- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All -- * rights reserved. -- * -- * Redistribution and use in source and binary forms, with or without -- * modification, are permitted provided that the following conditions -- * are met: -- * 1. 
Redistributions of source code must retain the above copyright -- * notice, and the entire permission notice in its entirety, -- * including the disclaimer of warranties. -- * 2. Redistributions in binary form must reproduce the above copyright -- * notice, this list of conditions and the following disclaimer in the -- * documentation and/or other materials provided with the distribution. -- * 3. The name of the author may not be used to endorse or promote -- * products derived from this software without specific prior -- * written permission. -- * -- * ALTERNATIVELY, this product may be distributed under the terms of -- * the GNU General Public License, in which case the provisions of the GPL are -- * required INSTEAD OF the above restrictions. (This clause is -- * necessary due to a potential bad interaction between the GPL and -- * the restrictions contained in a BSD-style copyright.) -- * -- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED -- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF -- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE -- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH -- * DAMAGE. -- */ -- --/* -- * (now, with legal B.S. out of the way.....) -- * -- * This routine gathers environmental noise from device drivers, etc., -- * and returns good random numbers, suitable for cryptographic use. -- * Besides the obvious cryptographic uses, these numbers are also good -- * for seeding TCP sequence numbers, and other places where it is -- * desirable to have numbers which are not only random, but hard to -- * predict by an attacker. -- * -- * Theory of operation -- * =================== -- * -- * Computers are very predictable devices. Hence it is extremely hard -- * to produce truly random numbers on a computer --- as opposed to -- * pseudo-random numbers, which can easily generated by using a -- * algorithm. Unfortunately, it is very easy for attackers to guess -- * the sequence of pseudo-random number generators, and for some -- * applications this is not acceptable. So instead, we must try to -- * gather "environmental noise" from the computer's environment, which -- * must be hard for outside attackers to observe, and use that to -- * generate random numbers. In a Unix environment, this is best done -- * from inside the kernel. -- * -- * Sources of randomness from the environment include inter-keyboard -- * timings, inter-interrupt timings from some interrupts, and other -- * events which are both (a) non-deterministic and (b) hard for an -- * outside observer to measure. Randomness from these sources are -- * added to an "entropy pool", which is mixed using a CRC-like function. -- * This is not cryptographically strong, but it is adequate assuming -- * the randomness is not chosen maliciously, and it is fast enough that -- * the overhead of doing it on every interrupt is very reasonable. 
-- * As random bytes are mixed into the entropy pool, the routines keep -- * an *estimate* of how many bits of randomness have been stored into -- * the random number generator's internal state. -- * -- * When random bytes are desired, they are obtained by taking the SHA -- * hash of the contents of the "entropy pool". The SHA hash avoids -- * exposing the internal state of the entropy pool. It is believed to -- * be computationally infeasible to derive any useful information -- * about the input of SHA from its output. Even if it is possible to -- * analyze SHA in some clever way, as long as the amount of data -- * returned from the generator is less than the inherent entropy in -- * the pool, the output data is totally unpredictable. For this -- * reason, the routine decreases its internal estimate of how many -- * bits of "true randomness" are contained in the entropy pool as it -- * outputs random numbers. -- * -- * If this estimate goes to zero, the routine can still generate -- * random numbers; however, an attacker may (at least in theory) be -- * able to infer the future output of the generator from prior -- * outputs. This requires successful cryptanalysis of SHA, which is -- * not believed to be feasible, but there is a remote possibility. -- * Nonetheless, these numbers should be useful for the vast majority -- * of purposes. -- * -- * Exported interfaces ---- output -- * =============================== -- * -- * There are four exported interfaces; two for use within the kernel, -- * and two or use from userspace. -- * -- * Exported interfaces ---- userspace output -- * ----------------------------------------- -- * -- * The userspace interfaces are two character devices /dev/random and -- * /dev/urandom. /dev/random is suitable for use when very high -- * quality randomness is desired (for example, for key generation or -- * one-time pads), as it will only return a maximum of the number of -- * bits of randomness (as estimated by the random number generator) -- * contained in the entropy pool. -- * -- * The /dev/urandom device does not have this limit, and will return -- * as many bytes as are requested. As more and more random bytes are -- * requested without giving time for the entropy pool to recharge, -- * this will result in random numbers that are merely cryptographically -- * strong. For many applications, however, this is acceptable. -- * -- * Exported interfaces ---- kernel output -- * -------------------------------------- -- * -- * The primary kernel interface is -- * -- * void get_random_bytes(void *buf, int nbytes); -- * -- * This interface will return the requested number of random bytes, -- * and place it in the requested buffer. This is equivalent to a -- * read from /dev/urandom. -- * -- * For less critical applications, there are the functions: -- * -- * u32 get_random_u32() -- * u64 get_random_u64() -- * unsigned int get_random_int() -- * unsigned long get_random_long() -- * -- * These are produced by a cryptographic RNG seeded from get_random_bytes, -- * and so do not deplete the entropy pool as much. These are recommended -- * for most in-kernel operations *if the result is going to be stored in -- * the kernel*. -- * -- * Specifically, the get_random_int() family do not attempt to do -- * "anti-backtracking". If you capture the state of the kernel (e.g. -- * by snapshotting the VM), you can figure out previous get_random_int() -- * return values. But if the value is stored in the kernel anyway, -- * this is not a problem. 
-- * -- * It *is* safe to expose get_random_int() output to attackers (e.g. as -- * network cookies); given outputs 1..n, it's not feasible to predict -- * outputs 0 or n+1. The only concern is an attacker who breaks into -- * the kernel later; the get_random_int() engine is not reseeded as -- * often as the get_random_bytes() one. -- * -- * get_random_bytes() is needed for keys that need to stay secret after -- * they are erased from the kernel. For example, any key that will -- * be wrapped and stored encrypted. And session encryption keys: we'd -- * like to know that after the session is closed and the keys erased, -- * the plaintext is unrecoverable to someone who recorded the ciphertext. -- * -- * But for network ports/cookies, stack canaries, PRNG seeds, address -- * space layout randomization, session *authentication* keys, or other -- * applications where the sensitive data is stored in the kernel in -- * plaintext for as long as it's sensitive, the get_random_int() family -- * is just fine. -- * -- * Consider ASLR. We want to keep the address space secret from an -- * outside attacker while the process is running, but once the address -- * space is torn down, it's of no use to an attacker any more. And it's -- * stored in kernel data structures as long as it's alive, so worrying -- * about an attacker's ability to extrapolate it from the get_random_int() -- * CRNG is silly. -- * -- * Even some cryptographic keys are safe to generate with get_random_int(). -- * In particular, keys for SipHash are generally fine. Here, knowledge -- * of the key authorizes you to do something to a kernel object (inject -- * packets to a network connection, or flood a hash table), and the -- * key is stored with the object being protected. Once it goes away, -- * we no longer care if anyone knows the key. -- * -- * prandom_u32() -- * ------------- -- * -- * For even weaker applications, see the pseudorandom generator -- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random -- * numbers aren't security-critical at all, these are *far* cheaper. -- * Useful for self-tests, random error simulation, randomized backoffs, -- * and any other application where you trust that nobody is trying to -- * maliciously mess with you by guessing the "random" numbers. -- * -- * Exported interfaces ---- input -- * ============================== -- * -- * The current exported interfaces for gathering environmental noise -- * from the devices are: -- * -- * void add_device_randomness(const void *buf, unsigned int size); -- * void add_input_randomness(unsigned int type, unsigned int code, -- * unsigned int value); -- * void add_interrupt_randomness(int irq, int irq_flags); -- * void add_disk_randomness(struct gendisk *disk); -- * -- * add_device_randomness() is for adding data to the random pool that -- * is likely to differ between two devices (or possibly even per boot). -- * This would be things like MAC addresses or serial numbers, or the -- * read-out of the RTC. This does *not* add any actual entropy to the -- * pool, but it initializes the pool to different values for devices -- * that might otherwise be identical and have very little entropy -- * available to them (particularly common in the embedded world). -- * -- * add_input_randomness() uses the input layer interrupt timing, as well as -- * the event type information from the hardware. -- * -- * add_interrupt_randomness() uses the interrupt timing as random -- * inputs to the entropy pool. 
Using the cycle counters and the irq source -- * as inputs, it feeds the randomness roughly once a second. -- * -- * add_disk_randomness() uses what amounts to the seek time of block -- * layer request events, on a per-disk_devt basis, as input to the -- * entropy pool. Note that high-speed solid state drives with very low -- * seek times do not make for good sources of entropy, as their seek -- * times are usually fairly consistent. -- * -- * All of these routines try to estimate how many bits of randomness a -- * particular randomness source. They do this by keeping track of the -- * first and second order deltas of the event timings. -- * -- * Ensuring unpredictability at system startup -- * ============================================ -- * -- * When any operating system starts up, it will go through a sequence -- * of actions that are fairly predictable by an adversary, especially -- * if the start-up does not involve interaction with a human operator. -- * This reduces the actual number of bits of unpredictability in the -- * entropy pool below the value in entropy_count. In order to -- * counteract this effect, it helps to carry information in the -- * entropy pool across shut-downs and start-ups. To do this, put the -- * following lines an appropriate script which is run during the boot -- * sequence: -- * -- * echo "Initializing random number generator..." -- * random_seed=/var/run/random-seed -- * # Carry a random seed from start-up to start-up -- * # Load and then save the whole entropy pool -- * if [ -f $random_seed ]; then -- * cat $random_seed >/dev/urandom -- * else -- * touch $random_seed -- * fi -- * chmod 600 $random_seed -- * dd if=/dev/urandom of=$random_seed count=1 bs=512 -- * -- * and the following lines in an appropriate script which is run as -- * the system is shutdown: -- * -- * # Carry a random seed from shut-down to start-up -- * # Save the whole entropy pool -- * echo "Saving random seed..." -- * random_seed=/var/run/random-seed -- * touch $random_seed -- * chmod 600 $random_seed -- * dd if=/dev/urandom of=$random_seed count=1 bs=512 -- * -- * For example, on most modern systems using the System V init -- * scripts, such code fragments would be found in -- * /etc/rc.d/init.d/random. On older Linux systems, the correct script -- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0. -- * -- * Effectively, these commands cause the contents of the entropy pool -- * to be saved at shut-down time and reloaded into the entropy pool at -- * start-up. (The 'dd' in the addition to the bootup script is to -- * make sure that /etc/random-seed is different for every start-up, -- * even if the system crashes without executing rc.0.) Even with -- * complete knowledge of the start-up activities, predicting the state -- * of the entropy pool requires knowledge of the previous history of -- * the system. -- * -- * Configuring the /dev/random driver under Linux -- * ============================================== -- * -- * The /dev/random driver under Linux uses minor numbers 8 and 9 of -- * the /dev/mem major number (#1). So if your system does not have -- * /dev/random and /dev/urandom created already, they can be created -- * by using the commands: -- * -- * mknod /dev/random c 1 8 -- * mknod /dev/urandom c 1 9 -- * -- * Acknowledgements: -- * ================= -- * -- * Ideas for constructing this random number generator were derived -- * from Pretty Good Privacy's random number generator, and from private -- * discussions with Phil Karn. 
Colin Plumb provided a faster random -- * number generator, which speed up the mixing function of the entropy -- * pool, taken from PGPfone. Dale Worley has also contributed many -- * useful ideas and suggestions to improve this driver. -- * -- * Any flaws in the design are solely my responsibility, and should -- * not be attributed to the Phil, Colin, or any of authors of PGP. -- * -- * Further background information on this topic may be obtained from -- * RFC 1750, "Randomness Recommendations for Security", by Donald -- * Eastlake, Steve Crocker, and Jeff Schiller. -+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. -+ * -+ * This driver produces cryptographically secure pseudorandom data. It is divided -+ * into roughly six sections, each with a section header: -+ * -+ * - Initialization and readiness waiting. -+ * - Fast key erasure RNG, the "crng". -+ * - Entropy accumulation and extraction routines. -+ * - Entropy collection routines. -+ * - Userspace reader/writer interfaces. -+ * - Sysctl interface. -+ * -+ * The high level overview is that there is one input pool, into which -+ * various pieces of data are hashed. Prior to initialization, some of that -+ * data is then "credited" as having a certain number of bits of entropy. -+ * When enough bits of entropy are available, the hash is finalized and -+ * handed as a key to a stream cipher that expands it indefinitely for -+ * various consumers. This key is periodically refreshed as the various -+ * entropy collectors, described below, add data to the input pool. - */ - - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -@@ -327,7 +43,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -335,1457 +50,1075 @@ - #include - #include - #include -+#include -+#include -+#include - #include --#include -- -+#include - #include --#include - #include - #include - #include - --#define CREATE_TRACE_POINTS --#include -- --/* #define ADD_INTERRUPT_BENCH */ -+/********************************************************************* -+ * -+ * Initialization and readiness waiting. -+ * -+ * Much of the RNG infrastructure is devoted to various dependencies -+ * being able to wait until the RNG has collected enough entropy and -+ * is ready for safe consumption. -+ * -+ *********************************************************************/ - - /* -- * Configuration information -+ * crng_init is protected by base_crng->lock, and only increases -+ * its value (from empty->early->ready). - */ --#define INPUT_POOL_SHIFT 12 --#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5)) --#define OUTPUT_POOL_SHIFT 10 --#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5)) --#define EXTRACT_SIZE 10 -- -+static enum { -+ CRNG_EMPTY = 0, /* Little to no entropy collected */ -+ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */ -+ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */ -+} crng_init __read_mostly = CRNG_EMPTY; -+#define crng_ready() (likely(crng_init >= CRNG_READY)) -+/* Various types of waiters for crng_init->CRNG_READY transition. */ -+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); -+static struct fasync_struct *fasync; -+static DEFINE_SPINLOCK(random_ready_chain_lock); -+static RAW_NOTIFIER_HEAD(random_ready_chain); - --#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) -+/* Control how we warn userspace. 
*/ -+static struct ratelimit_state urandom_warning = -+ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); -+static int ratelimit_disable __read_mostly = -+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); -+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); -+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); - - /* -- * To allow fractional bits to be tracked, the entropy_count field is -- * denominated in units of 1/8th bits. -+ * Returns whether or not the input pool has been seeded and thus guaranteed -+ * to supply cryptographically secure random numbers. This applies to: the -+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, -+ * ,u64,int,long} family of functions. - * -- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in -- * credit_entropy_bits() needs to be 64 bits wide. -+ * Returns: true if the input pool has been seeded. -+ * false if the input pool has not been seeded. - */ --#define ENTROPY_SHIFT 3 --#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT) -+bool rng_is_initialized(void) -+{ -+ return crng_ready(); -+} -+EXPORT_SYMBOL(rng_is_initialized); - --/* -- * If the entropy count falls under this number of bits, then we -- * should wake up processes which are selecting or polling on write -- * access to /dev/random. -- */ --static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; -+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ -+static void try_to_generate_entropy(void); - - /* -- * Originally, we used a primitive polynomial of degree .poolwords -- * over GF(2). The taps for various sizes are defined below. They -- * were chosen to be evenly spaced except for the last tap, which is 1 -- * to get the twisting happening as fast as possible. -- * -- * For the purposes of better mixing, we use the CRC-32 polynomial as -- * well to make a (modified) twisted Generalized Feedback Shift -- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR -- * generators. ACM Transactions on Modeling and Computer Simulation -- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted -- * GFSR generators II. ACM Transactions on Modeling and Computer -- * Simulation 4:254-266) -- * -- * Thanks to Colin Plumb for suggesting this. -- * -- * The mixing operation is much less sensitive than the output hash, -- * where we use SHA-1. All that we want of mixing operation is that -- * it be a good non-cryptographic hash; i.e. it not produce collisions -- * when fed "random" data of the sort we expect to see. As long as -- * the pool state differs for different inputs, we have preserved the -- * input entropy and done a good job. The fact that an intelligent -- * attacker can construct inputs that will produce controlled -- * alterations to the pool's state is not important because we don't -- * consider such inputs to contribute any randomness. The only -- * property we need with respect to them is that the attacker can't -- * increase his/her knowledge of the pool's state. Since all -- * additions are reversible (knowing the final state and the input, -- * you can reconstruct the initial state), if an attacker has any -- * uncertainty about the initial state, he/she can only shuffle that -- * uncertainty about, but never cause any collisions (which would -- * decrease the uncertainty). -+ * Wait for the input pool to be seeded and thus guaranteed to supply -+ * cryptographically secure random numbers. 
This applies to: the /dev/urandom -+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} -+ * family of functions. Using any of these functions without first calling -+ * this function forfeits the guarantee of security. - * -- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and -- * Videau in their paper, "The Linux Pseudorandom Number Generator -- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their -- * paper, they point out that we are not using a true Twisted GFSR, -- * since Matsumoto & Kurita used a trinomial feedback polynomial (that -- * is, with only three taps, instead of the six that we are using). -- * As a result, the resulting polynomial is neither primitive nor -- * irreducible, and hence does not have a maximal period over -- * GF(2**32). They suggest a slight change to the generator -- * polynomial which improves the resulting TGFSR polynomial to be -- * irreducible, which we have made here. -+ * Returns: 0 if the input pool has been seeded. -+ * -ERESTARTSYS if the function was interrupted by a signal. - */ --static const struct poolinfo { -- int poolbitshift, poolwords, poolbytes, poolfracbits; --#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5) -- int tap1, tap2, tap3, tap4, tap5; --} poolinfo_table[] = { -- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */ -- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */ -- { S(128), 104, 76, 51, 25, 1 }, --}; -+int wait_for_random_bytes(void) -+{ -+ while (!crng_ready()) { -+ int ret; -+ -+ try_to_generate_entropy(); -+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); -+ if (ret) -+ return ret > 0 ? 0 : ret; -+ } -+ return 0; -+} -+EXPORT_SYMBOL(wait_for_random_bytes); - - /* -- * Static global variables -+ * Add a callback function that will be invoked when the input -+ * pool is initialised. -+ * -+ * returns: 0 if callback is successfully added -+ * -EALREADY if pool is already initialised (callback not called) - */ --static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); --static struct fasync_struct *fasync; -- --static DEFINE_SPINLOCK(random_ready_list_lock); --static LIST_HEAD(random_ready_list); -+int __cold register_random_ready_notifier(struct notifier_block *nb) -+{ -+ unsigned long flags; -+ int ret = -EALREADY; - --struct crng_state { -- __u32 state[16]; -- unsigned long init_time; -- spinlock_t lock; --}; -+ if (crng_ready()) -+ return ret; - --static struct crng_state primary_crng = { -- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock), --}; -+ spin_lock_irqsave(&random_ready_chain_lock, flags); -+ if (!crng_ready()) -+ ret = raw_notifier_chain_register(&random_ready_chain, nb); -+ spin_unlock_irqrestore(&random_ready_chain_lock, flags); -+ return ret; -+} - - /* -- * crng_init = 0 --> Uninitialized -- * 1 --> Initialized -- * 2 --> Initialized from input_pool -- * -- * crng_init is protected by primary_crng->lock, and only increases -- * its value (from 0->1->2). -+ * Delete a previously registered readiness callback function. 
- */ --static int crng_init = 0; --#define crng_ready() (likely(crng_init > 1)) --static int crng_init_cnt = 0; --static unsigned long crng_global_init_time = 0; --#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE) --static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]); --static void _crng_backtrack_protect(struct crng_state *crng, -- __u8 tmp[CHACHA_BLOCK_SIZE], int used); --static void process_random_ready_list(void); --static void _get_random_bytes(void *buf, int nbytes); -- --static struct ratelimit_state unseeded_warning = -- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); --static struct ratelimit_state urandom_warning = -- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); -+int __cold unregister_random_ready_notifier(struct notifier_block *nb) -+{ -+ unsigned long flags; -+ int ret; -+ -+ spin_lock_irqsave(&random_ready_chain_lock, flags); -+ ret = raw_notifier_chain_unregister(&random_ready_chain, nb); -+ spin_unlock_irqrestore(&random_ready_chain_lock, flags); -+ return ret; -+} - --static int ratelimit_disable __read_mostly; -+static void __cold process_random_ready_list(void) -+{ -+ unsigned long flags; - --module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); --MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); -+ spin_lock_irqsave(&random_ready_chain_lock, flags); -+ raw_notifier_call_chain(&random_ready_chain, 0, NULL); -+ spin_unlock_irqrestore(&random_ready_chain_lock, flags); -+} - --/********************************************************************** -+#define warn_unseeded_randomness() \ -+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \ -+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \ -+ __func__, (void *)_RET_IP_, crng_init) -+ -+ -+/********************************************************************* - * -- * OS independent entropy store. Here are the functions which handle -- * storing entropy in an entropy pool. -+ * Fast key erasure RNG, the "crng". - * -- **********************************************************************/ -+ * These functions expand entropy from the entropy extractor into -+ * long streams for external consumption using the "fast key erasure" -+ * RNG described at . -+ * -+ * There are a few exported interfaces for use by other drivers: -+ * -+ * void get_random_bytes(void *buf, size_t len) -+ * u32 get_random_u32() -+ * u64 get_random_u64() -+ * unsigned int get_random_int() -+ * unsigned long get_random_long() -+ * -+ * These interfaces will return the requested number of random bytes -+ * into the given buffer or as a return value. This is equivalent to -+ * a read from /dev/urandom. The u32, u64, int, and long family of -+ * functions may be higher performance for one-off random integers, -+ * because they do a bit of buffering and do not invoke reseeding -+ * until the buffer is emptied. 
-+ * -+ *********************************************************************/ - --struct entropy_store; --struct entropy_store { -- /* read-only data: */ -- const struct poolinfo *poolinfo; -- __u32 *pool; -- const char *name; -+enum { -+ CRNG_RESEED_START_INTERVAL = HZ, -+ CRNG_RESEED_INTERVAL = 60 * HZ -+}; - -- /* read-write data: */ -+static struct { -+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); -+ unsigned long birth; -+ unsigned long generation; - spinlock_t lock; -- unsigned short add_ptr; -- unsigned short input_rotate; -- int entropy_count; -- unsigned int last_data_init:1; -- __u8 last_data[EXTRACT_SIZE]; -+} base_crng = { -+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock) - }; - --static ssize_t extract_entropy(struct entropy_store *r, void *buf, -- size_t nbytes, int min, int rsvd); --static ssize_t _extract_entropy(struct entropy_store *r, void *buf, -- size_t nbytes, int fips); -- --static void crng_reseed(struct crng_state *crng, struct entropy_store *r); --static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy; -- --static struct entropy_store input_pool = { -- .poolinfo = &poolinfo_table[0], -- .name = "input", -- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), -- .pool = input_pool_data -+struct crng { -+ u8 key[CHACHA_KEY_SIZE]; -+ unsigned long generation; -+ local_lock_t lock; - }; - --static __u32 const twist_table[8] = { -- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, -- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; -- --/* -- * This function adds bytes into the entropy "pool". It does not -- * update the entropy estimate. The caller should call -- * credit_entropy_bits if this is appropriate. -- * -- * The pool is stirred with a primitive polynomial of the appropriate -- * degree, and then twisted. We twist by three bits at a time because -- * it's cheap to do so and helps slightly in the expected case where -- * the entropy is concentrated in the low-order bits. -- */ --static void _mix_pool_bytes(struct entropy_store *r, const void *in, -- int nbytes) --{ -- unsigned long i, tap1, tap2, tap3, tap4, tap5; -- int input_rotate; -- int wordmask = r->poolinfo->poolwords - 1; -- const char *bytes = in; -- __u32 w; -- -- tap1 = r->poolinfo->tap1; -- tap2 = r->poolinfo->tap2; -- tap3 = r->poolinfo->tap3; -- tap4 = r->poolinfo->tap4; -- tap5 = r->poolinfo->tap5; -- -- input_rotate = r->input_rotate; -- i = r->add_ptr; -- -- /* mix one byte at a time to simplify size handling and churn faster */ -- while (nbytes--) { -- w = rol32(*bytes++, input_rotate); -- i = (i - 1) & wordmask; -- -- /* XOR in the various taps */ -- w ^= r->pool[i]; -- w ^= r->pool[(i + tap1) & wordmask]; -- w ^= r->pool[(i + tap2) & wordmask]; -- w ^= r->pool[(i + tap3) & wordmask]; -- w ^= r->pool[(i + tap4) & wordmask]; -- w ^= r->pool[(i + tap5) & wordmask]; -- -- /* Mix the result back in with a twist */ -- r->pool[i] = (w >> 3) ^ twist_table[w & 7]; -- -- /* -- * Normally, we add 7 bits of rotation to the pool. -- * At the beginning of the pool, add an extra 7 bits -- * rotation, so that successive passes spread the -- * input bits across the pool evenly. -- */ -- input_rotate = (input_rotate + (i ? 
7 : 14)) & 31; -- } -- -- r->input_rotate = input_rotate; -- r->add_ptr = i; --} -+static DEFINE_PER_CPU(struct crng, crngs) = { -+ .generation = ULONG_MAX, -+ .lock = INIT_LOCAL_LOCK(crngs.lock), -+}; - --static void __mix_pool_bytes(struct entropy_store *r, const void *in, -- int nbytes) --{ -- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); -- _mix_pool_bytes(r, in, nbytes); --} -+/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ -+static void extract_entropy(void *buf, size_t len); - --static void mix_pool_bytes(struct entropy_store *r, const void *in, -- int nbytes) -+/* This extracts a new crng key from the input pool. */ -+static void crng_reseed(void) - { - unsigned long flags; -+ unsigned long next_gen; -+ u8 key[CHACHA_KEY_SIZE]; - -- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); -- spin_lock_irqsave(&r->lock, flags); -- _mix_pool_bytes(r, in, nbytes); -- spin_unlock_irqrestore(&r->lock, flags); --} -+ extract_entropy(key, sizeof(key)); - --struct fast_pool { -- __u32 pool[4]; -- unsigned long last; -- unsigned short reg_idx; -- unsigned char count; --}; -+ /* -+ * We copy the new key into the base_crng, overwriting the old one, -+ * and update the generation counter. We avoid hitting ULONG_MAX, -+ * because the per-cpu crngs are initialized to ULONG_MAX, so this -+ * forces new CPUs that come online to always initialize. -+ */ -+ spin_lock_irqsave(&base_crng.lock, flags); -+ memcpy(base_crng.key, key, sizeof(base_crng.key)); -+ next_gen = base_crng.generation + 1; -+ if (next_gen == ULONG_MAX) -+ ++next_gen; -+ WRITE_ONCE(base_crng.generation, next_gen); -+ WRITE_ONCE(base_crng.birth, jiffies); -+ if (!crng_ready()) -+ crng_init = CRNG_READY; -+ spin_unlock_irqrestore(&base_crng.lock, flags); -+ memzero_explicit(key, sizeof(key)); -+} - - /* -- * This is a fast mixing routine used by the interrupt randomness -- * collector. It's hardcoded for an 128 bit pool and assumes that any -- * locks that might be needed are taken by the caller. -+ * This generates a ChaCha block using the provided key, and then -+ * immediately overwites that key with half the block. It returns -+ * the resultant ChaCha state to the user, along with the second -+ * half of the block containing 32 bytes of random data that may -+ * be used; random_data_len may not be greater than 32. -+ * -+ * The returned ChaCha state contains within it a copy of the old -+ * key value, at index 4, so the state should always be zeroed out -+ * immediately after using in order to maintain forward secrecy. -+ * If the state cannot be erased in a timely manner, then it is -+ * safer to set the random_data parameter to &chacha_state[4] so -+ * that this function overwrites it before returning. 
- */ --static void fast_mix(struct fast_pool *f) -+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], -+ u32 chacha_state[CHACHA_STATE_WORDS], -+ u8 *random_data, size_t random_data_len) - { -- __u32 a = f->pool[0], b = f->pool[1]; -- __u32 c = f->pool[2], d = f->pool[3]; -- -- a += b; c += d; -- b = rol32(b, 6); d = rol32(d, 27); -- d ^= a; b ^= c; -- -- a += b; c += d; -- b = rol32(b, 16); d = rol32(d, 14); -- d ^= a; b ^= c; -+ u8 first_block[CHACHA_BLOCK_SIZE]; - -- a += b; c += d; -- b = rol32(b, 6); d = rol32(d, 27); -- d ^= a; b ^= c; -+ BUG_ON(random_data_len > 32); - -- a += b; c += d; -- b = rol32(b, 16); d = rol32(d, 14); -- d ^= a; b ^= c; -+ chacha_init_consts(chacha_state); -+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); -+ memset(&chacha_state[12], 0, sizeof(u32) * 4); -+ chacha20_block(chacha_state, first_block); - -- f->pool[0] = a; f->pool[1] = b; -- f->pool[2] = c; f->pool[3] = d; -- f->count++; -+ memcpy(key, first_block, CHACHA_KEY_SIZE); -+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); -+ memzero_explicit(first_block, sizeof(first_block)); - } - --static void process_random_ready_list(void) -+/* -+ * Return whether the crng seed is considered to be sufficiently old -+ * that a reseeding is needed. This happens if the last reseeding -+ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval -+ * proportional to the uptime. -+ */ -+static bool crng_has_old_seed(void) - { -- unsigned long flags; -- struct random_ready_callback *rdy, *tmp; -- -- spin_lock_irqsave(&random_ready_list_lock, flags); -- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { -- struct module *owner = rdy->owner; -- -- list_del_init(&rdy->list); -- rdy->func(rdy); -- module_put(owner); -+ static bool early_boot = true; -+ unsigned long interval = CRNG_RESEED_INTERVAL; -+ -+ if (unlikely(READ_ONCE(early_boot))) { -+ time64_t uptime = ktime_get_seconds(); -+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) -+ WRITE_ONCE(early_boot, false); -+ else -+ interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, -+ (unsigned int)uptime / 2 * HZ); - } -- spin_unlock_irqrestore(&random_ready_list_lock, flags); -+ return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval); - } - - /* -- * Credit (or debit) the entropy store with n bits of entropy. -- * Use credit_entropy_bits_safe() if the value comes from userspace -- * or otherwise should be checked for extreme values. -+ * This function returns a ChaCha state that you may use for generating -+ * random data. It also returns up to 32 bytes on its own of random data -+ * that may be used; random_data_len may not be greater than 32. - */ --static void credit_entropy_bits(struct entropy_store *r, int nbits) -+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], -+ u8 *random_data, size_t random_data_len) - { -- int entropy_count, orig; -- const int pool_size = r->poolinfo->poolfracbits; -- int nfrac = nbits << ENTROPY_SHIFT; -+ unsigned long flags; -+ struct crng *crng; - -- if (!nbits) -- return; -+ BUG_ON(random_data_len > 32); - --retry: -- entropy_count = orig = READ_ONCE(r->entropy_count); -- if (nfrac < 0) { -- /* Debit */ -- entropy_count += nfrac; -- } else { -- /* -- * Credit: we have to account for the possibility of -- * overwriting already present entropy. 
Even in the -- * ideal case of pure Shannon entropy, new contributions -- * approach the full value asymptotically: -- * -- * entropy <- entropy + (pool_size - entropy) * -- * (1 - exp(-add_entropy/pool_size)) -- * -- * For add_entropy <= pool_size/2 then -- * (1 - exp(-add_entropy/pool_size)) >= -- * (add_entropy/pool_size)*0.7869... -- * so we can approximate the exponential with -- * 3/4*add_entropy/pool_size and still be on the -- * safe side by adding at most pool_size/2 at a time. -- * -- * The use of pool_size-2 in the while statement is to -- * prevent rounding artifacts from making the loop -- * arbitrarily long; this limits the loop to log2(pool_size)*2 -- * turns no matter how large nbits is. -- */ -- int pnfrac = nfrac; -- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2; -- /* The +2 corresponds to the /4 in the denominator */ -- -- do { -- unsigned int anfrac = min(pnfrac, pool_size/2); -- unsigned int add = -- ((pool_size - entropy_count)*anfrac*3) >> s; -- -- entropy_count += add; -- pnfrac -= anfrac; -- } while (unlikely(entropy_count < pool_size-2 && pnfrac)); -+ /* -+ * For the fast path, we check whether we're ready, unlocked first, and -+ * then re-check once locked later. In the case where we're really not -+ * ready, we do fast key erasure with the base_crng directly, extracting -+ * when crng_init is CRNG_EMPTY. -+ */ -+ if (!crng_ready()) { -+ bool ready; -+ -+ spin_lock_irqsave(&base_crng.lock, flags); -+ ready = crng_ready(); -+ if (!ready) { -+ if (crng_init == CRNG_EMPTY) -+ extract_entropy(base_crng.key, sizeof(base_crng.key)); -+ crng_fast_key_erasure(base_crng.key, chacha_state, -+ random_data, random_data_len); -+ } -+ spin_unlock_irqrestore(&base_crng.lock, flags); -+ if (!ready) -+ return; - } - -- if (WARN_ON(entropy_count < 0)) { -- pr_warn("negative entropy/overflow: pool %s count %d\n", -- r->name, entropy_count); -- entropy_count = 0; -- } else if (entropy_count > pool_size) -- entropy_count = pool_size; -- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) -- goto retry; -- -- trace_credit_entropy_bits(r->name, nbits, -- entropy_count >> ENTROPY_SHIFT, _RET_IP_); -+ /* -+ * If the base_crng is old enough, we reseed, which in turn bumps the -+ * generation counter that we check below. -+ */ -+ if (unlikely(crng_has_old_seed())) -+ crng_reseed(); - -- if (r == &input_pool) { -- int entropy_bits = entropy_count >> ENTROPY_SHIFT; -+ local_lock_irqsave(&crngs.lock, flags); -+ crng = raw_cpu_ptr(&crngs); - -- if (crng_init < 2 && entropy_bits >= 128) -- crng_reseed(&primary_crng, r); -+ /* -+ * If our per-cpu crng is older than the base_crng, then it means -+ * somebody reseeded the base_crng. In that case, we do fast key -+ * erasure on the base_crng, and use its output as the new key -+ * for our per-cpu crng. This brings us up to date with base_crng. -+ */ -+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { -+ spin_lock(&base_crng.lock); -+ crng_fast_key_erasure(base_crng.key, chacha_state, -+ crng->key, sizeof(crng->key)); -+ crng->generation = base_crng.generation; -+ spin_unlock(&base_crng.lock); - } -+ -+ /* -+ * Finally, when we've made it this far, our per-cpu crng has an up -+ * to date key, and we can do fast key erasure with it to produce -+ * some random data and a ChaCha state for the caller. All other -+ * branches of this function are "unlikely", so most of the time we -+ * should wind up here immediately. 
-+ */ -+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); -+ local_unlock_irqrestore(&crngs.lock, flags); - } - --static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) -+static void _get_random_bytes(void *buf, size_t len) - { -- const int nbits_max = r->poolinfo->poolwords * 32; -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ u8 tmp[CHACHA_BLOCK_SIZE]; -+ size_t first_block_len; - -- if (nbits < 0) -- return -EINVAL; -- -- /* Cap the value to avoid overflows */ -- nbits = min(nbits, nbits_max); -+ if (!len) -+ return; - -- credit_entropy_bits(r, nbits); -- return 0; --} -+ first_block_len = min_t(size_t, 32, len); -+ crng_make_state(chacha_state, buf, first_block_len); -+ len -= first_block_len; -+ buf += first_block_len; - --/********************************************************************* -- * -- * CRNG using CHACHA20 -- * -- *********************************************************************/ -+ while (len) { -+ if (len < CHACHA_BLOCK_SIZE) { -+ chacha20_block(chacha_state, tmp); -+ memcpy(buf, tmp, len); -+ memzero_explicit(tmp, sizeof(tmp)); -+ break; -+ } - --#define CRNG_RESEED_INTERVAL (300*HZ) -+ chacha20_block(chacha_state, buf); -+ if (unlikely(chacha_state[12] == 0)) -+ ++chacha_state[13]; -+ len -= CHACHA_BLOCK_SIZE; -+ buf += CHACHA_BLOCK_SIZE; -+ } - --static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); -+ memzero_explicit(chacha_state, sizeof(chacha_state)); -+} - --#ifdef CONFIG_NUMA - /* -- * Hack to deal with crazy userspace progams when they are all trying -- * to access /dev/urandom in parallel. The programs are almost -- * certainly doing something terribly wrong, but we'll work around -- * their brain damage. -+ * This function is the exported kernel interface. It returns some -+ * number of good random numbers, suitable for key generation, seeding -+ * TCP sequence numbers, etc. It does not rely on the hardware random -+ * number generator. For random bytes direct from the hardware RNG -+ * (when available), use get_random_bytes_arch(). In order to ensure -+ * that the randomness provided by this function is okay, the function -+ * wait_for_random_bytes() should be called and return 0 at least once -+ * at any point prior. 
- */ --static struct crng_state **crng_node_pool __read_mostly; --#endif -- --static void invalidate_batched_entropy(void); --static void numa_crng_init(void); -- --static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); --static int __init parse_trust_cpu(char *arg) -+void get_random_bytes(void *buf, size_t len) - { -- return kstrtobool(arg, &trust_cpu); -+ warn_unseeded_randomness(); -+ _get_random_bytes(buf, len); - } --early_param("random.trust_cpu", parse_trust_cpu); -+EXPORT_SYMBOL(get_random_bytes); - --static bool crng_init_try_arch(struct crng_state *crng) -+static ssize_t get_random_bytes_user(struct iov_iter *iter) - { -- int i; -- bool arch_init = true; -- unsigned long rv; -- -- for (i = 4; i < 16; i++) { -- if (!arch_get_random_seed_long(&rv) && -- !arch_get_random_long(&rv)) { -- rv = random_get_entropy(); -- arch_init = false; -- } -- crng->state[i] ^= rv; -- } -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ u8 block[CHACHA_BLOCK_SIZE]; -+ size_t ret = 0, copied; - -- return arch_init; --} -+ if (unlikely(!iov_iter_count(iter))) -+ return 0; - --static bool __init crng_init_try_arch_early(struct crng_state *crng) --{ -- int i; -- bool arch_init = true; -- unsigned long rv; -- -- for (i = 4; i < 16; i++) { -- if (!arch_get_random_seed_long_early(&rv) && -- !arch_get_random_long_early(&rv)) { -- rv = random_get_entropy(); -- arch_init = false; -- } -- crng->state[i] ^= rv; -+ /* -+ * Immediately overwrite the ChaCha key at index 4 with random -+ * bytes, in case userspace causes copy_to_iter() below to sleep -+ * forever, so that we still retain forward secrecy in that case. -+ */ -+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); -+ /* -+ * However, if we're doing a read of len <= 32, we don't need to -+ * use chacha_state after, so we can simply return those bytes to -+ * the user directly. 
-+ */ -+ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) { -+ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter); -+ goto out_zero_chacha; - } - -- return arch_init; --} -+ for (;;) { -+ chacha20_block(chacha_state, block); -+ if (unlikely(chacha_state[12] == 0)) -+ ++chacha_state[13]; - --static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) --{ -- chacha_init_consts(crng->state); -- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); -- crng_init_try_arch(crng); -- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; --} -+ copied = copy_to_iter(block, sizeof(block), iter); -+ ret += copied; -+ if (!iov_iter_count(iter) || copied != sizeof(block)) -+ break; - --static void __init crng_initialize_primary(struct crng_state *crng) --{ -- chacha_init_consts(crng->state); -- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); -- if (crng_init_try_arch_early(crng) && trust_cpu) { -- invalidate_batched_entropy(); -- numa_crng_init(); -- crng_init = 2; -- pr_notice("crng done (trusting CPU's manufacturer)\n"); -+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); -+ if (ret % PAGE_SIZE == 0) { -+ if (signal_pending(current)) -+ break; -+ cond_resched(); -+ } - } -- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; --} - --#ifdef CONFIG_NUMA --static void do_numa_crng_init(struct work_struct *work) --{ -- int i; -- struct crng_state *crng; -- struct crng_state **pool; -- -- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); -- for_each_online_node(i) { -- crng = kmalloc_node(sizeof(struct crng_state), -- GFP_KERNEL | __GFP_NOFAIL, i); -- spin_lock_init(&crng->lock); -- crng_initialize_secondary(crng); -- pool[i] = crng; -- } -- mb(); -- if (cmpxchg(&crng_node_pool, NULL, pool)) { -- for_each_node(i) -- kfree(pool[i]); -- kfree(pool); -- } -+ memzero_explicit(block, sizeof(block)); -+out_zero_chacha: -+ memzero_explicit(chacha_state, sizeof(chacha_state)); -+ return ret ? ret : -EFAULT; - } - --static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); -+/* -+ * Batched entropy returns random integers. The quality of the random -+ * number is good as /dev/urandom. In order to ensure that the randomness -+ * provided by this function is okay, the function wait_for_random_bytes() -+ * should be called and return 0 at least once at any point prior. -+ */ - --static void numa_crng_init(void) -+#define DEFINE_BATCHED_ENTROPY(type) \ -+struct batch_ ##type { \ -+ /* \ -+ * We make this 1.5x a ChaCha block, so that we get the \ -+ * remaining 32 bytes from fast key erasure, plus one full \ -+ * block from the detached ChaCha state. We can increase \ -+ * the size of this later if needed so long as we keep the \ -+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. 
\ -+ */ \ -+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \ -+ local_lock_t lock; \ -+ unsigned long generation; \ -+ unsigned int position; \ -+}; \ -+ \ -+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \ -+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \ -+ .position = UINT_MAX \ -+}; \ -+ \ -+type get_random_ ##type(void) \ -+{ \ -+ type ret; \ -+ unsigned long flags; \ -+ struct batch_ ##type *batch; \ -+ unsigned long next_gen; \ -+ \ -+ warn_unseeded_randomness(); \ -+ \ -+ if (!crng_ready()) { \ -+ _get_random_bytes(&ret, sizeof(ret)); \ -+ return ret; \ -+ } \ -+ \ -+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \ -+ batch = raw_cpu_ptr(&batched_entropy_##type); \ -+ \ -+ next_gen = READ_ONCE(base_crng.generation); \ -+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \ -+ next_gen != batch->generation) { \ -+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \ -+ batch->position = 0; \ -+ batch->generation = next_gen; \ -+ } \ -+ \ -+ ret = batch->entropy[batch->position]; \ -+ batch->entropy[batch->position] = 0; \ -+ ++batch->position; \ -+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \ -+ return ret; \ -+} \ -+EXPORT_SYMBOL(get_random_ ##type); -+ -+DEFINE_BATCHED_ENTROPY(u64) -+DEFINE_BATCHED_ENTROPY(u32) -+ -+#ifdef CONFIG_SMP -+/* -+ * This function is called when the CPU is coming up, with entry -+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. -+ */ -+int __cold random_prepare_cpu(unsigned int cpu) - { -- schedule_work(&numa_crng_init_work); -+ /* -+ * When the cpu comes back online, immediately invalidate both -+ * the per-cpu crng and all batches, so that we serve fresh -+ * randomness. -+ */ -+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; -+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; -+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; -+ return 0; - } --#else --static void numa_crng_init(void) {} - #endif - - /* -- * crng_fast_load() can be called by code in the interrupt service -- * path. So we can't afford to dilly-dally. -+ * This function will use the architecture-specific hardware random -+ * number generator if it is available. It is not recommended for -+ * use. Use get_random_bytes() instead. It returns the number of -+ * bytes filled in. - */ --static int crng_fast_load(const char *cp, size_t len) -+size_t __must_check get_random_bytes_arch(void *buf, size_t len) - { -- unsigned long flags; -- char *p; -- -- if (!spin_trylock_irqsave(&primary_crng.lock, flags)) -- return 0; -- if (crng_init != 0) { -- spin_unlock_irqrestore(&primary_crng.lock, flags); -- return 0; -- } -- p = (unsigned char *) &primary_crng.state[4]; -- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) { -- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp; -- cp++; crng_init_cnt++; len--; -- } -- spin_unlock_irqrestore(&primary_crng.lock, flags); -- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { -- invalidate_batched_entropy(); -- crng_init = 1; -- pr_notice("fast init done\n"); -- } -- return 1; --} -- --/* -- * crng_slow_load() is called by add_device_randomness, which has two -- * attributes. (1) We can't trust the buffer passed to it is -- * guaranteed to be unpredictable (so it might not have any entropy at -- * all), and (2) it doesn't have the performance constraints of -- * crng_fast_load(). 
-- * -- * So we do something more comprehensive which is guaranteed to touch -- * all of the primary_crng's state, and which uses a LFSR with a -- * period of 255 as part of the mixing algorithm. Finally, we do -- * *not* advance crng_init_cnt since buffer we may get may be something -- * like a fixed DMI table (for example), which might very well be -- * unique to the machine, but is otherwise unvarying. -- */ --static int crng_slow_load(const char *cp, size_t len) --{ -- unsigned long flags; -- static unsigned char lfsr = 1; -- unsigned char tmp; -- unsigned i, max = CHACHA_KEY_SIZE; -- const char * src_buf = cp; -- char * dest_buf = (char *) &primary_crng.state[4]; -- -- if (!spin_trylock_irqsave(&primary_crng.lock, flags)) -- return 0; -- if (crng_init != 0) { -- spin_unlock_irqrestore(&primary_crng.lock, flags); -- return 0; -- } -- if (len > max) -- max = len; -- -- for (i = 0; i < max ; i++) { -- tmp = lfsr; -- lfsr >>= 1; -- if (tmp & 1) -- lfsr ^= 0xE1; -- tmp = dest_buf[i % CHACHA_KEY_SIZE]; -- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; -- lfsr += (tmp << 3) | (tmp >> 5); -- } -- spin_unlock_irqrestore(&primary_crng.lock, flags); -- return 1; --} -- --static void crng_reseed(struct crng_state *crng, struct entropy_store *r) --{ -- unsigned long flags; -- int i, num; -- union { -- __u8 block[CHACHA_BLOCK_SIZE]; -- __u32 key[8]; -- } buf; -- -- if (r) { -- num = extract_entropy(r, &buf, 32, 16, 0); -- if (num == 0) -- return; -- } else { -- _extract_crng(&primary_crng, buf.block); -- _crng_backtrack_protect(&primary_crng, buf.block, -- CHACHA_KEY_SIZE); -- } -- spin_lock_irqsave(&crng->lock, flags); -- for (i = 0; i < 8; i++) { -- unsigned long rv; -- if (!arch_get_random_seed_long(&rv) && -- !arch_get_random_long(&rv)) -- rv = random_get_entropy(); -- crng->state[i+4] ^= buf.key[i] ^ rv; -- } -- memzero_explicit(&buf, sizeof(buf)); -- crng->init_time = jiffies; -- spin_unlock_irqrestore(&crng->lock, flags); -- if (crng == &primary_crng && crng_init < 2) { -- invalidate_batched_entropy(); -- numa_crng_init(); -- crng_init = 2; -- process_random_ready_list(); -- wake_up_interruptible(&crng_init_wait); -- kill_fasync(&fasync, SIGIO, POLL_IN); -- pr_notice("crng init done\n"); -- if (unseeded_warning.missed) { -- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", -- unseeded_warning.missed); -- unseeded_warning.missed = 0; -- } -- if (urandom_warning.missed) { -- pr_notice("%d urandom warning(s) missed due to ratelimiting\n", -- urandom_warning.missed); -- urandom_warning.missed = 0; -- } -- } --} -- --static void _extract_crng(struct crng_state *crng, -- __u8 out[CHACHA_BLOCK_SIZE]) --{ -- unsigned long v, flags; -- -- if (crng_ready() && -- (time_after(crng_global_init_time, crng->init_time) || -- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) -- crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); -- spin_lock_irqsave(&crng->lock, flags); -- if (arch_get_random_long(&v)) -- crng->state[14] ^= v; -- chacha20_block(&crng->state[0], out); -- if (crng->state[12] == 0) -- crng->state[13]++; -- spin_unlock_irqrestore(&crng->lock, flags); --} -- --static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE]) --{ -- struct crng_state *crng = NULL; -- --#ifdef CONFIG_NUMA -- if (crng_node_pool) -- crng = crng_node_pool[numa_node_id()]; -- if (crng == NULL) --#endif -- crng = &primary_crng; -- _extract_crng(crng, out); --} -- --/* -- * Use the leftover bytes from the CRNG block output (if there is -- * enough) to mutate the CRNG key to provide backtracking protection. -- */ --static void _crng_backtrack_protect(struct crng_state *crng, -- __u8 tmp[CHACHA_BLOCK_SIZE], int used) --{ -- unsigned long flags; -- __u32 *s, *d; -- int i; -- -- used = round_up(used, sizeof(__u32)); -- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) { -- extract_crng(tmp); -- used = 0; -- } -- spin_lock_irqsave(&crng->lock, flags); -- s = (__u32 *) &tmp[used]; -- d = &crng->state[4]; -- for (i=0; i < 8; i++) -- *d++ ^= *s++; -- spin_unlock_irqrestore(&crng->lock, flags); --} -- --static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used) --{ -- struct crng_state *crng = NULL; -- --#ifdef CONFIG_NUMA -- if (crng_node_pool) -- crng = crng_node_pool[numa_node_id()]; -- if (crng == NULL) --#endif -- crng = &primary_crng; -- _crng_backtrack_protect(crng, tmp, used); --} -+ size_t left = len; -+ u8 *p = buf; - --static ssize_t extract_crng_user(void __user *buf, size_t nbytes) --{ -- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE; -- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); -- int large_request = (nbytes > 256); -- -- while (nbytes) { -- if (large_request && need_resched()) { -- if (signal_pending(current)) { -- if (ret == 0) -- ret = -ERESTARTSYS; -- break; -- } -- schedule(); -- } -+ while (left) { -+ unsigned long v; -+ size_t block_len = min_t(size_t, left, sizeof(unsigned long)); - -- extract_crng(tmp); -- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE); -- if (copy_to_user(buf, tmp, i)) { -- ret = -EFAULT; -+ if (!arch_get_random_long(&v)) - break; -- } - -- nbytes -= i; -- buf += i; -- ret += i; -+ memcpy(p, &v, block_len); -+ p += block_len; -+ left -= block_len; - } -- crng_backtrack_protect(tmp, i); -- -- /* Wipe data just written to memory */ -- memzero_explicit(tmp, sizeof(tmp)); - -- return ret; -+ return len - left; - } -+EXPORT_SYMBOL(get_random_bytes_arch); - - --/********************************************************************* -+/********************************************************************** - * -- * Entropy input management -+ * Entropy accumulation and extraction routines. 
- * -- *********************************************************************/ -+ * Callers may add entropy via: -+ * -+ * static void mix_pool_bytes(const void *buf, size_t len) -+ * -+ * After which, if added entropy should be credited: -+ * -+ * static void credit_init_bits(size_t bits) -+ * -+ * Finally, extract entropy via: -+ * -+ * static void extract_entropy(void *buf, size_t len) -+ * -+ **********************************************************************/ - --/* There is one of these per entropy source */ --struct timer_rand_state { -- cycles_t last_time; -- long last_delta, last_delta2; -+enum { -+ POOL_BITS = BLAKE2S_HASH_SIZE * 8, -+ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */ -+ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */ -+}; -+ -+static struct { -+ struct blake2s_state hash; -+ spinlock_t lock; -+ unsigned int init_bits; -+} input_pool = { -+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), -+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, -+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 }, -+ .hash.outlen = BLAKE2S_HASH_SIZE, -+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), - }; - --#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }; -+static void _mix_pool_bytes(const void *buf, size_t len) -+{ -+ blake2s_update(&input_pool.hash, buf, len); -+} - - /* -- * Add device- or boot-specific data to the input pool to help -- * initialize it. -- * -- * None of this adds any entropy; it is meant to avoid the problem of -- * the entropy pool having similar initial state across largely -- * identical devices. -+ * This function adds bytes into the input pool. It does not -+ * update the initialization bit counter; the caller should call -+ * credit_init_bits if this is appropriate. - */ --void add_device_randomness(const void *buf, unsigned int size) -+static void mix_pool_bytes(const void *buf, size_t len) - { -- unsigned long time = random_get_entropy() ^ jiffies; - unsigned long flags; - -- if (!crng_ready() && size) -- crng_slow_load(buf, size); -- -- trace_add_device_randomness(size, _RET_IP_); - spin_lock_irqsave(&input_pool.lock, flags); -- _mix_pool_bytes(&input_pool, buf, size); -- _mix_pool_bytes(&input_pool, &time, sizeof(time)); -+ _mix_pool_bytes(buf, len); - spin_unlock_irqrestore(&input_pool.lock, flags); - } --EXPORT_SYMBOL(add_device_randomness); -- --static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE; - - /* -- * This function adds entropy to the entropy "pool" by using timing -- * delays. It uses the timer_rand_state structure to make an estimate -- * of how many bits of entropy this call has added to the pool. -- * -- * The number "num" is also added to the pool - it should somehow describe -- * the type of event which just happened. This is currently 0-255 for -- * keyboard scan codes, and 256 upwards for interrupts. -- * -+ * This is an HKDF-like construction for using the hashed collected entropy -+ * as a PRF key, that's then expanded block-by-block. 
- */ --static void add_timer_randomness(struct timer_rand_state *state, unsigned num) -+static void extract_entropy(void *buf, size_t len) - { -- struct entropy_store *r; -+ unsigned long flags; -+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; - struct { -- long jiffies; -- unsigned cycles; -- unsigned num; -- } sample; -- long delta, delta2, delta3; -- -- sample.jiffies = jiffies; -- sample.cycles = random_get_entropy(); -- sample.num = num; -- r = &input_pool; -- mix_pool_bytes(r, &sample, sizeof(sample)); -+ unsigned long rdseed[32 / sizeof(long)]; -+ size_t counter; -+ } block; -+ size_t i; -+ -+ for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) { -+ if (!arch_get_random_seed_long(&block.rdseed[i]) && -+ !arch_get_random_long(&block.rdseed[i])) -+ block.rdseed[i] = random_get_entropy(); -+ } - -- /* -- * Calculate number of bits of randomness we probably added. -- * We take into account the first, second and third-order deltas -- * in order to make our estimate. -- */ -- delta = sample.jiffies - READ_ONCE(state->last_time); -- WRITE_ONCE(state->last_time, sample.jiffies); -+ spin_lock_irqsave(&input_pool.lock, flags); - -- delta2 = delta - READ_ONCE(state->last_delta); -- WRITE_ONCE(state->last_delta, delta); -+ /* seed = HASHPRF(last_key, entropy_input) */ -+ blake2s_final(&input_pool.hash, seed); - -- delta3 = delta2 - READ_ONCE(state->last_delta2); -- WRITE_ONCE(state->last_delta2, delta2); -+ /* next_key = HASHPRF(seed, RDSEED || 0) */ -+ block.counter = 0; -+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); -+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); - -- if (delta < 0) -- delta = -delta; -- if (delta2 < 0) -- delta2 = -delta2; -- if (delta3 < 0) -- delta3 = -delta3; -- if (delta > delta2) -- delta = delta2; -- if (delta > delta3) -- delta = delta3; -+ spin_unlock_irqrestore(&input_pool.lock, flags); -+ memzero_explicit(next_key, sizeof(next_key)); -+ -+ while (len) { -+ i = min_t(size_t, len, BLAKE2S_HASH_SIZE); -+ /* output = HASHPRF(seed, RDSEED || ++counter) */ -+ ++block.counter; -+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); -+ len -= i; -+ buf += i; -+ } - -- /* -- * delta is now minimum absolute delta. -- * Round down by 1 bit on general principles, -- * and limit entropy estimate to 12 bits. 
-- */ -- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); -+ memzero_explicit(seed, sizeof(seed)); -+ memzero_explicit(&block, sizeof(block)); - } - --void add_input_randomness(unsigned int type, unsigned int code, -- unsigned int value) -+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits) -+ -+static void __cold _credit_init_bits(size_t bits) - { -- static unsigned char last_value; -+ unsigned int new, orig, add; -+ unsigned long flags; - -- /* ignore autorepeat and the like */ -- if (value == last_value) -+ if (!bits) - return; - -- last_value = value; -- add_timer_randomness(&input_timer_state, -- (type << 4) ^ code ^ (code >> 4) ^ value); -- trace_add_input_randomness(ENTROPY_BITS(&input_pool)); --} --EXPORT_SYMBOL_GPL(add_input_randomness); -- --static DEFINE_PER_CPU(struct fast_pool, irq_randomness); -- --#ifdef ADD_INTERRUPT_BENCH --static unsigned long avg_cycles, avg_deviation; -+ add = min_t(size_t, bits, POOL_BITS); - --#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */ --#define FIXED_1_2 (1 << (AVG_SHIFT-1)) -- --static void add_interrupt_bench(cycles_t start) --{ -- long delta = random_get_entropy() - start; -- -- /* Use a weighted moving average */ -- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT); -- avg_cycles += delta; -- /* And average deviation */ -- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT); -- avg_deviation += delta; --} --#else --#define add_interrupt_bench(x) --#endif -- --static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) --{ -- __u32 *ptr = (__u32 *) regs; -- unsigned int idx; -- -- if (regs == NULL) -- return 0; -- idx = READ_ONCE(f->reg_idx); -- if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) -- idx = 0; -- ptr += idx++; -- WRITE_ONCE(f->reg_idx, idx); -- return *ptr; --} -+ do { -+ orig = READ_ONCE(input_pool.init_bits); -+ new = min_t(unsigned int, POOL_BITS, orig + add); -+ } while (cmpxchg(&input_pool.init_bits, orig, new) != orig); - --void add_interrupt_randomness(int irq, int irq_flags) --{ -- struct entropy_store *r; -- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); -- struct pt_regs *regs = get_irq_regs(); -- unsigned long now = jiffies; -- cycles_t cycles = random_get_entropy(); -- __u32 c_high, j_high; -- __u64 ip; -- -- if (cycles == 0) -- cycles = get_reg(fast_pool, regs); -- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; -- j_high = (sizeof(now) > 4) ? now >> 32 : 0; -- fast_pool->pool[0] ^= cycles ^ j_high ^ irq; -- fast_pool->pool[1] ^= now ^ c_high; -- ip = regs ? instruction_pointer(regs) : _RET_IP_; -- fast_pool->pool[2] ^= ip; -- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : -- get_reg(fast_pool, regs); -- -- fast_mix(fast_pool); -- add_interrupt_bench(cycles); -- -- if (unlikely(crng_init == 0)) { -- if ((fast_pool->count >= 64) && -- crng_fast_load((char *) fast_pool->pool, -- sizeof(fast_pool->pool))) { -- fast_pool->count = 0; -- fast_pool->last = now; -+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { -+ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. 
*/ -+ process_random_ready_list(); -+ wake_up_interruptible(&crng_init_wait); -+ kill_fasync(&fasync, SIGIO, POLL_IN); -+ pr_notice("crng init done\n"); -+ if (urandom_warning.missed) -+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", -+ urandom_warning.missed); -+ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) { -+ spin_lock_irqsave(&base_crng.lock, flags); -+ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */ -+ if (crng_init == CRNG_EMPTY) { -+ extract_entropy(base_crng.key, sizeof(base_crng.key)); -+ crng_init = CRNG_EARLY; - } -- return; -+ spin_unlock_irqrestore(&base_crng.lock, flags); - } -- -- if ((fast_pool->count < 64) && -- !time_after(now, fast_pool->last + HZ)) -- return; -- -- r = &input_pool; -- if (!spin_trylock(&r->lock)) -- return; -- -- fast_pool->last = now; -- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); -- spin_unlock(&r->lock); -- -- fast_pool->count = 0; -- -- /* award one bit for the contents of the fast pool */ -- credit_entropy_bits(r, 1); - } --EXPORT_SYMBOL_GPL(add_interrupt_randomness); - --#ifdef CONFIG_BLOCK --void add_disk_randomness(struct gendisk *disk) --{ -- if (!disk || !disk->random) -- return; -- /* first major is 1, so we get >= 0x200 here */ -- add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); -- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); --} --EXPORT_SYMBOL_GPL(add_disk_randomness); --#endif - --/********************************************************************* -+/********************************************************************** - * -- * Entropy extraction routines -+ * Entropy collection routines. - * -- *********************************************************************/ -+ * The following exported functions are used for pushing entropy into -+ * the above entropy accumulation routines: -+ * -+ * void add_device_randomness(const void *buf, size_t len); -+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); -+ * void add_bootloader_randomness(const void *buf, size_t len); -+ * void add_interrupt_randomness(int irq); -+ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); -+ * void add_disk_randomness(struct gendisk *disk); -+ * -+ * add_device_randomness() adds data to the input pool that -+ * is likely to differ between two devices (or possibly even per boot). -+ * This would be things like MAC addresses or serial numbers, or the -+ * read-out of the RTC. This does *not* credit any actual entropy to -+ * the pool, but it initializes the pool to different values for devices -+ * that might otherwise be identical and have very little entropy -+ * available to them (particularly common in the embedded world). -+ * -+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit -+ * entropy as specified by the caller. If the entropy pool is full it will -+ * block until more entropy is needed. -+ * -+ * add_bootloader_randomness() is called by bootloader drivers, such as EFI -+ * and device tree, and credits its input depending on whether or not the -+ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set. -+ * -+ * add_interrupt_randomness() uses the interrupt timing as random -+ * inputs to the entropy pool. Using the cycle counters and the irq source -+ * as inputs, it feeds the input pool roughly once a second or after 64 -+ * interrupts, crediting 1 bit of entropy for whichever comes first. 
-+ * -+ * add_input_randomness() uses the input layer interrupt timing, as well -+ * as the event type information from the hardware. -+ * -+ * add_disk_randomness() uses what amounts to the seek time of block -+ * layer request events, on a per-disk_devt basis, as input to the -+ * entropy pool. Note that high-speed solid state drives with very low -+ * seek times do not make for good sources of entropy, as their seek -+ * times are usually fairly consistent. -+ * -+ * The last two routines try to estimate how many bits of entropy -+ * to credit. They do this by keeping track of the first and second -+ * order deltas of the event timings. -+ * -+ **********************************************************************/ - --/* -- * This function decides how many bytes to actually take from the -- * given pool, and also debits the entropy count accordingly. -- */ --static size_t account(struct entropy_store *r, size_t nbytes, int min, -- int reserved) -+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); -+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); -+static int __init parse_trust_cpu(char *arg) - { -- int entropy_count, orig, have_bytes; -- size_t ibytes, nfrac; -- -- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); -- -- /* Can we pull enough? */ --retry: -- entropy_count = orig = READ_ONCE(r->entropy_count); -- ibytes = nbytes; -- /* never pull more than available */ -- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); -- -- if ((have_bytes -= reserved) < 0) -- have_bytes = 0; -- ibytes = min_t(size_t, ibytes, have_bytes); -- if (ibytes < min) -- ibytes = 0; -- -- if (WARN_ON(entropy_count < 0)) { -- pr_warn("negative entropy count: pool %s count %d\n", -- r->name, entropy_count); -- entropy_count = 0; -- } -- nfrac = ibytes << (ENTROPY_SHIFT + 3); -- if ((size_t) entropy_count > nfrac) -- entropy_count -= nfrac; -- else -- entropy_count = 0; -- -- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) -- goto retry; -- -- trace_debit_entropy(r->name, 8 * ibytes); -- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) { -- wake_up_interruptible(&random_write_wait); -- kill_fasync(&fasync, SIGIO, POLL_OUT); -- } -- -- return ibytes; -+ return kstrtobool(arg, &trust_cpu); - } -- --/* -- * This function does the actual extraction for extract_entropy. -- * -- * Note: we assume that .poolwords is a multiple of 16 words. -- */ --static void extract_buf(struct entropy_store *r, __u8 *out) -+static int __init parse_trust_bootloader(char *arg) - { -- int i; -- union { -- __u32 w[5]; -- unsigned long l[LONGS(20)]; -- } hash; -- __u32 workspace[SHA1_WORKSPACE_WORDS]; -- unsigned long flags; -- -- /* -- * If we have an architectural hardware random number -- * generator, use it for SHA's initial vector -- */ -- sha1_init(hash.w); -- for (i = 0; i < LONGS(20); i++) { -- unsigned long v; -- if (!arch_get_random_long(&v)) -- break; -- hash.l[i] = v; -- } -- -- /* Generate a hash across the pool, 16 words (512 bits) at a time */ -- spin_lock_irqsave(&r->lock, flags); -- for (i = 0; i < r->poolinfo->poolwords; i += 16) -- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace); -- -- /* -- * We mix the hash back into the pool to prevent backtracking -- * attacks (where the attacker knows the state of the pool -- * plus the current outputs, and attempts to find previous -- * ouputs), unless the hash function can be inverted. 
By -- * mixing at least a SHA1 worth of hash data back, we make -- * brute-forcing the feedback as hard as brute-forcing the -- * hash. -- */ -- __mix_pool_bytes(r, hash.w, sizeof(hash.w)); -- spin_unlock_irqrestore(&r->lock, flags); -- -- memzero_explicit(workspace, sizeof(workspace)); -- -- /* -- * In case the hash function has some recognizable output -- * pattern, we fold it in half. Thus, we always feed back -- * twice as much data as we output. -- */ -- hash.w[0] ^= hash.w[3]; -- hash.w[1] ^= hash.w[4]; -- hash.w[2] ^= rol32(hash.w[2], 16); -- -- memcpy(out, &hash, EXTRACT_SIZE); -- memzero_explicit(&hash, sizeof(hash)); -+ return kstrtobool(arg, &trust_bootloader); - } -+early_param("random.trust_cpu", parse_trust_cpu); -+early_param("random.trust_bootloader", parse_trust_bootloader); - --static ssize_t _extract_entropy(struct entropy_store *r, void *buf, -- size_t nbytes, int fips) -+/* -+ * The first collection of entropy occurs at system boot while interrupts -+ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp, -+ * utsname(), and the command line. Depending on the above configuration knob, -+ * RDSEED may be considered sufficient for initialization. Note that much -+ * earlier setup may already have pushed entropy into the input pool by the -+ * time we get here. -+ */ -+int __init random_init(const char *command_line) - { -- ssize_t ret = 0, i; -- __u8 tmp[EXTRACT_SIZE]; -- unsigned long flags; -+ ktime_t now = ktime_get_real(); -+ unsigned int i, arch_bits; -+ unsigned long entropy; - -- while (nbytes) { -- extract_buf(r, tmp); -+#if defined(LATENT_ENTROPY_PLUGIN) -+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; -+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); -+#endif - -- if (fips) { -- spin_lock_irqsave(&r->lock, flags); -- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) -- panic("Hardware RNG duplicated output!\n"); -- memcpy(r->last_data, tmp, EXTRACT_SIZE); -- spin_unlock_irqrestore(&r->lock, flags); -+ for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8; -+ i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { -+ if (!arch_get_random_seed_long_early(&entropy) && -+ !arch_get_random_long_early(&entropy)) { -+ entropy = random_get_entropy(); -+ arch_bits -= sizeof(entropy) * 8; - } -- i = min_t(int, nbytes, EXTRACT_SIZE); -- memcpy(buf, tmp, i); -- nbytes -= i; -- buf += i; -- ret += i; -+ _mix_pool_bytes(&entropy, sizeof(entropy)); - } -+ _mix_pool_bytes(&now, sizeof(now)); -+ _mix_pool_bytes(utsname(), sizeof(*(utsname()))); -+ _mix_pool_bytes(command_line, strlen(command_line)); -+ add_latent_entropy(); - -- /* Wipe data just returned from memory */ -- memzero_explicit(tmp, sizeof(tmp)); -+ if (crng_ready()) -+ crng_reseed(); -+ else if (trust_cpu) -+ _credit_init_bits(arch_bits); - -- return ret; -+ return 0; - } - - /* -- * This function extracts randomness from the "entropy pool", and -- * returns it in a buffer. -+ * Add device- or boot-specific data to the input pool to help -+ * initialize it. - * -- * The min parameter specifies the minimum amount we can pull before -- * failing to avoid races that defeat catastrophic reseeding while the -- * reserved parameter indicates how much entropy we must leave in the -- * pool after each pull to avoid starving other readers. -+ * None of this adds any entropy; it is meant to avoid the problem of -+ * the entropy pool having similar initial state across largely -+ * identical devices. 
- */ --static ssize_t extract_entropy(struct entropy_store *r, void *buf, -- size_t nbytes, int min, int reserved) -+void add_device_randomness(const void *buf, size_t len) - { -- __u8 tmp[EXTRACT_SIZE]; -+ unsigned long entropy = random_get_entropy(); - unsigned long flags; - -- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ -- if (fips_enabled) { -- spin_lock_irqsave(&r->lock, flags); -- if (!r->last_data_init) { -- r->last_data_init = 1; -- spin_unlock_irqrestore(&r->lock, flags); -- trace_extract_entropy(r->name, EXTRACT_SIZE, -- ENTROPY_BITS(r), _RET_IP_); -- extract_buf(r, tmp); -- spin_lock_irqsave(&r->lock, flags); -- memcpy(r->last_data, tmp, EXTRACT_SIZE); -- } -- spin_unlock_irqrestore(&r->lock, flags); -- } -- -- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_); -- nbytes = account(r, nbytes, min, reserved); -- -- return _extract_entropy(r, buf, nbytes, fips_enabled); -+ spin_lock_irqsave(&input_pool.lock, flags); -+ _mix_pool_bytes(&entropy, sizeof(entropy)); -+ _mix_pool_bytes(buf, len); -+ spin_unlock_irqrestore(&input_pool.lock, flags); - } -+EXPORT_SYMBOL(add_device_randomness); - --#define warn_unseeded_randomness(previous) \ -- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) -- --static void _warn_unseeded_randomness(const char *func_name, void *caller, -- void **previous) -+/* -+ * Interface for in-kernel drivers of true hardware RNGs. -+ * Those devices may produce endless random bits and will be throttled -+ * when our pool is full. -+ */ -+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy) - { --#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM -- const bool print_once = false; --#else -- static bool print_once __read_mostly; --#endif -+ mix_pool_bytes(buf, len); -+ credit_init_bits(entropy); - -- if (print_once || -- crng_ready() || -- (previous && (caller == READ_ONCE(*previous)))) -- return; -- WRITE_ONCE(*previous, caller); --#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM -- print_once = true; --#endif -- if (__ratelimit(&unseeded_warning)) -- printk_deferred(KERN_NOTICE "random: %s called from %pS " -- "with crng_init=%d\n", func_name, caller, -- crng_init); -+ /* -+ * Throttle writing to once every CRNG_RESEED_INTERVAL, unless -+ * we're not yet initialized. -+ */ -+ if (!kthread_should_stop() && crng_ready()) -+ schedule_timeout_interruptible(CRNG_RESEED_INTERVAL); - } -+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); - - /* -- * This function is the exported kernel interface. It returns some -- * number of good random numbers, suitable for key generation, seeding -- * TCP sequence numbers, etc. It does not rely on the hardware random -- * number generator. For random bytes direct from the hardware RNG -- * (when available), use get_random_bytes_arch(). In order to ensure -- * that the randomness provided by this function is okay, the function -- * wait_for_random_bytes() should be called and return 0 at least once -- * at any point prior. -+ * Handle random seed passed by bootloader, and credit it if -+ * CONFIG_RANDOM_TRUST_BOOTLOADER is set. 
- */ --static void _get_random_bytes(void *buf, int nbytes) -+void __init add_bootloader_randomness(const void *buf, size_t len) - { -- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4); -- -- trace_get_random_bytes(nbytes, _RET_IP_); -- -- while (nbytes >= CHACHA_BLOCK_SIZE) { -- extract_crng(buf); -- buf += CHACHA_BLOCK_SIZE; -- nbytes -= CHACHA_BLOCK_SIZE; -- } -- -- if (nbytes > 0) { -- extract_crng(tmp); -- memcpy(buf, tmp, nbytes); -- crng_backtrack_protect(tmp, nbytes); -- } else -- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE); -- memzero_explicit(tmp, sizeof(tmp)); -+ mix_pool_bytes(buf, len); -+ if (trust_bootloader) -+ credit_init_bits(len * 8); - } - --void get_random_bytes(void *buf, int nbytes) --{ -- static void *previous; -+struct fast_pool { -+ unsigned long pool[4]; -+ unsigned long last; -+ unsigned int count; -+ struct timer_list mix; -+}; - -- warn_unseeded_randomness(&previous); -- _get_random_bytes(buf, nbytes); --} --EXPORT_SYMBOL(get_random_bytes); -+static void mix_interrupt_randomness(struct timer_list *work); - -+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { -+#ifdef CONFIG_64BIT -+#define FASTMIX_PERM SIPHASH_PERMUTATION -+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }, -+#else -+#define FASTMIX_PERM HSIPHASH_PERMUTATION -+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }, -+#endif -+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0) -+}; - - /* -- * Each time the timer fires, we expect that we got an unpredictable -- * jump in the cycle counter. Even if the timer is running on another -- * CPU, the timer activity will be touching the stack of the CPU that is -- * generating entropy.. -- * -- * Note that we don't re-arm the timer in the timer itself - we are -- * happy to be scheduled away, since that just makes the load more -- * complex, but we do not want the timer to keep ticking unless the -- * entropy loop is running. -- * -- * So the re-arming always happens in the entropy loop itself. -+ * This is [Half]SipHash-1-x, starting from an empty key. Because -+ * the key is fixed, it assumes that its inputs are non-malicious, -+ * and therefore this has no security on its own. s represents the -+ * four-word SipHash state, while v represents a two-word input. - */ --static void entropy_timer(struct timer_list *t) -+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) - { -- credit_entropy_bits(&input_pool, 1); -+ s[3] ^= v1; -+ FASTMIX_PERM(s[0], s[1], s[2], s[3]); -+ s[0] ^= v1; -+ s[3] ^= v2; -+ FASTMIX_PERM(s[0], s[1], s[2], s[3]); -+ s[0] ^= v2; - } - -+#ifdef CONFIG_SMP - /* -- * If we have an actual cycle counter, see if we can -- * generate enough entropy with timing noise -+ * This function is called when the CPU has just come online, with -+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. - */ --static void try_to_generate_entropy(void) -+int __cold random_online_cpu(unsigned int cpu) - { -- struct { -- unsigned long now; -- struct timer_list timer; -- } stack; -+ /* -+ * During CPU shutdown and before CPU onlining, add_interrupt_ -+ * randomness() may schedule mix_interrupt_randomness(), and -+ * set the MIX_INFLIGHT flag. However, because the worker can -+ * be scheduled on a different CPU during this period, that -+ * flag will never be cleared. For that reason, we zero out -+ * the flag here, which runs just after workqueues are onlined -+ * for the CPU again. 
This also has the effect of setting the -+ * irq randomness count to zero so that new accumulated irqs -+ * are fresh. -+ */ -+ per_cpu_ptr(&irq_randomness, cpu)->count = 0; -+ return 0; -+} -+#endif - -- stack.now = random_get_entropy(); -+static void mix_interrupt_randomness(struct timer_list *work) -+{ -+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); -+ /* -+ * The size of the copied stack pool is explicitly 2 longs so that we -+ * only ever ingest half of the siphash output each time, retaining -+ * the other half as the next "key" that carries over. The entropy is -+ * supposed to be sufficiently dispersed between bits so on average -+ * we don't wind up "losing" some. -+ */ -+ unsigned long pool[2]; -+ unsigned int count; - -- /* Slow counter - or none. Don't even bother */ -- if (stack.now == random_get_entropy()) -+ /* Check to see if we're running on the wrong CPU due to hotplug. */ -+ local_irq_disable(); -+ if (fast_pool != this_cpu_ptr(&irq_randomness)) { -+ local_irq_enable(); - return; -- -- timer_setup_on_stack(&stack.timer, entropy_timer, 0); -- while (!crng_ready()) { -- if (!timer_pending(&stack.timer)) -- mod_timer(&stack.timer, jiffies+1); -- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); -- schedule(); -- stack.now = random_get_entropy(); - } - -- del_timer_sync(&stack.timer); -- destroy_timer_on_stack(&stack.timer); -- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now)); --} -- --/* -- * Wait for the urandom pool to be seeded and thus guaranteed to supply -- * cryptographically secure random numbers. This applies to: the /dev/urandom -- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} -- * family of functions. Using any of these functions without first calling -- * this function forfeits the guarantee of security. -- * -- * Returns: 0 if the urandom pool has been seeded. -- * -ERESTARTSYS if the function was interrupted by a signal. -- */ --int wait_for_random_bytes(void) --{ -- if (likely(crng_ready())) -- return 0; -- -- do { -- int ret; -- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); -- if (ret) -- return ret > 0 ? 0 : ret; -+ /* -+ * Copy the pool to the stack so that the mixer always has a -+ * consistent view, before we reenable irqs again. -+ */ -+ memcpy(pool, fast_pool->pool, sizeof(pool)); -+ count = fast_pool->count; -+ fast_pool->count = 0; -+ fast_pool->last = jiffies; -+ local_irq_enable(); - -- try_to_generate_entropy(); -- } while (!crng_ready()); -+ mix_pool_bytes(pool, sizeof(pool)); -+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8)); - -- return 0; -+ memzero_explicit(pool, sizeof(pool)); - } --EXPORT_SYMBOL(wait_for_random_bytes); - --/* -- * Returns whether or not the urandom pool has been seeded and thus guaranteed -- * to supply cryptographically secure random numbers. This applies to: the -- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, -- * ,u64,int,long} family of functions. -- * -- * Returns: true if the urandom pool has been seeded. -- * false if the urandom pool has not been seeded. -- */ --bool rng_is_initialized(void) --{ -- return crng_ready(); --} --EXPORT_SYMBOL(rng_is_initialized); -- --/* -- * Add a callback function that will be invoked when the nonblocking -- * pool is initialised. 
-- * -- * returns: 0 if callback is successfully added -- * -EALREADY if pool is already initialised (callback not called) -- * -ENOENT if module for callback is not alive -- */ --int add_random_ready_callback(struct random_ready_callback *rdy) -+void add_interrupt_randomness(int irq) - { -- struct module *owner; -- unsigned long flags; -- int err = -EALREADY; -- -- if (crng_ready()) -- return err; -- -- owner = rdy->owner; -- if (!try_module_get(owner)) -- return -ENOENT; -- -- spin_lock_irqsave(&random_ready_list_lock, flags); -- if (crng_ready()) -- goto out; -- -- owner = NULL; -+ enum { MIX_INFLIGHT = 1U << 31 }; -+ unsigned long entropy = random_get_entropy(); -+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); -+ struct pt_regs *regs = get_irq_regs(); -+ unsigned int new_count; - -- list_add(&rdy->list, &random_ready_list); -- err = 0; -+ fast_mix(fast_pool->pool, entropy, -+ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); -+ new_count = ++fast_pool->count; - --out: -- spin_unlock_irqrestore(&random_ready_list_lock, flags); -+ if (new_count & MIX_INFLIGHT) -+ return; - -- module_put(owner); -+ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) -+ return; - -- return err; -+ fast_pool->count |= MIX_INFLIGHT; -+ if (!timer_pending(&fast_pool->mix)) { -+ fast_pool->mix.expires = jiffies; -+ add_timer_on(&fast_pool->mix, raw_smp_processor_id()); -+ } - } --EXPORT_SYMBOL(add_random_ready_callback); -+EXPORT_SYMBOL_GPL(add_interrupt_randomness); -+ -+/* There is one of these per entropy source */ -+struct timer_rand_state { -+ unsigned long last_time; -+ long last_delta, last_delta2; -+}; - - /* -- * Delete a previously registered readiness callback function. -+ * This function adds entropy to the entropy "pool" by using timing -+ * delays. It uses the timer_rand_state structure to make an estimate -+ * of how many bits of entropy this call has added to the pool. The -+ * value "num" is also added to the pool; it should somehow describe -+ * the type of event that just happened. - */ --void del_random_ready_callback(struct random_ready_callback *rdy) -+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) - { -- unsigned long flags; -- struct module *owner = NULL; -+ unsigned long entropy = random_get_entropy(), now = jiffies, flags; -+ long delta, delta2, delta3; -+ unsigned int bits; - -- spin_lock_irqsave(&random_ready_list_lock, flags); -- if (!list_empty(&rdy->list)) { -- list_del_init(&rdy->list); -- owner = rdy->owner; -+ /* -+ * If we're in a hard IRQ, add_interrupt_randomness() will be called -+ * sometime after, so mix into the fast pool. -+ */ -+ if (in_hardirq()) { -+ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); -+ } else { -+ spin_lock_irqsave(&input_pool.lock, flags); -+ _mix_pool_bytes(&entropy, sizeof(entropy)); -+ _mix_pool_bytes(&num, sizeof(num)); -+ spin_unlock_irqrestore(&input_pool.lock, flags); - } -- spin_unlock_irqrestore(&random_ready_list_lock, flags); - -- module_put(owner); --} --EXPORT_SYMBOL(del_random_ready_callback); -+ if (crng_ready()) -+ return; -+ -+ /* -+ * Calculate number of bits of randomness we probably added. -+ * We take into account the first, second and third-order deltas -+ * in order to make our estimate. -+ */ -+ delta = now - READ_ONCE(state->last_time); -+ WRITE_ONCE(state->last_time, now); - --/* -- * This function will use the architecture-specific hardware random -- * number generator if it is available. 
The arch-specific hw RNG will -- * almost certainly be faster than what we can do in software, but it -- * is impossible to verify that it is implemented securely (as -- * opposed, to, say, the AES encryption of a sequence number using a -- * key known by the NSA). So it's useful if we need the speed, but -- * only if we're willing to trust the hardware manufacturer not to -- * have put in a back door. -- * -- * Return number of bytes filled in. -- */ --int __must_check get_random_bytes_arch(void *buf, int nbytes) --{ -- int left = nbytes; -- char *p = buf; -+ delta2 = delta - READ_ONCE(state->last_delta); -+ WRITE_ONCE(state->last_delta, delta); - -- trace_get_random_bytes_arch(left, _RET_IP_); -- while (left) { -- unsigned long v; -- int chunk = min_t(int, left, sizeof(unsigned long)); -+ delta3 = delta2 - READ_ONCE(state->last_delta2); -+ WRITE_ONCE(state->last_delta2, delta2); - -- if (!arch_get_random_long(&v)) -- break; -+ if (delta < 0) -+ delta = -delta; -+ if (delta2 < 0) -+ delta2 = -delta2; -+ if (delta3 < 0) -+ delta3 = -delta3; -+ if (delta > delta2) -+ delta = delta2; -+ if (delta > delta3) -+ delta = delta3; - -- memcpy(p, &v, chunk); -- p += chunk; -- left -= chunk; -- } -+ /* -+ * delta is now minimum absolute delta. Round down by 1 bit -+ * on general principles, and limit entropy estimate to 11 bits. -+ */ -+ bits = min(fls(delta >> 1), 11); - -- return nbytes - left; -+ /* -+ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness() -+ * will run after this, which uses a different crediting scheme of 1 bit -+ * per every 64 interrupts. In order to let that function do accounting -+ * close to the one in this function, we credit a full 64/64 bit per bit, -+ * and then subtract one to account for the extra one added. -+ */ -+ if (in_hardirq()) -+ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1; -+ else -+ _credit_init_bits(bits); - } --EXPORT_SYMBOL(get_random_bytes_arch); - --/* -- * init_std_data - initialize pool with system data -- * -- * @r: pool to initialize -- * -- * This function clears the pool's entropy count and mixes some system -- * data into the pool to prepare it for use. The pool is not cleared -- * as that can only decrease the entropy in the pool. -- */ --static void __init init_std_data(struct entropy_store *r) -+void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) - { -- int i; -- ktime_t now = ktime_get_real(); -- unsigned long rv; -- -- mix_pool_bytes(r, &now, sizeof(now)); -- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { -- if (!arch_get_random_seed_long(&rv) && -- !arch_get_random_long(&rv)) -- rv = random_get_entropy(); -- mix_pool_bytes(r, &rv, sizeof(rv)); -- } -- mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); -+ static unsigned char last_value; -+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; -+ -+ /* Ignore autorepeat and the like. */ -+ if (value == last_value) -+ return; -+ -+ last_value = value; -+ add_timer_randomness(&input_timer_state, -+ (type << 4) ^ code ^ (code >> 4) ^ value); - } -+EXPORT_SYMBOL_GPL(add_input_randomness); - --/* -- * Note that setup_arch() may call add_device_randomness() -- * long before we get here. This allows seeding of the pools -- * with some platform dependent data very early in the boot -- * process. But it limits our options here. We must use -- * statically allocated structures that already have all -- * initializations complete at compile time. 
We should also -- * take care not to overwrite the precious per platform data -- * we were given. -- */ --int __init rand_initialize(void) -+#ifdef CONFIG_BLOCK -+void add_disk_randomness(struct gendisk *disk) - { -- init_std_data(&input_pool); -- crng_initialize_primary(&primary_crng); -- crng_global_init_time = jiffies; -- if (ratelimit_disable) { -- urandom_warning.interval = 0; -- unseeded_warning.interval = 0; -- } -- return 0; -+ if (!disk || !disk->random) -+ return; -+ /* First major is 1, so we get >= 0x200 here. */ -+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); - } -+EXPORT_SYMBOL_GPL(add_disk_randomness); - --#ifdef CONFIG_BLOCK --void rand_initialize_disk(struct gendisk *disk) -+void __cold rand_initialize_disk(struct gendisk *disk) - { - struct timer_rand_state *state; - -@@ -1801,116 +1134,194 @@ void rand_initialize_disk(struct gendisk *disk) - } - #endif - --static ssize_t --urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes, -- loff_t *ppos) -+/* -+ * Each time the timer fires, we expect that we got an unpredictable -+ * jump in the cycle counter. Even if the timer is running on another -+ * CPU, the timer activity will be touching the stack of the CPU that is -+ * generating entropy.. -+ * -+ * Note that we don't re-arm the timer in the timer itself - we are -+ * happy to be scheduled away, since that just makes the load more -+ * complex, but we do not want the timer to keep ticking unless the -+ * entropy loop is running. -+ * -+ * So the re-arming always happens in the entropy loop itself. -+ */ -+static void __cold entropy_timer(struct timer_list *t) - { -- int ret; -- -- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); -- ret = extract_crng_user(buf, nbytes); -- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool)); -- return ret; -+ credit_init_bits(1); - } - --static ssize_t --urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) -+/* -+ * If we have an actual cycle counter, see if we can -+ * generate enough entropy with timing noise -+ */ -+static void __cold try_to_generate_entropy(void) - { -- unsigned long flags; -- static int maxwarn = 10; -+ struct { -+ unsigned long entropy; -+ struct timer_list timer; -+ } stack; -+ -+ stack.entropy = random_get_entropy(); -+ -+ /* Slow counter - or none. Don't even bother */ -+ if (stack.entropy == random_get_entropy()) -+ return; - -- if (!crng_ready() && maxwarn > 0) { -- maxwarn--; -- if (__ratelimit(&urandom_warning)) -- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", -- current->comm, nbytes); -- spin_lock_irqsave(&primary_crng.lock, flags); -- crng_init_cnt = 0; -- spin_unlock_irqrestore(&primary_crng.lock, flags); -+ timer_setup_on_stack(&stack.timer, entropy_timer, 0); -+ while (!crng_ready() && !signal_pending(current)) { -+ if (!timer_pending(&stack.timer)) -+ mod_timer(&stack.timer, jiffies + 1); -+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); -+ schedule(); -+ stack.entropy = random_get_entropy(); - } - -- return urandom_read_nowarn(file, buf, nbytes, ppos); -+ del_timer_sync(&stack.timer); -+ destroy_timer_on_stack(&stack.timer); -+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); - } - --static ssize_t --random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) -+ -+/********************************************************************** -+ * -+ * Userspace reader/writer interfaces. 
-+ * -+ * getrandom(2) is the primary modern interface into the RNG and should -+ * be used in preference to anything else. -+ * -+ * Reading from /dev/random has the same functionality as calling -+ * getrandom(2) with flags=0. In earlier versions, however, it had -+ * vastly different semantics and should therefore be avoided, to -+ * prevent backwards compatibility issues. -+ * -+ * Reading from /dev/urandom has the same functionality as calling -+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block -+ * waiting for the RNG to be ready, it should not be used. -+ * -+ * Writing to either /dev/random or /dev/urandom adds entropy to -+ * the input pool but does not credit it. -+ * -+ * Polling on /dev/random indicates when the RNG is initialized, on -+ * the read side, and when it wants new entropy, on the write side. -+ * -+ * Both /dev/random and /dev/urandom have the same set of ioctls for -+ * adding entropy, getting the entropy count, zeroing the count, and -+ * reseeding the crng. -+ * -+ **********************************************************************/ -+ -+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags) - { -+ struct iov_iter iter; -+ struct iovec iov; - int ret; - -- ret = wait_for_random_bytes(); -- if (ret != 0) -+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) -+ return -EINVAL; -+ -+ /* -+ * Requesting insecure and blocking randomness at the same time makes -+ * no sense. -+ */ -+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) -+ return -EINVAL; -+ -+ if (!crng_ready() && !(flags & GRND_INSECURE)) { -+ if (flags & GRND_NONBLOCK) -+ return -EAGAIN; -+ ret = wait_for_random_bytes(); -+ if (unlikely(ret)) -+ return ret; -+ } -+ -+ ret = import_single_range(READ, ubuf, len, &iov, &iter); -+ if (unlikely(ret)) - return ret; -- return urandom_read_nowarn(file, buf, nbytes, ppos); -+ return get_random_bytes_user(&iter); - } - --static __poll_t --random_poll(struct file *file, poll_table * wait) -+static __poll_t random_poll(struct file *file, poll_table *wait) - { -- __poll_t mask; -- - poll_wait(file, &crng_init_wait, wait); -- poll_wait(file, &random_write_wait, wait); -- mask = 0; -- if (crng_ready()) -- mask |= EPOLLIN | EPOLLRDNORM; -- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) -- mask |= EPOLLOUT | EPOLLWRNORM; -- return mask; -+ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; - } - --static int --write_pool(struct entropy_store *r, const char __user *buffer, size_t count) -+static ssize_t write_pool_user(struct iov_iter *iter) - { -- size_t bytes; -- __u32 t, buf[16]; -- const char __user *p = buffer; -+ u8 block[BLAKE2S_BLOCK_SIZE]; -+ ssize_t ret = 0; -+ size_t copied; - -- while (count > 0) { -- int b, i = 0; -+ if (unlikely(!iov_iter_count(iter))) -+ return 0; - -- bytes = min(count, sizeof(buf)); -- if (copy_from_user(&buf, p, bytes)) -- return -EFAULT; -+ for (;;) { -+ copied = copy_from_iter(block, sizeof(block), iter); -+ ret += copied; -+ mix_pool_bytes(block, copied); -+ if (!iov_iter_count(iter) || copied != sizeof(block)) -+ break; - -- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { -- if (!arch_get_random_int(&t)) -+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0); -+ if (ret % PAGE_SIZE == 0) { -+ if (signal_pending(current)) - break; -- buf[i] ^= t; -+ cond_resched(); - } -+ } -+ -+ memzero_explicit(block, sizeof(block)); -+ return ret ? 
ret : -EFAULT; -+} -+ -+static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter) -+{ -+ return write_pool_user(iter); -+} - -- count -= bytes; -- p += bytes; -+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter) -+{ -+ static int maxwarn = 10; - -- mix_pool_bytes(r, buf, bytes); -- cond_resched(); -+ if (!crng_ready()) { -+ if (!ratelimit_disable && maxwarn <= 0) -+ ++urandom_warning.missed; -+ else if (ratelimit_disable || __ratelimit(&urandom_warning)) { -+ --maxwarn; -+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n", -+ current->comm, iov_iter_count(iter)); -+ } - } - -- return 0; -+ return get_random_bytes_user(iter); - } - --static ssize_t random_write(struct file *file, const char __user *buffer, -- size_t count, loff_t *ppos) -+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter) - { -- size_t ret; -+ int ret; - -- ret = write_pool(&input_pool, buffer, count); -- if (ret) -- return ret; -+ if (!crng_ready() && -+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) || -+ (kiocb->ki_filp->f_flags & O_NONBLOCK))) -+ return -EAGAIN; - -- return (ssize_t)count; -+ ret = wait_for_random_bytes(); -+ if (ret != 0) -+ return ret; -+ return get_random_bytes_user(iter); - } - - static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) - { -- int size, ent_count; - int __user *p = (int __user *)arg; -- int retval; -+ int ent_count; - - switch (cmd) { - case RNDGETENTCNT: -- /* inherently racy, no point locking */ -- ent_count = ENTROPY_BITS(&input_pool); -- if (put_user(ent_count, p)) -+ /* Inherently racy, no point locking. */ -+ if (put_user(input_pool.init_bits, p)) - return -EFAULT; - return 0; - case RNDADDTOENTCNT: -@@ -1918,38 +1329,48 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) - return -EPERM; - if (get_user(ent_count, p)) - return -EFAULT; -- return credit_entropy_bits_safe(&input_pool, ent_count); -- case RNDADDENTROPY: -+ if (ent_count < 0) -+ return -EINVAL; -+ credit_init_bits(ent_count); -+ return 0; -+ case RNDADDENTROPY: { -+ struct iov_iter iter; -+ struct iovec iov; -+ ssize_t ret; -+ int len; -+ - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (get_user(ent_count, p++)) - return -EFAULT; - if (ent_count < 0) - return -EINVAL; -- if (get_user(size, p++)) -+ if (get_user(len, p++)) -+ return -EFAULT; -+ ret = import_single_range(WRITE, p, len, &iov, &iter); -+ if (unlikely(ret)) -+ return ret; -+ ret = write_pool_user(&iter); -+ if (unlikely(ret < 0)) -+ return ret; -+ /* Since we're crediting, enforce that it was all written into the pool. */ -+ if (unlikely(ret != len)) - return -EFAULT; -- retval = write_pool(&input_pool, (const char __user *)p, -- size); -- if (retval < 0) -- return retval; -- return credit_entropy_bits_safe(&input_pool, ent_count); -+ credit_init_bits(ent_count); -+ return 0; -+ } - case RNDZAPENTCNT: - case RNDCLEARPOOL: -- /* -- * Clear the entropy pool counters. We no longer clear -- * the entropy pool, as that's silly. -- */ -+ /* No longer has any effect. 
*/ - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; -- input_pool.entropy_count = 0; - return 0; - case RNDRESEEDCRNG: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; -- if (crng_init < 2) -+ if (!crng_ready()) - return -ENODATA; -- crng_reseed(&primary_crng, &input_pool); -- crng_global_init_time = jiffies - 1; -+ crng_reseed(); - return 0; - default: - return -EINVAL; -@@ -1962,55 +1383,56 @@ static int random_fasync(int fd, struct file *filp, int on) - } - - const struct file_operations random_fops = { -- .read = random_read, -- .write = random_write, -- .poll = random_poll, -+ .read_iter = random_read_iter, -+ .write_iter = random_write_iter, -+ .poll = random_poll, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -+ .splice_read = generic_file_splice_read, -+ .splice_write = iter_file_splice_write, - }; - - const struct file_operations urandom_fops = { -- .read = urandom_read, -- .write = random_write, -+ .read_iter = urandom_read_iter, -+ .write_iter = random_write_iter, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -+ .splice_read = generic_file_splice_read, -+ .splice_write = iter_file_splice_write, - }; - --SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, -- unsigned int, flags) --{ -- int ret; -- -- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE)) -- return -EINVAL; -- -- /* -- * Requesting insecure and blocking randomness at the same time makes -- * no sense. -- */ -- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM)) -- return -EINVAL; -- -- if (count > INT_MAX) -- count = INT_MAX; -- -- if (!(flags & GRND_INSECURE) && !crng_ready()) { -- if (flags & GRND_NONBLOCK) -- return -EAGAIN; -- ret = wait_for_random_bytes(); -- if (unlikely(ret)) -- return ret; -- } -- return urandom_read_nowarn(NULL, buf, count, NULL); --} - - /******************************************************************** - * -- * Sysctl interface -+ * Sysctl interface. -+ * -+ * These are partly unused legacy knobs with dummy values to not break -+ * userspace and partly still useful things. They are usually accessible -+ * in /proc/sys/kernel/random/ and are as follows: -+ * -+ * - boot_id - a UUID representing the current boot. -+ * -+ * - uuid - a random UUID, different each time the file is read. -+ * -+ * - poolsize - the number of bits of entropy that the input pool can -+ * hold, tied to the POOL_BITS constant. -+ * -+ * - entropy_avail - the number of bits of entropy currently in the -+ * input pool. Always <= poolsize. -+ * -+ * - write_wakeup_threshold - the amount of entropy in the input pool -+ * below which write polls to /dev/random will unblock, requesting -+ * more entropy, tied to the POOL_READY_BITS constant. It is writable -+ * to avoid breaking old userspaces, but writing to it does not -+ * change any behavior of the RNG. -+ * -+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL. -+ * It is writable to avoid breaking old userspaces, but writing -+ * to it does not change any behavior of the RNG. 
- * - ********************************************************************/ - -@@ -2018,25 +1440,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, - - #include - --static int min_write_thresh; --static int max_write_thresh = INPUT_POOL_WORDS * 32; --static int random_min_urandom_seed = 60; --static char sysctl_bootid[16]; -+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; -+static int sysctl_random_write_wakeup_bits = POOL_READY_BITS; -+static int sysctl_poolsize = POOL_BITS; -+static u8 sysctl_bootid[UUID_SIZE]; - - /* - * This function is used to return both the bootid UUID, and random -- * UUID. The difference is in whether table->data is NULL; if it is, -+ * UUID. The difference is in whether table->data is NULL; if it is, - * then a new UUID is generated and returned to the user. -- * -- * If the user accesses this via the proc interface, the UUID will be -- * returned as an ASCII string in the standard UUID format; if via the -- * sysctl system call, as 16 bytes of binary data. - */ --static int proc_do_uuid(struct ctl_table *table, int write, -- void *buffer, size_t *lenp, loff_t *ppos) -+static int proc_do_uuid(struct ctl_table *table, int write, void *buf, -+ size_t *lenp, loff_t *ppos) - { -- struct ctl_table fake_table; -- unsigned char buf[64], tmp_uuid[16], *uuid; -+ u8 tmp_uuid[UUID_SIZE], *uuid; -+ char uuid_string[UUID_STRING_LEN + 1]; -+ struct ctl_table fake_table = { -+ .data = uuid_string, -+ .maxlen = UUID_STRING_LEN -+ }; -+ -+ if (write) -+ return -EPERM; - - uuid = table->data; - if (!uuid) { -@@ -2051,32 +1476,17 @@ static int proc_do_uuid(struct ctl_table *table, int write, - spin_unlock(&bootid_spinlock); - } - -- sprintf(buf, "%pU", uuid); -- -- fake_table.data = buf; -- fake_table.maxlen = sizeof(buf); -- -- return proc_dostring(&fake_table, write, buffer, lenp, ppos); -+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); -+ return proc_dostring(&fake_table, 0, buf, lenp, ppos); - } - --/* -- * Return entropy available scaled to integral bits -- */ --static int proc_do_entropy(struct ctl_table *table, int write, -- void *buffer, size_t *lenp, loff_t *ppos) -+/* The same as proc_dointvec, but writes don't change anything. */ -+static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, -+ size_t *lenp, loff_t *ppos) - { -- struct ctl_table fake_table; -- int entropy_count; -- -- entropy_count = *(int *)table->data >> ENTROPY_SHIFT; -- -- fake_table.data = &entropy_count; -- fake_table.maxlen = sizeof(entropy_count); -- -- return proc_dointvec(&fake_table, write, buffer, lenp, ppos); -+ return write ? 
0 : proc_dointvec(table, 0, buf, lenp, ppos); - } - --static int sysctl_poolsize = INPUT_POOL_WORDS * 32; - extern struct ctl_table random_table[]; - struct ctl_table random_table[] = { - { -@@ -2088,218 +1498,36 @@ struct ctl_table random_table[] = { - }, - { - .procname = "entropy_avail", -+ .data = &input_pool.init_bits, - .maxlen = sizeof(int), - .mode = 0444, -- .proc_handler = proc_do_entropy, -- .data = &input_pool.entropy_count, -+ .proc_handler = proc_dointvec, - }, - { - .procname = "write_wakeup_threshold", -- .data = &random_write_wakeup_bits, -+ .data = &sysctl_random_write_wakeup_bits, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec_minmax, -- .extra1 = &min_write_thresh, -- .extra2 = &max_write_thresh, -+ .proc_handler = proc_do_rointvec, - }, - { - .procname = "urandom_min_reseed_secs", -- .data = &random_min_urandom_seed, -+ .data = &sysctl_random_min_urandom_seed, - .maxlen = sizeof(int), - .mode = 0644, -- .proc_handler = proc_dointvec, -+ .proc_handler = proc_do_rointvec, - }, - { - .procname = "boot_id", - .data = &sysctl_bootid, -- .maxlen = 16, - .mode = 0444, - .proc_handler = proc_do_uuid, - }, - { - .procname = "uuid", -- .maxlen = 16, - .mode = 0444, - .proc_handler = proc_do_uuid, - }, --#ifdef ADD_INTERRUPT_BENCH -- { -- .procname = "add_interrupt_avg_cycles", -- .data = &avg_cycles, -- .maxlen = sizeof(avg_cycles), -- .mode = 0444, -- .proc_handler = proc_doulongvec_minmax, -- }, -- { -- .procname = "add_interrupt_avg_deviation", -- .data = &avg_deviation, -- .maxlen = sizeof(avg_deviation), -- .mode = 0444, -- .proc_handler = proc_doulongvec_minmax, -- }, --#endif - { } - }; --#endif /* CONFIG_SYSCTL */ -- --struct batched_entropy { -- union { -- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)]; -- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)]; -- }; -- unsigned int position; -- spinlock_t batch_lock; --}; -- --/* -- * Get a random word for internal kernel use only. The quality of the random -- * number is good as /dev/urandom, but there is no backtrack protection, with -- * the goal of being quite fast and not depleting entropy. In order to ensure -- * that the randomness provided by this function is okay, the function -- * wait_for_random_bytes() should be called and return 0 at least once at any -- * point prior. 
-- */ --static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { -- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), --}; -- --u64 get_random_u64(void) --{ -- u64 ret; -- unsigned long flags; -- struct batched_entropy *batch; -- static void *previous; -- -- warn_unseeded_randomness(&previous); -- -- batch = raw_cpu_ptr(&batched_entropy_u64); -- spin_lock_irqsave(&batch->batch_lock, flags); -- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { -- extract_crng((u8 *)batch->entropy_u64); -- batch->position = 0; -- } -- ret = batch->entropy_u64[batch->position++]; -- spin_unlock_irqrestore(&batch->batch_lock, flags); -- return ret; --} --EXPORT_SYMBOL(get_random_u64); -- --static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { -- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock), --}; --u32 get_random_u32(void) --{ -- u32 ret; -- unsigned long flags; -- struct batched_entropy *batch; -- static void *previous; -- -- warn_unseeded_randomness(&previous); -- -- batch = raw_cpu_ptr(&batched_entropy_u32); -- spin_lock_irqsave(&batch->batch_lock, flags); -- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { -- extract_crng((u8 *)batch->entropy_u32); -- batch->position = 0; -- } -- ret = batch->entropy_u32[batch->position++]; -- spin_unlock_irqrestore(&batch->batch_lock, flags); -- return ret; --} --EXPORT_SYMBOL(get_random_u32); -- --/* It's important to invalidate all potential batched entropy that might -- * be stored before the crng is initialized, which we can do lazily by -- * simply resetting the counter to zero so that it's re-extracted on the -- * next usage. */ --static void invalidate_batched_entropy(void) --{ -- int cpu; -- unsigned long flags; -- -- for_each_possible_cpu (cpu) { -- struct batched_entropy *batched_entropy; -- -- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu); -- spin_lock_irqsave(&batched_entropy->batch_lock, flags); -- batched_entropy->position = 0; -- spin_unlock(&batched_entropy->batch_lock); -- -- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu); -- spin_lock(&batched_entropy->batch_lock); -- batched_entropy->position = 0; -- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags); -- } --} -- --/** -- * randomize_page - Generate a random, page aligned address -- * @start: The smallest acceptable address the caller will take. -- * @range: The size of the area, starting at @start, within which the -- * random address must fall. -- * -- * If @start + @range would overflow, @range is capped. -- * -- * NOTE: Historical use of randomize_range, which this replaces, presumed that -- * @start was already page aligned. We now align it regardless. -- * -- * Return: A page aligned address within [start, start + range). On error, -- * @start is returned. -- */ --unsigned long --randomize_page(unsigned long start, unsigned long range) --{ -- if (!PAGE_ALIGNED(start)) { -- range -= PAGE_ALIGN(start) - start; -- start = PAGE_ALIGN(start); -- } -- -- if (start > ULONG_MAX - range) -- range = ULONG_MAX - start; -- -- range >>= PAGE_SHIFT; -- -- if (range == 0) -- return start; -- -- return start + (get_random_long() % range << PAGE_SHIFT); --} -- --/* Interface for in-kernel drivers of true hardware RNGs. -- * Those devices may produce endless random bits and will be throttled -- * when our pool is full. 
-- */ --void add_hwgenerator_randomness(const char *buffer, size_t count, -- size_t entropy) --{ -- struct entropy_store *poolp = &input_pool; -- -- if (unlikely(crng_init == 0)) { -- crng_fast_load(buffer, count); -- return; -- } -- -- /* Suspend writing if we're above the trickle threshold. -- * We'll be woken up again once below random_write_wakeup_thresh, -- * or when the calling thread is about to terminate. -- */ -- wait_event_interruptible(random_write_wait, kthread_should_stop() || -- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); -- mix_pool_bytes(poolp, buffer, count); -- credit_entropy_bits(poolp, entropy); --} --EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); -- --/* Handle random seed passed by bootloader. -- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise -- * it would be regarded as device data. -- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. -- */ --void add_bootloader_randomness(const void *buf, unsigned int size) --{ -- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) -- add_hwgenerator_randomness(buf, size, size * 8); -- else -- add_device_randomness(buf, size); --} --EXPORT_SYMBOL_GPL(add_bootloader_randomness); -+#endif /* CONFIG_SYSCTL */ -diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c -index 1b18ce5ebab1e..cd266021d0103 100644 ---- a/drivers/char/tpm/eventlog/acpi.c -+++ b/drivers/char/tpm/eventlog/acpi.c -@@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip) - return -ENODEV; - - if (tbl->header.length < -- sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) -+ sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) { -+ acpi_put_table((struct acpi_table_header *)tbl); - return -ENODEV; -+ } - - tpm2_phy = (void *)tbl + sizeof(*tbl); - len = tpm2_phy->log_area_minimum_length; - - start = tpm2_phy->log_area_start_address; -- if (!start || !len) -+ if (!start || !len) { -+ acpi_put_table((struct acpi_table_header *)tbl); - return -ENODEV; -+ } - -+ acpi_put_table((struct acpi_table_header *)tbl); - format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; - } else { - /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ -@@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip) - break; - } - -+ acpi_put_table((struct acpi_table_header *)buff); - format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; - } -+ - if (!len) { - dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); - return -EIO; -@@ -136,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip) - - ret = -EIO; - virt = acpi_os_map_iomem(start, len); -- if (!virt) -+ if (!virt) { -+ dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); -+ /* try EFI log next */ -+ ret = -ENODEV; - goto err; -+ } - - memcpy_fromio(log->bios_event_log, virt, len); - -@@ -156,5 +167,4 @@ err: - kfree(log->bios_event_log); - log->bios_event_log = NULL; - return ret; -- - } -diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c -index ddaeceb7e1091..65d800ecc9964 100644 ---- a/drivers/char/tpm/tpm-chip.c -+++ b/drivers/char/tpm/tpm-chip.c -@@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev) - kfree(chip); - } - --static void tpm_devs_release(struct device *dev) --{ -- struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); -- -- /* release the master device reference */ -- put_device(&chip->dev); --} -- - /** - * tpm_class_shutdown() - prepare the TPM device for loss of power. - * @dev: device to which the chip is associated. 
-@@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, - chip->dev_num = rc; - - device_initialize(&chip->dev); -- device_initialize(&chip->devs); - - chip->dev.class = tpm_class; - chip->dev.class->shutdown_pre = tpm_class_shutdown; -@@ -352,29 +343,12 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, - chip->dev.parent = pdev; - chip->dev.groups = chip->groups; - -- chip->devs.parent = pdev; -- chip->devs.class = tpmrm_class; -- chip->devs.release = tpm_devs_release; -- /* get extra reference on main device to hold on -- * behalf of devs. This holds the chip structure -- * while cdevs is in use. The corresponding put -- * is in the tpm_devs_release (TPM2 only) -- */ -- if (chip->flags & TPM_CHIP_FLAG_TPM2) -- get_device(&chip->dev); -- - if (chip->dev_num == 0) - chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); - else - chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - -- chip->devs.devt = -- MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); -- - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); -- if (rc) -- goto out; -- rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); - if (rc) - goto out; - -@@ -382,9 +356,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, - chip->flags |= TPM_CHIP_FLAG_VIRTUAL; - - cdev_init(&chip->cdev, &tpm_fops); -- cdev_init(&chip->cdevs, &tpmrm_fops); - chip->cdev.owner = THIS_MODULE; -- chip->cdevs.owner = THIS_MODULE; - - rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); - if (rc) { -@@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, - return chip; - - out: -- put_device(&chip->devs); - put_device(&chip->dev); - return ERR_PTR(rc); - } -@@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) - } - - if (chip->flags & TPM_CHIP_FLAG_TPM2) { -- rc = cdev_device_add(&chip->cdevs, &chip->devs); -- if (rc) { -- dev_err(&chip->devs, -- "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", -- dev_name(&chip->devs), MAJOR(chip->devs.devt), -- MINOR(chip->devs.devt), rc); -- return rc; -- } -+ rc = tpm_devs_add(chip); -+ if (rc) -+ goto err_del_cdev; - } - - /* Make the chip available. */ -@@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) - idr_replace(&dev_nums_idr, chip, chip->dev_num); - mutex_unlock(&idr_lock); - -+ return 0; -+ -+err_del_cdev: -+ cdev_device_del(&chip->cdev, &chip->dev); - return rc; - } - -@@ -474,13 +444,21 @@ static void tpm_del_char_device(struct tpm_chip *chip) - - /* Make the driver uncallable. */ - down_write(&chip->ops_sem); -- if (chip->flags & TPM_CHIP_FLAG_TPM2) { -- if (!tpm_chip_start(chip)) { -- tpm2_shutdown(chip, TPM2_SU_CLEAR); -- tpm_chip_stop(chip); -+ -+ /* -+ * Check if chip->ops is still valid: In case that the controller -+ * drivers shutdown handler unregisters the controller in its -+ * shutdown handler we are called twice and chip->ops to NULL. 
-+ */ -+ if (chip->ops) { -+ if (chip->flags & TPM_CHIP_FLAG_TPM2) { -+ if (!tpm_chip_start(chip)) { -+ tpm2_shutdown(chip, TPM2_SU_CLEAR); -+ tpm_chip_stop(chip); -+ } - } -+ chip->ops = NULL; - } -- chip->ops = NULL; - up_write(&chip->ops_sem); - } - -@@ -641,7 +619,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) - hwrng_unregister(&chip->hwrng); - tpm_bios_log_teardown(chip); - if (chip->flags & TPM_CHIP_FLAG_TPM2) -- cdev_device_del(&chip->cdevs, &chip->devs); -+ tpm_devs_remove(chip); - tpm_del_char_device(chip); - } - EXPORT_SYMBOL_GPL(tpm_chip_unregister); -diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c -index c08cbb306636b..dc4c0a0a51290 100644 ---- a/drivers/char/tpm/tpm-dev-common.c -+++ b/drivers/char/tpm/tpm-dev-common.c -@@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work) - ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, - sizeof(priv->data_buffer)); - tpm_put_ops(priv->chip); -- if (ret > 0) { -+ -+ /* -+ * If ret is > 0 then tpm_dev_transmit returned the size of the -+ * response. If ret is < 0 then tpm_dev_transmit failed and -+ * returned an error code. -+ */ -+ if (ret != 0) { - priv->response_length = ret; - mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); - } -diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c -index 1621ce8187052..d69905233aff2 100644 ---- a/drivers/char/tpm/tpm-interface.c -+++ b/drivers/char/tpm/tpm-interface.c -@@ -401,13 +401,14 @@ int tpm_pm_suspend(struct device *dev) - !pm_suspend_via_firmware()) - goto suspended; - -- if (!tpm_chip_start(chip)) { -+ rc = tpm_try_get_ops(chip); -+ if (!rc) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_STATE); - else - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); - -- tpm_chip_stop(chip); -+ tpm_put_ops(chip); - } - - suspended: -diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h -index 283f78211c3a7..2163c6ee0d364 100644 ---- a/drivers/char/tpm/tpm.h -+++ b/drivers/char/tpm/tpm.h -@@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, - size_t cmdsiz); - int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, - size_t *bufsiz); -+int tpm_devs_add(struct tpm_chip *chip); -+void tpm_devs_remove(struct tpm_chip *chip); - - void tpm_bios_log_setup(struct tpm_chip *chip); - void tpm_bios_log_teardown(struct tpm_chip *chip); -diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c -index a25815a6f6253..de92065394be9 100644 ---- a/drivers/char/tpm/tpm2-cmd.c -+++ b/drivers/char/tpm/tpm2-cmd.c -@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, - if (!rc) { - out = (struct tpm2_get_cap_out *) - &buf.data[TPM_HEADER_SIZE]; -- *value = be32_to_cpu(out->value); -+ /* -+ * To prevent failing boot up of some systems, Infineon TPM2.0 -+ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also -+ * the TPM2_Getcapability command returns a zero length list -+ * in field upgrade mode. 
-+ */ -+ if (be32_to_cpu(out->property_cnt) > 0) -+ *value = be32_to_cpu(out->value); -+ else -+ rc = -ENODATA; - } - tpm_buf_destroy(&buf); - return rc; -diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c -index 784b8b3cb903f..ffb35f0154c16 100644 ---- a/drivers/char/tpm/tpm2-space.c -+++ b/drivers/char/tpm/tpm2-space.c -@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) - - void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) - { -- mutex_lock(&chip->tpm_mutex); -- if (!tpm_chip_start(chip)) { -+ -+ if (tpm_try_get_ops(chip) == 0) { - tpm2_flush_sessions(chip, space); -- tpm_chip_stop(chip); -+ tpm_put_ops(chip); - } -- mutex_unlock(&chip->tpm_mutex); -+ - kfree(space->context_buf); - kfree(space->session_buf); - } -@@ -455,6 +455,9 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp, - if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES) - return 0; - -+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4) -+ return -EFAULT; -+ - if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count)) - return -EFAULT; - -@@ -571,3 +574,68 @@ out: - dev_err(&chip->dev, "%s: error %d\n", __func__, rc); - return rc; - } -+ -+/* -+ * Put the reference to the main device. -+ */ -+static void tpm_devs_release(struct device *dev) -+{ -+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); -+ -+ /* release the master device reference */ -+ put_device(&chip->dev); -+} -+ -+/* -+ * Remove the device file for exposed TPM spaces and release the device -+ * reference. This may also release the reference to the master device. -+ */ -+void tpm_devs_remove(struct tpm_chip *chip) -+{ -+ cdev_device_del(&chip->cdevs, &chip->devs); -+ put_device(&chip->devs); -+} -+ -+/* -+ * Add a device file to expose TPM spaces. Also take a reference to the -+ * main device. -+ */ -+int tpm_devs_add(struct tpm_chip *chip) -+{ -+ int rc; -+ -+ device_initialize(&chip->devs); -+ chip->devs.parent = chip->dev.parent; -+ chip->devs.class = tpmrm_class; -+ -+ /* -+ * Get extra reference on main device to hold on behalf of devs. -+ * This holds the chip structure while cdevs is in use. The -+ * corresponding put is in the tpm_devs_release. 
-+ */ -+ get_device(&chip->dev); -+ chip->devs.release = tpm_devs_release; -+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); -+ cdev_init(&chip->cdevs, &tpmrm_fops); -+ chip->cdevs.owner = THIS_MODULE; -+ -+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); -+ if (rc) -+ goto err_put_devs; -+ -+ rc = cdev_device_add(&chip->cdevs, &chip->devs); -+ if (rc) { -+ dev_err(&chip->devs, -+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", -+ dev_name(&chip->devs), MAJOR(chip->devs.devt), -+ MINOR(chip->devs.devt), rc); -+ goto err_put_devs; -+ } -+ -+ return 0; -+ -+err_put_devs: -+ put_device(&chip->devs); -+ -+ return rc; -+} -diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c -index 18606651d1aa4..16fc481d60950 100644 ---- a/drivers/char/tpm/tpm_crb.c -+++ b/drivers/char/tpm/tpm_crb.c -@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev, - iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); - if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, - TPM2_TIMEOUT_C)) { -- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); -+ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); - return -ETIME; - } - -@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device) - - /* Should the FIFO driver handle this? */ - sm = buf->start_method; -- if (sm == ACPI_TPM2_MEMORY_MAPPED) -- return -ENODEV; -+ if (sm == ACPI_TPM2_MEMORY_MAPPED) { -+ rc = -ENODEV; -+ goto out; -+ } - - priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL); -- if (!priv) -- return -ENOMEM; -+ if (!priv) { -+ rc = -ENOMEM; -+ goto out; -+ } - - if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { - if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { -@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device) - FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", - buf->header.length, - ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); -- return -EINVAL; -+ rc = -EINVAL; -+ goto out; - } - crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); - priv->smc_func_id = crb_smc->smc_func_id; -@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device) - - rc = crb_map_io(device, priv, buf); - if (rc) -- return rc; -+ goto out; - - chip = tpmm_chip_alloc(dev, &tpm_crb); -- if (IS_ERR(chip)) -- return PTR_ERR(chip); -+ if (IS_ERR(chip)) { -+ rc = PTR_ERR(chip); -+ goto out; -+ } - - dev_set_drvdata(&chip->dev, priv); - chip->acpi_dev_handle = device->handle; - chip->flags = TPM_CHIP_FLAG_TPM2; - -- return tpm_chip_register(chip); -+ rc = tpm_chip_register(chip); -+ -+out: -+ acpi_put_table((struct acpi_table_header *)buf); -+ return rc; - } - - static int crb_acpi_remove(struct acpi_device *device) -diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c -index 6e3235565a4d8..d9daaafdd295c 100644 ---- a/drivers/char/tpm/tpm_ftpm_tee.c -+++ b/drivers/char/tpm/tpm_ftpm_tee.c -@@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void) - if (rc) - return rc; - -- return driver_register(&ftpm_tee_driver.driver); -+ rc = driver_register(&ftpm_tee_driver.driver); -+ if (rc) { -+ platform_driver_unregister(&ftpm_tee_plat_driver); -+ return rc; -+ } -+ -+ return 0; - } - - static void __exit ftpm_mod_exit(void) -diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c -index 3af4c07a9342f..d3989b257f422 100644 ---- a/drivers/char/tpm/tpm_ibmvtpm.c -+++ b/drivers/char/tpm/tpm_ibmvtpm.c -@@ -681,6 +681,7 @@ static int 
tpm_ibmvtpm_probe(struct vio_dev *vio_dev, - if (!wait_event_timeout(ibmvtpm->crq_queue.wq, - ibmvtpm->rtce_buf != NULL, - HZ)) { -+ rc = -ENODEV; - dev_err(dev, "CRQ response timed out\n"); - goto init_irq_cleanup; - } -diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c -index d3f2e5364c275..dfb463ee7ca1a 100644 ---- a/drivers/char/tpm/tpm_tis.c -+++ b/drivers/char/tpm/tpm_tis.c -@@ -83,6 +83,22 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = { - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), - }, - }, -+ { -+ .callback = tpm_tis_disable_irq, -+ .ident = "ThinkStation P360 Tiny", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"), -+ }, -+ }, -+ { -+ .callback = tpm_tis_disable_irq, -+ .ident = "ThinkPad L490", -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"), -+ }, -+ }, - {} - }; - -@@ -125,6 +141,7 @@ static int check_acpi_tpm2(struct device *dev) - const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); - struct acpi_table_tpm2 *tbl; - acpi_status st; -+ int ret = 0; - - if (!aid || aid->driver_data != DEVICE_IS_TPM2) - return 0; -@@ -132,8 +149,7 @@ static int check_acpi_tpm2(struct device *dev) - /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 - * table is mandatory - */ -- st = -- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); -+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); - if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { - dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); - return -EINVAL; -@@ -141,9 +157,10 @@ static int check_acpi_tpm2(struct device *dev) - - /* The tpm2_crb driver handles this device */ - if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) -- return -ENODEV; -+ ret = -ENODEV; - -- return 0; -+ acpi_put_table((struct acpi_table_header *)tbl); -+ return ret; - } - #else - static int check_acpi_tpm2(struct device *dev) -diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c -index 69579efb247b3..d7c440ac465f3 100644 ---- a/drivers/char/tpm/tpm_tis_core.c -+++ b/drivers/char/tpm/tpm_tis_core.c -@@ -48,6 +48,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, - unsigned long timeout, wait_queue_head_t *queue, - bool check_cancel) - { -+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - unsigned long stop; - long rc; - u8 status; -@@ -80,8 +81,8 @@ again: - } - } else { - do { -- usleep_range(TPM_TIMEOUT_USECS_MIN, -- TPM_TIMEOUT_USECS_MAX); -+ usleep_range(priv->timeout_min, -+ priv->timeout_max); - status = chip->ops->status(chip); - if ((status & mask) == mask) - return 0; -@@ -135,16 +136,27 @@ static bool check_locality(struct tpm_chip *chip, int l) - return false; - } - --static int release_locality(struct tpm_chip *chip, int l) -+static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l) -+{ -+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); -+ -+ return 0; -+} -+ -+static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l) - { - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - -- tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); -+ mutex_lock(&priv->locality_count_mutex); -+ priv->locality_count--; -+ if (priv->locality_count == 0) -+ __tpm_tis_relinquish_locality(priv, l); -+ mutex_unlock(&priv->locality_count_mutex); - - return 0; - } - --static int request_locality(struct tpm_chip 
*chip, int l) -+static int __tpm_tis_request_locality(struct tpm_chip *chip, int l) - { - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - unsigned long stop, timeout; -@@ -185,6 +197,20 @@ again: - return -1; - } - -+static int tpm_tis_request_locality(struct tpm_chip *chip, int l) -+{ -+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); -+ int ret = 0; -+ -+ mutex_lock(&priv->locality_count_mutex); -+ if (priv->locality_count == 0) -+ ret = __tpm_tis_request_locality(chip, l); -+ if (!ret) -+ priv->locality_count++; -+ mutex_unlock(&priv->locality_count_mutex); -+ return ret; -+} -+ - static u8 tpm_tis_status(struct tpm_chip *chip) - { - struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); -@@ -288,6 +314,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) - int size = 0; - int status; - u32 expected; -+ int rc; - - if (count < TPM_HEADER_SIZE) { - size = -EIO; -@@ -307,8 +334,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) - goto out; - } - -- size += recv_data(chip, &buf[TPM_HEADER_SIZE], -- expected - TPM_HEADER_SIZE); -+ rc = recv_data(chip, &buf[TPM_HEADER_SIZE], -+ expected - TPM_HEADER_SIZE); -+ if (rc < 0) { -+ size = rc; -+ goto out; -+ } -+ size += rc; - if (size < expected) { - dev_err(&chip->dev, "Unable to read remainder of result\n"); - size = -ETIME; -@@ -637,7 +669,7 @@ static int probe_itpm(struct tpm_chip *chip) - if (vendor != TPM_VID_INTEL) - return 0; - -- if (request_locality(chip, 0) != 0) -+ if (tpm_tis_request_locality(chip, 0) != 0) - return -EBUSY; - - rc = tpm_tis_send_data(chip, cmd_getticks, len); -@@ -658,7 +690,7 @@ static int probe_itpm(struct tpm_chip *chip) - - out: - tpm_tis_ready(chip); -- release_locality(chip, priv->locality); -+ tpm_tis_relinquish_locality(chip, priv->locality); - - return rc; - } -@@ -705,7 +737,9 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id) - wake_up_interruptible(&priv->int_queue); - - /* Clear interrupts handled with TPM_EOI */ -+ tpm_tis_request_locality(chip, 0); - rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt); -+ tpm_tis_relinquish_locality(chip, 0); - if (rc < 0) - return IRQ_NONE; - -@@ -713,25 +747,17 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id) - return IRQ_HANDLED; - } - --static int tpm_tis_gen_interrupt(struct tpm_chip *chip) -+static void tpm_tis_gen_interrupt(struct tpm_chip *chip) - { - const char *desc = "attempting to generate an interrupt"; - u32 cap2; - cap_t cap; - int ret; - -- ret = request_locality(chip, 0); -- if (ret < 0) -- return ret; -- - if (chip->flags & TPM_CHIP_FLAG_TPM2) - ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc); - else - ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0); -- -- release_locality(chip, 0); -- -- return ret; - } - - /* Register the IRQ and issue a command that will cause an interrupt. 
If an -@@ -746,60 +772,66 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, - int rc; - u32 int_status; - -- if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags, -- dev_name(&chip->dev), chip) != 0) { -+ -+ rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, -+ tis_int_handler, IRQF_ONESHOT | flags, -+ dev_name(&chip->dev), chip); -+ if (rc) { - dev_info(&chip->dev, "Unable to request irq: %d for probe\n", - irq); - return -1; - } - priv->irq = irq; - -+ rc = tpm_tis_request_locality(chip, 0); -+ if (rc < 0) -+ return rc; -+ - rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality), - &original_int_vec); -- if (rc < 0) -+ if (rc < 0) { -+ tpm_tis_relinquish_locality(chip, priv->locality); - return rc; -+ } - - rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq); - if (rc < 0) -- return rc; -+ goto restore_irqs; - - rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status); - if (rc < 0) -- return rc; -+ goto restore_irqs; - - /* Clear all existing */ - rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status); - if (rc < 0) -- return rc; -- -+ goto restore_irqs; - /* Turn on */ - rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), - intmask | TPM_GLOBAL_INT_ENABLE); - if (rc < 0) -- return rc; -+ goto restore_irqs; - - priv->irq_tested = false; - - /* Generate an interrupt by having the core call through to - * tpm_tis_send - */ -- rc = tpm_tis_gen_interrupt(chip); -- if (rc < 0) -- return rc; -+ tpm_tis_gen_interrupt(chip); - -+restore_irqs: - /* tpm_tis_send will either confirm the interrupt is working or it - * will call disable_irq which undoes all of the above. - */ - if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { -- rc = tpm_tis_write8(priv, original_int_vec, -- TPM_INT_VECTOR(priv->locality)); -- if (rc < 0) -- return rc; -- -- return 1; -+ tpm_tis_write8(priv, original_int_vec, -+ TPM_INT_VECTOR(priv->locality)); -+ rc = -1; - } - -- return 0; -+ tpm_tis_relinquish_locality(chip, priv->locality); -+ -+ return rc; - } - - /* Try to find the IRQ the TPM is using. 
This is for legacy x86 systems that -@@ -913,8 +945,8 @@ static const struct tpm_class_ops tpm_tis = { - .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, - .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, - .req_canceled = tpm_tis_req_canceled, -- .request_locality = request_locality, -- .relinquish_locality = release_locality, -+ .request_locality = tpm_tis_request_locality, -+ .relinquish_locality = tpm_tis_relinquish_locality, - .clk_enable = tpm_tis_clkrun_enable, - }; - -@@ -945,9 +977,26 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, - chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX); - chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX); - chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX); -+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN; -+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX; - priv->phy_ops = phy_ops; -+ priv->locality_count = 0; -+ mutex_init(&priv->locality_count_mutex); -+ - dev_set_drvdata(&chip->dev, priv); - -+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); -+ if (rc < 0) -+ return rc; -+ -+ priv->manufacturer_id = vendor; -+ -+ if (priv->manufacturer_id == TPM_VID_ATML && -+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) { -+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML; -+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML; -+ } -+ - if (is_bsw()) { - priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, - ILB_REMAP_SIZE); -@@ -978,7 +1027,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, - intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | - TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; - intmask &= ~TPM_GLOBAL_INT_ENABLE; -+ -+ rc = tpm_tis_request_locality(chip, 0); -+ if (rc < 0) { -+ rc = -ENODEV; -+ goto out_err; -+ } -+ - tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); -+ tpm_tis_relinquish_locality(chip, 0); - - rc = tpm_chip_start(chip); - if (rc) -@@ -988,12 +1045,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, - if (rc) - goto out_err; - -- rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); -- if (rc < 0) -- goto out_err; -- -- priv->manufacturer_id = vendor; -- - rc = tpm_tis_read8(priv, TPM_RID(0), &rid); - if (rc < 0) - goto out_err; -@@ -1044,13 +1095,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, - * proper timeouts for the driver. 
- */ - -- rc = request_locality(chip, 0); -+ rc = tpm_tis_request_locality(chip, 0); - if (rc < 0) - goto out_err; - - rc = tpm_get_timeouts(chip); - -- release_locality(chip, 0); -+ tpm_tis_relinquish_locality(chip, 0); - - if (rc) { - dev_err(dev, "Could not get TPM timeouts and durations\n"); -@@ -1058,17 +1109,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, - goto out_err; - } - -- if (irq) { -+ if (irq) - tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, - irq); -- if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { -- dev_err(&chip->dev, FW_BUG -+ else -+ tpm_tis_probe_irq(chip, intmask); -+ -+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) { -+ dev_err(&chip->dev, FW_BUG - "TPM interrupt not working, polling instead\n"); - -- disable_interrupts(chip); -- } -- } else { -- tpm_tis_probe_irq(chip, intmask); -+ rc = tpm_tis_request_locality(chip, 0); -+ if (rc < 0) -+ goto out_err; -+ disable_interrupts(chip); -+ tpm_tis_relinquish_locality(chip, 0); - } - } - -@@ -1129,28 +1184,27 @@ int tpm_tis_resume(struct device *dev) - struct tpm_chip *chip = dev_get_drvdata(dev); - int ret; - -+ ret = tpm_tis_request_locality(chip, 0); -+ if (ret < 0) -+ return ret; -+ - if (chip->flags & TPM_CHIP_FLAG_IRQ) - tpm_tis_reenable_interrupts(chip); - - ret = tpm_pm_resume(dev); - if (ret) -- return ret; -+ goto out; - - /* - * TPM 1.2 requires self-test on resume. This function actually returns - * an error code but for unknown reason it isn't handled. - */ -- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { -- ret = request_locality(chip, 0); -- if (ret < 0) -- return ret; -- -+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - tpm1_do_selftest(chip); -+out: -+ tpm_tis_relinquish_locality(chip, 0); - -- release_locality(chip, 0); -- } -- -- return 0; -+ return ret; - } - EXPORT_SYMBOL_GPL(tpm_tis_resume); - #endif -diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h -index b2a3c6c72882d..464ed352ab2e8 100644 ---- a/drivers/char/tpm/tpm_tis_core.h -+++ b/drivers/char/tpm/tpm_tis_core.h -@@ -54,6 +54,8 @@ enum tis_defaults { - TIS_MEM_LEN = 0x5000, - TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ -+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ -+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ - }; - - /* Some timeout values are needed before it is known whether the chip is -@@ -88,6 +90,8 @@ enum tpm_tis_flags { - - struct tpm_tis_data { - u16 manufacturer_id; -+ struct mutex locality_count_mutex; -+ unsigned int locality_count; - int locality; - int irq; - bool irq_tested; -@@ -98,6 +102,8 @@ struct tpm_tis_data { - wait_queue_head_t read_queue; - const struct tpm_tis_phy_ops *phy_ops; - unsigned short rng_quality; -+ unsigned int timeout_min; /* usecs */ -+ unsigned int timeout_max; /* usecs */ - }; - - struct tpm_tis_phy_ops { -diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c -index c892781037037..e2ab6a329732b 100644 ---- a/drivers/char/tpm/tpm_tis_i2c_cr50.c -+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c -@@ -754,8 +754,8 @@ static int tpm_cr50_i2c_remove(struct i2c_client *client) - struct device *dev = &client->dev; - - if (!chip) { -- dev_err(dev, "Could not get client data at remove\n"); -- return -ENODEV; -+ dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n"); -+ return 0; - } - - tpm_chip_unregister(chip); -diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c -index 54584b4b00d19..aaa59a00eeaef 100644 ---- a/drivers/char/tpm/tpm_tis_spi_main.c 
-+++ b/drivers/char/tpm/tpm_tis_spi_main.c -@@ -267,6 +267,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = { - { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, - { "slb9670", (unsigned long)tpm_tis_spi_probe }, - { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, -+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe }, - { "cr50", (unsigned long)cr50_spi_probe }, - {} - }; -diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c -index 91c772e38bb54..ff2ec71d592ef 100644 ---- a/drivers/char/tpm/tpm_vtpm_proxy.c -+++ b/drivers/char/tpm/tpm_vtpm_proxy.c -@@ -683,37 +683,21 @@ static struct miscdevice vtpmx_miscdev = { - .fops = &vtpmx_fops, - }; - --static int vtpmx_init(void) --{ -- return misc_register(&vtpmx_miscdev); --} -- --static void vtpmx_cleanup(void) --{ -- misc_deregister(&vtpmx_miscdev); --} -- - static int __init vtpm_module_init(void) - { - int rc; - -- rc = vtpmx_init(); -- if (rc) { -- pr_err("couldn't create vtpmx device\n"); -- return rc; -- } -- - workqueue = create_workqueue("tpm-vtpm"); - if (!workqueue) { - pr_err("couldn't create workqueue\n"); -- rc = -ENOMEM; -- goto err_vtpmx_cleanup; -+ return -ENOMEM; - } - -- return 0; -- --err_vtpmx_cleanup: -- vtpmx_cleanup(); -+ rc = misc_register(&vtpmx_miscdev); -+ if (rc) { -+ pr_err("couldn't create vtpmx device\n"); -+ destroy_workqueue(workqueue); -+ } - - return rc; - } -@@ -721,7 +705,7 @@ err_vtpmx_cleanup: - static void __exit vtpm_module_exit(void) - { - destroy_workqueue(workqueue); -- vtpmx_cleanup(); -+ misc_deregister(&vtpmx_miscdev); - } - - module_init(vtpm_module_init); -diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c -index 7eaf303a7a86f..77bc993d75130 100644 ---- a/drivers/char/virtio_console.c -+++ b/drivers/char/virtio_console.c -@@ -1956,6 +1956,13 @@ static void virtcons_remove(struct virtio_device *vdev) - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - -+ /* Device is going away, exit any polling for buffers */ -+ virtio_break_device(vdev); -+ if (use_multiport(portdev)) -+ flush_work(&portdev->control_work); -+ else -+ flush_work(&portdev->config_work); -+ - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ -@@ -2229,7 +2236,7 @@ static struct virtio_driver virtio_rproc_serial = { - .remove = virtcons_remove, - }; - --static int __init init(void) -+static int __init virtio_console_init(void) - { - int err; - -@@ -2264,7 +2271,7 @@ free: - return err; - } - --static void __exit fini(void) -+static void __exit virtio_console_fini(void) - { - reclaim_dma_bufs(); - -@@ -2274,8 +2281,8 @@ static void __exit fini(void) - class_destroy(pdrvdata.class); - debugfs_remove_recursive(pdrvdata.debugfs_dir); - } --module_init(init); --module_exit(fini); -+module_init(virtio_console_init); -+module_exit(virtio_console_fini); - - MODULE_DESCRIPTION("Virtio console driver"); - MODULE_LICENSE("GPL"); -diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c -index e7f88f35c7028..39bcbfd908b46 100644 ---- a/drivers/char/xillybus/xillyusb.c -+++ b/drivers/char/xillybus/xillyusb.c -@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref) - if (xdev->workq) - destroy_workqueue(xdev->workq); - -+ usb_put_dev(xdev->udev); - kfree(xdev->channels); /* Argument may be NULL, and that's fine */ - kfree(xdev); - } -@@ -1912,6 +1913,7 @@ static int xillyusb_setup_base_eps(struct xillyusb_dev *xdev) - - dealloc: - endpoint_dealloc(xdev->msg_ep); /* Also frees FIFO mem 
if allocated */ -+ xdev->msg_ep = NULL; - return -ENOMEM; - } - -diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig -index c5b3dc97396a6..d12465c227514 100644 ---- a/drivers/clk/Kconfig -+++ b/drivers/clk/Kconfig -@@ -83,7 +83,7 @@ config COMMON_CLK_RK808 - config COMMON_CLK_HI655X - tristate "Clock driver for Hi655x" if EXPERT - depends on (MFD_HI655X_PMIC || COMPILE_TEST) -- depends on REGMAP -+ select REGMAP - default MFD_HI655X_PMIC - help - This driver supports the hi655x PMIC clock. This -@@ -380,6 +380,7 @@ config COMMON_CLK_BD718XX - config COMMON_CLK_FIXED_MMIO - bool "Clock driver for Memory Mapped Fixed values" - depends on COMMON_CLK && OF -+ depends on HAS_IOMEM - help - Support for Memory Mapped IO Fixed clocks - -diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c -index a2f34d13fb543..6ea7da1d6d755 100644 ---- a/drivers/clk/actions/owl-s700.c -+++ b/drivers/clk/actions/owl-s700.c -@@ -162,6 +162,7 @@ static struct clk_div_table hdmia_div_table[] = { - - static struct clk_div_table rmii_div_table[] = { - {0, 4}, {1, 10}, -+ {0, 0} - }; - - /* divider clocks */ -diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c -index 790890978424a..5144ada2c7e1a 100644 ---- a/drivers/clk/actions/owl-s900.c -+++ b/drivers/clk/actions/owl-s900.c -@@ -140,7 +140,7 @@ static struct clk_div_table rmii_ref_div_table[] = { - - static struct clk_div_table usb3_mac_div_table[] = { - { 1, 2 }, { 2, 3 }, { 3, 4 }, -- { 0, 8 }, -+ { 0, 0 } - }; - - static struct clk_div_table i2s_div_table[] = { -diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c -index 428a6f4b9ebc5..8d36e615cd9dd 100644 ---- a/drivers/clk/at91/at91rm9200.c -+++ b/drivers/clk/at91/at91rm9200.c -@@ -40,7 +40,7 @@ static const struct clk_pll_characteristics rm9200_pll_characteristics = { - }; - - static const struct sck at91rm9200_systemck[] = { -- { .n = "udpck", .p = "usbck", .id = 2 }, -+ { .n = "udpck", .p = "usbck", .id = 1 }, - { .n = "uhpck", .p = "usbck", .id = 4 }, - { .n = "pck0", .p = "prog0", .id = 8 }, - { .n = "pck1", .p = "prog1", .id = 9 }, -diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c -index b656d25a97678..fe772baeb15ff 100644 ---- a/drivers/clk/at91/clk-generated.c -+++ b/drivers/clk/at91/clk-generated.c -@@ -106,6 +106,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req, - tmp_rate = parent_rate; - else - tmp_rate = parent_rate / div; -+ -+ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate) -+ return; -+ - tmp_diff = abs(req->rate - tmp_rate); - - if (*best_diff < 0 || *best_diff >= tmp_diff) { -diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c -index a80427980bf73..04d0dd8385945 100644 ---- a/drivers/clk/at91/clk-master.c -+++ b/drivers/clk/at91/clk-master.c -@@ -280,7 +280,7 @@ static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate, - - else if (pres == 3) - pres = MASTER_PRES_MAX; -- else -+ else if (pres) - pres = ffs(pres) - 1; - - spin_lock_irqsave(master->lock, flags); -@@ -309,7 +309,7 @@ static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw, - spin_unlock_irqrestore(master->lock, flags); - - pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK; -- if (pres == 3 && characteristics->have_div3_pres) -+ if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres) - pres = 3; - else - pres = (1 << pres); -@@ -610,7 +610,7 @@ static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate, - - if 
(div == 3) - div = MASTER_PRES_MAX; -- else -+ else if (div) - div = ffs(div) - 1; - - spin_lock_irqsave(master->lock, flags); -diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c -index 34e3ab13741ac..a6600afa21454 100644 ---- a/drivers/clk/at91/clk-sam9x60-pll.c -+++ b/drivers/clk/at91/clk-sam9x60-pll.c -@@ -71,8 +71,8 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw, - struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); - struct sam9x60_frac *frac = to_sam9x60_frac(core); - -- return (parent_rate * (frac->mul + 1) + -- ((u64)parent_rate * frac->frac >> 22)); -+ return parent_rate * (frac->mul + 1) + -+ DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22)); - } - - static int sam9x60_frac_pll_prepare(struct clk_hw *hw) -@@ -561,7 +561,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, - - ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN, - parent_rate, true); -- if (ret <= 0) { -+ if (ret < 0) { - hw = ERR_PTR(ret); - goto free; - } -diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c -index 20ee9dccee787..b40035b011d0a 100644 ---- a/drivers/clk/at91/pmc.c -+++ b/drivers/clk/at91/pmc.c -@@ -267,6 +267,11 @@ static int __init pmc_register_ops(void) - if (!np) - return -ENODEV; - -+ if (!of_device_is_available(np)) { -+ of_node_put(np); -+ return -ENODEV; -+ } -+ - pmcreg = device_node_to_regmap(np); - of_node_put(np); - if (IS_ERR(pmcreg)) -diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c -index cf8c079aa086a..9eed97a299d0f 100644 ---- a/drivers/clk/at91/sama7g5.c -+++ b/drivers/clk/at91/sama7g5.c -@@ -687,16 +687,16 @@ static const struct { - { .n = "pdmc0_gclk", - .id = 68, - .r = { .max = 50000000 }, -- .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, -- .pp_mux_table = { 5, 8, }, -+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, -+ .pp_mux_table = { 5, 9, }, - .pp_count = 2, - .pp_chg_id = INT_MIN, }, - - { .n = "pdmc1_gclk", - .id = 69, - .r = { .max = 50000000, }, -- .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, -- .pp_mux_table = { 5, 8, }, -+ .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, -+ .pp_mux_table = { 5, 9, }, - .pp_count = 2, - .pp_chg_id = INT_MIN, }, - -@@ -982,16 +982,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) - } - - parent_names[0] = "cpupll_divpmcck"; -- hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names, -- &mck0_layout, &mck0_characteristics, -- &pmc_mck0_lock, -- CLK_SET_RATE_PARENT, 0); -- if (IS_ERR(hw)) -- goto err_free; -- -- sama7g5_pmc->chws[PMC_CPU] = hw; -- -- hw = at91_clk_register_master_div(regmap, "mck0", "cpuck", -+ hw = at91_clk_register_master_div(regmap, "mck0", "cpupll_divpmcck", - &mck0_layout, &mck0_characteristics, - &pmc_mck0_lock, 0); - if (IS_ERR(hw)) -diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c -index 4062092d67f90..a6642f3d33d44 100644 ---- a/drivers/clk/baikal-t1/ccu-div.c -+++ b/drivers/clk/baikal-t1/ccu-div.c -@@ -34,6 +34,7 @@ - #define CCU_DIV_CTL_CLKDIV_MASK(_width) \ - GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD) - #define CCU_DIV_CTL_LOCK_SHIFTED BIT(27) -+#define CCU_DIV_CTL_GATE_REF_BUF BIT(28) - #define CCU_DIV_CTL_LOCK_NORMAL BIT(31) - - #define CCU_DIV_RST_DELAY_US 1 -@@ -170,6 +171,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw) - return !!(val & CCU_DIV_CTL_EN); - } - -+static int ccu_div_buf_enable(struct clk_hw *hw) -+{ -+ struct ccu_div *div = to_ccu_div(hw); -+ 
unsigned long flags; -+ -+ spin_lock_irqsave(&div->lock, flags); -+ regmap_update_bits(div->sys_regs, div->reg_ctl, -+ CCU_DIV_CTL_GATE_REF_BUF, 0); -+ spin_unlock_irqrestore(&div->lock, flags); -+ -+ return 0; -+} -+ -+static void ccu_div_buf_disable(struct clk_hw *hw) -+{ -+ struct ccu_div *div = to_ccu_div(hw); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&div->lock, flags); -+ regmap_update_bits(div->sys_regs, div->reg_ctl, -+ CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF); -+ spin_unlock_irqrestore(&div->lock, flags); -+} -+ -+static int ccu_div_buf_is_enabled(struct clk_hw *hw) -+{ -+ struct ccu_div *div = to_ccu_div(hw); -+ u32 val = 0; -+ -+ regmap_read(div->sys_regs, div->reg_ctl, &val); -+ -+ return !(val & CCU_DIV_CTL_GATE_REF_BUF); -+} -+ - static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) - { -@@ -323,6 +358,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = { - CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN), - CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST), - CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV), -+ CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF), - CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL) - }; - -@@ -441,6 +477,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry) - continue; - } - -+ if (!strcmp("div_buf", name)) -+ continue; -+ - bits[didx] = ccu_div_bits[bidx]; - bits[didx].div = div; - -@@ -477,6 +516,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry) - &ccu_div_dbgfs_fixed_clkdiv_fops); - } - -+static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry) -+{ -+ struct ccu_div *div = to_ccu_div(hw); -+ struct ccu_div_dbgfs_bit *bit; -+ -+ bit = kmalloc(sizeof(*bit), GFP_KERNEL); -+ if (!bit) -+ return; -+ -+ *bit = ccu_div_bits[3]; -+ bit->div = div; -+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, -+ &ccu_div_dbgfs_bit_fops); -+} -+ - static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) - { - struct ccu_div *div = to_ccu_div(hw); -@@ -489,6 +543,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) - - #define ccu_div_var_debug_init NULL - #define ccu_div_gate_debug_init NULL -+#define ccu_div_buf_debug_init NULL - #define ccu_div_fixed_debug_init NULL - - #endif /* !CONFIG_DEBUG_FS */ -@@ -520,6 +575,13 @@ static const struct clk_ops ccu_div_gate_ops = { - .debug_init = ccu_div_gate_debug_init - }; - -+static const struct clk_ops ccu_div_buf_ops = { -+ .enable = ccu_div_buf_enable, -+ .disable = ccu_div_buf_disable, -+ .is_enabled = ccu_div_buf_is_enabled, -+ .debug_init = ccu_div_buf_debug_init -+}; -+ - static const struct clk_ops ccu_div_fixed_ops = { - .recalc_rate = ccu_div_fixed_recalc_rate, - .round_rate = ccu_div_fixed_round_rate, -@@ -566,6 +628,8 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) - } else if (div_init->type == CCU_DIV_GATE) { - hw_init.ops = &ccu_div_gate_ops; - div->divider = div_init->divider; -+ } else if (div_init->type == CCU_DIV_BUF) { -+ hw_init.ops = &ccu_div_buf_ops; - } else if (div_init->type == CCU_DIV_FIXED) { - hw_init.ops = &ccu_div_fixed_ops; - div->divider = div_init->divider; -@@ -579,6 +643,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) - goto err_free_div; - } - parent_data.fw_name = div_init->parent_name; -+ parent_data.name = div_init->parent_name; - hw_init.parent_data = 
&parent_data; - hw_init.num_parents = 1; - -diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h -index 795665caefbdc..4eb49ff4803c6 100644 ---- a/drivers/clk/baikal-t1/ccu-div.h -+++ b/drivers/clk/baikal-t1/ccu-div.h -@@ -13,6 +13,14 @@ - #include - #include - -+/* -+ * CCU Divider private clock IDs -+ * @CCU_SYS_SATA_CLK: CCU SATA internal clock -+ * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock -+ */ -+#define CCU_SYS_SATA_CLK -1 -+#define CCU_SYS_XGMAC_CLK -2 -+ - /* - * CCU Divider private flags - * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1. -@@ -31,11 +39,13 @@ - * enum ccu_div_type - CCU Divider types - * @CCU_DIV_VAR: Clocks gate with variable divider. - * @CCU_DIV_GATE: Clocks gate with fixed divider. -+ * @CCU_DIV_BUF: Clock gate with no divider. - * @CCU_DIV_FIXED: Ungateable clock with fixed divider. - */ - enum ccu_div_type { - CCU_DIV_VAR, - CCU_DIV_GATE, -+ CCU_DIV_BUF, - CCU_DIV_FIXED - }; - -diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c -index f141fda12b09a..90f4fda406ee6 100644 ---- a/drivers/clk/baikal-t1/clk-ccu-div.c -+++ b/drivers/clk/baikal-t1/clk-ccu-div.c -@@ -76,6 +76,16 @@ - .divider = _divider \ - } - -+#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \ -+ { \ -+ .id = _id, \ -+ .name = _name, \ -+ .parent_name = _pname, \ -+ .base = _base, \ -+ .type = CCU_DIV_BUF, \ -+ .flags = _flags \ -+ } -+ - #define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \ - { \ - .id = _id, \ -@@ -188,11 +198,14 @@ static const struct ccu_div_rst_map axi_rst_map[] = { - * for the SoC devices registers IO-operations. - */ - static const struct ccu_div_info sys_info[] = { -- CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", -+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk", - "sata_clk", CCU_SYS_SATA_REF_BASE, 4, - CLK_SET_RATE_GATE, - CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED | - CCU_DIV_RESET_DOMAIN), -+ CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", -+ "sys_sata_clk", CCU_SYS_SATA_REF_BASE, -+ CLK_SET_RATE_PARENT), - CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk", - "pcie_clk", CCU_SYS_APB_BASE, 5, - CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), -@@ -204,10 +217,12 @@ static const struct ccu_div_info sys_info[] = { - "eth_clk", CCU_SYS_GMAC1_BASE, 5), - CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk", - "eth_clk", 10), -- CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", -- "eth_clk", CCU_SYS_XGMAC_BASE, 8), -+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk", -+ "eth_clk", CCU_SYS_XGMAC_BASE, 1), -+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", -+ "sys_xgmac_clk", 8), - CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk", -- "eth_clk", 10), -+ "sys_xgmac_clk", 8), - CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk", - "eth_clk", CCU_SYS_USB_BASE, 10), - CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk", -@@ -396,6 +411,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data) - init.base = info->base; - init.sys_regs = data->sys_regs; - init.divider = info->divider; -+ } else if (init.type == CCU_DIV_BUF) { -+ init.base = info->base; -+ init.sys_regs = data->sys_regs; - } else { - init.divider = info->divider; - } -diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c -index a254512965eb8..141ce19bc5700 100644 ---- a/drivers/clk/bcm/clk-bcm2835.c -+++ b/drivers/clk/bcm/clk-bcm2835.c -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ 
-502,6 +503,8 @@ struct bcm2835_clock_data { - bool low_jitter; - - u32 tcnt_mux; -+ -+ bool round_up; - }; - - struct bcm2835_gate_data { -@@ -932,8 +935,7 @@ static int bcm2835_clock_is_on(struct clk_hw *hw) - - static u32 bcm2835_clock_choose_div(struct clk_hw *hw, - unsigned long rate, -- unsigned long parent_rate, -- bool round_up) -+ unsigned long parent_rate) - { - struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); - const struct bcm2835_clock_data *data = clock->data; -@@ -945,10 +947,6 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, - - rem = do_div(temp, rate); - div = temp; -- -- /* Round up and mask off the unused bits */ -- if (round_up && ((div & unused_frac_mask) != 0 || rem != 0)) -- div += unused_frac_mask + 1; - div &= ~unused_frac_mask; - - /* different clamping limits apply for a mash clock */ -@@ -972,9 +970,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, - return div; - } - --static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, -- unsigned long parent_rate, -- u32 div) -+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, -+ unsigned long parent_rate, -+ u32 div) - { - const struct bcm2835_clock_data *data = clock->data; - u64 temp; -@@ -999,12 +997,34 @@ static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock, - return temp; - } - -+static unsigned long bcm2835_round_rate(unsigned long rate) -+{ -+ unsigned long scaler; -+ unsigned long limit; -+ -+ limit = rate / 100000; -+ -+ scaler = 1; -+ while (scaler < limit) -+ scaler *= 10; -+ -+ /* -+ * If increasing a clock by less than 0.1% changes it -+ * from ..999.. to ..000.., round up. -+ */ -+ if ((rate + scaler - 1) / scaler % 1000 == 0) -+ rate = roundup(rate, scaler); -+ -+ return rate; -+} -+ - static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw, - unsigned long parent_rate) - { - struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); - struct bcm2835_cprman *cprman = clock->cprman; - const struct bcm2835_clock_data *data = clock->data; -+ unsigned long rate; - u32 div; - - if (data->int_bits == 0 && data->frac_bits == 0) -@@ -1012,7 +1032,12 @@ static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw, - - div = cprman_read(cprman, data->div_reg); - -- return bcm2835_clock_rate_from_divisor(clock, parent_rate, div); -+ rate = bcm2835_clock_rate_from_divisor(clock, parent_rate, div); -+ -+ if (data->round_up) -+ rate = bcm2835_round_rate(rate); -+ -+ return rate; - } - - static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock) -@@ -1079,7 +1104,7 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw, - struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); - struct bcm2835_cprman *cprman = clock->cprman; - const struct bcm2835_clock_data *data = clock->data; -- u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false); -+ u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate); - u32 ctl; - - spin_lock(&cprman->regs_lock); -@@ -1130,7 +1155,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw, - - if (!(BIT(parent_idx) & data->set_rate_parent)) { - *prate = clk_hw_get_rate(parent); -- *div = bcm2835_clock_choose_div(hw, rate, *prate, true); -+ *div = bcm2835_clock_choose_div(hw, rate, *prate); - - *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div); - -@@ -1216,7 +1241,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw, - rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate, - &div, &prate, - &avgrate); -- if (rate > best_rate && 
rate <= req->rate) { -+ if (abs(req->rate - rate) < abs(req->rate - best_rate)) { - best_parent = parent; - best_prate = prate; - best_rate = rate; -@@ -1790,7 +1815,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { - .load_mask = CM_PLLC_LOADPER, - .hold_mask = CM_PLLC_HOLDPER, - .fixed_divider = 1, -- .flags = CLK_SET_RATE_PARENT), -+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), - - /* - * PLLD is the display PLL, used to drive DSI display panels. -@@ -2149,7 +2174,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { - .div_reg = CM_UARTDIV, - .int_bits = 10, - .frac_bits = 12, -- .tcnt_mux = 28), -+ .tcnt_mux = 28, -+ .round_up = true), - - /* TV encoder clock. Only operating frequency is 108Mhz. */ - [BCM2835_CLOCK_VEC] = REGISTER_PER_CLK( -diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c -index 33da30f99c79b..d39c44b61c523 100644 ---- a/drivers/clk/bcm/clk-iproc-pll.c -+++ b/drivers/clk/bcm/clk-iproc-pll.c -@@ -736,6 +736,7 @@ void iproc_pll_clk_setup(struct device_node *node, - const char *parent_name; - struct iproc_clk *iclk_array; - struct clk_hw_onecell_data *clk_data; -+ const char *clk_name; - - if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl)) - return; -@@ -783,7 +784,12 @@ void iproc_pll_clk_setup(struct device_node *node, - iclk = &iclk_array[0]; - iclk->pll = pll; - -- init.name = node->name; -+ ret = of_property_read_string_index(node, "clock-output-names", -+ 0, &clk_name); -+ if (WARN_ON(ret)) -+ goto err_pll_register; -+ -+ init.name = clk_name; - init.ops = &iproc_pll_ops; - init.flags = 0; - parent_name = of_clk_get_parent_name(node, 0); -@@ -803,13 +809,11 @@ void iproc_pll_clk_setup(struct device_node *node, - goto err_pll_register; - - clk_data->hws[0] = &iclk->hw; -+ parent_name = clk_name; - - /* now initialize and register all leaf clocks */ - for (i = 1; i < num_clks; i++) { -- const char *clk_name; -- - memset(&init, 0, sizeof(init)); -- parent_name = node->name; - - ret = of_property_read_string_index(node, "clock-output-names", - i, &clk_name); -diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c -index dd3b71eafabf3..56c5166f841ae 100644 ---- a/drivers/clk/bcm/clk-raspberrypi.c -+++ b/drivers/clk/bcm/clk-raspberrypi.c -@@ -139,7 +139,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw, - ret = raspberrypi_clock_property(rpi->firmware, data, - RPI_FIRMWARE_GET_CLOCK_RATE, &val); - if (ret) -- return ret; -+ return 0; - - return val; - } -@@ -156,7 +156,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate, - ret = raspberrypi_clock_property(rpi->firmware, data, - RPI_FIRMWARE_SET_CLOCK_RATE, &_rate); - if (ret) -- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d", -+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n", - clk_hw_get_name(hw), ret); - - return ret; -@@ -208,7 +208,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi, - RPI_FIRMWARE_GET_MIN_CLOCK_RATE, - &min_rate); - if (ret) { -- dev_err(rpi->dev, "Failed to get clock %d min freq: %d", -+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n", - id, ret); - return ERR_PTR(ret); - } -@@ -251,8 +251,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi, - struct rpi_firmware_get_clocks_response *clks; - int ret; - -+ /* -+ * The firmware doesn't guarantee that the last element of -+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional -+ * zero element as sentinel. 
-+ */ - clks = devm_kcalloc(rpi->dev, -- sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID, -+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks), - GFP_KERNEL); - if (!clks) - return -ENOMEM; -diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c -index bccdfa00fd373..67a9edbba29c4 100644 ---- a/drivers/clk/berlin/bg2.c -+++ b/drivers/clk/berlin/bg2.c -@@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np) - int n, ret; - - clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); -- if (!clk_data) -+ if (!clk_data) { -+ of_node_put(parent_np); - return; -+ } - clk_data->num = MAX_CLKS; - hws = clk_data->hws; - - gbase = of_iomap(parent_np, 0); -+ of_node_put(parent_np); - if (!gbase) - return; - -diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c -index e9518d35f262e..dd2784bb75b64 100644 ---- a/drivers/clk/berlin/bg2q.c -+++ b/drivers/clk/berlin/bg2q.c -@@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np) - int n, ret; - - clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL); -- if (!clk_data) -+ if (!clk_data) { -+ of_node_put(parent_np); - return; -+ } - clk_data->num = MAX_CLKS; - hws = clk_data->hws; - - gbase = of_iomap(parent_np, 0); - if (!gbase) { -+ of_node_put(parent_np); - pr_err("%pOF: Unable to map global base\n", np); - return; - } - - /* BG2Q CPU PLL is not part of global registers */ - cpupll_base = of_iomap(parent_np, 1); -+ of_node_put(parent_np); - if (!cpupll_base) { - pr_err("%pOF: Unable to map cpupll base\n", np); - iounmap(gbase); -diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c -index bc3be5f3eae15..9c3305bcb27ae 100644 ---- a/drivers/clk/clk-ast2600.c -+++ b/drivers/clk/clk-ast2600.c -@@ -51,6 +51,8 @@ static DEFINE_SPINLOCK(aspeed_g6_clk_lock); - static struct clk_hw_onecell_data *aspeed_g6_clk_data; - - static void __iomem *scu_g6_base; -+/* AST2600 revision: A0, A1, A2, etc */ -+static u8 soc_rev; - - /* - * Clocks marked with CLK_IS_CRITICAL: -@@ -191,9 +193,8 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val) - static struct clk_hw *ast2600_calc_apll(const char *name, u32 val) - { - unsigned int mult, div; -- u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV); - -- if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) { -+ if (soc_rev >= 2) { - if (val & BIT(24)) { - /* Pass through mode */ - mult = div = 1; -@@ -621,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) - regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */ - - /* P-Bus (BCLK) clock divider */ -- hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0, -+ hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0, - ast2600_div_table, - &aspeed_g6_clk_lock); -@@ -707,7 +708,7 @@ static const u32 ast2600_a1_axi_ahb200_tbl[] = { - static void __init aspeed_g6_cc(struct regmap *map) - { - struct clk_hw *hw; -- u32 val, div, divbits, chip_id, axi_div, ahb_div; -+ u32 val, div, divbits, axi_div, ahb_div; - - clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); - -@@ -738,8 +739,7 @@ static void __init aspeed_g6_cc(struct regmap *map) - axi_div = 2; - - divbits = (val >> 11) & 0x3; -- regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); -- if (chip_id & BIT(16)) { -+ if (soc_rev >= 1) { - if (!divbits) { - ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; - if (val & BIT(16)) -@@ -784,6 +784,8 @@ static void __init aspeed_g6_cc_init(struct device_node *np) - if (!scu_g6_base) - 
return; - -+ soc_rev = (readl(scu_g6_base + ASPEED_G6_SILICON_REV) & CHIP_REVISION_ID) >> 16; -+ - aspeed_g6_clk_data = kzalloc(struct_size(aspeed_g6_clk_data, hws, - ASPEED_G6_NUM_CLKS), GFP_KERNEL); - if (!aspeed_g6_clk_data) -diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c -index e6d6599d310a1..fad78a22218e8 100644 ---- a/drivers/clk/clk-bm1880.c -+++ b/drivers/clk/clk-bm1880.c -@@ -522,14 +522,6 @@ static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_cl - return hw; - } - --static void bm1880_clk_unregister_pll(struct clk_hw *hw) --{ -- struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw); -- -- clk_hw_unregister(hw); -- kfree(pll_hw); --} -- - static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, - int num_clks, - struct bm1880_clock_data *data) -@@ -555,7 +547,7 @@ static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, - - err_clk: - while (i--) -- bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]); -+ clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]); - - return PTR_ERR(hw); - } -@@ -695,14 +687,6 @@ static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_cl - return hw; - } - --static void bm1880_clk_unregister_div(struct clk_hw *hw) --{ -- struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw); -- -- clk_hw_unregister(hw); -- kfree(div_hw); --} -- - static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, - int num_clks, - struct bm1880_clock_data *data) -@@ -729,7 +713,7 @@ static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, - - err_clk: - while (i--) -- bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]); -+ clk_hw_unregister(data->hw_data.hws[clks[i].div.id]); - - return PTR_ERR(hw); - } -diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c -index 308b353815e17..470d91d7314db 100644 ---- a/drivers/clk/clk-cdce925.c -+++ b/drivers/clk/clk-cdce925.c -@@ -705,6 +705,10 @@ static int cdce925_probe(struct i2c_client *client, - for (i = 0; i < data->chip_info->num_plls; ++i) { - pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d", - client->dev.of_node, i); -+ if (!pll_clk_name[i]) { -+ err = -ENOMEM; -+ goto error; -+ } - init.name = pll_clk_name[i]; - data->pll[i].chip = data; - data->pll[i].hw.init = &init; -@@ -746,6 +750,10 @@ static int cdce925_probe(struct i2c_client *client, - init.num_parents = 1; - init.parent_names = &parent_name; /* Mux Y1 to input */ - init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node); -+ if (!init.name) { -+ err = -ENOMEM; -+ goto error; -+ } - data->clk[0].chip = data; - data->clk[0].hw.init = &init; - data->clk[0].index = 0; -@@ -764,6 +772,10 @@ static int cdce925_probe(struct i2c_client *client, - for (i = 1; i < data->chip_info->num_outputs; ++i) { - init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d", - client->dev.of_node, i+1); -+ if (!init.name) { -+ err = -ENOMEM; -+ goto error; -+ } - data->clk[i].chip = data; - data->clk[i].hw.init = &init; - data->clk[i].index = i; -diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c -index a2c6486ef1708..f8417ee2961aa 100644 ---- a/drivers/clk/clk-clps711x.c -+++ b/drivers/clk/clk-clps711x.c -@@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = { - { .val = 1, .div = 8, }, - { .val = 2, .div = 2, }, - { .val = 3, .div = 1, }, -+ { /* sentinel */ } - }; - - static const struct clk_div_table timer_div_table[] = { - { .val = 0, .div = 256, }, - { .val = 1, .div = 1, }, -+ { /* sentinel */ } - 
}; - - struct clps711x_clk { -diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c -index 2ef819606c417..1a4e6340f95ce 100644 ---- a/drivers/clk/clk-conf.c -+++ b/drivers/clk/clk-conf.c -@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) - else - return rc; - } -- if (clkspec.np == node && !clk_supplier) -+ if (clkspec.np == node && !clk_supplier) { -+ of_node_put(clkspec.np); - return 0; -+ } - pclk = of_clk_get_from_provider(&clkspec); -+ of_node_put(clkspec.np); - if (IS_ERR(pclk)) { - if (PTR_ERR(pclk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get parent clock %d for %pOF\n", -@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) - if (rc < 0) - goto err; - if (clkspec.np == node && !clk_supplier) { -+ of_node_put(clkspec.np); - rc = 0; - goto err; - } - clk = of_clk_get_from_provider(&clkspec); -+ of_node_put(clkspec.np); - if (IS_ERR(clk)) { - if (PTR_ERR(clk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get assigned clock %d for %pOF\n", -@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) - else - return rc; - } -- if (clkspec.np == node && !clk_supplier) -+ if (clkspec.np == node && !clk_supplier) { -+ of_node_put(clkspec.np); - return 0; -+ } - - clk = of_clk_get_from_provider(&clkspec); -+ of_node_put(clkspec.np); - if (IS_ERR(clk)) { - if (PTR_ERR(clk) != -EPROBE_DEFER) - pr_warn("clk: couldn't get clock %d for %pOF\n", -diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c -index f9d5b73343417..737aa70e2cb3d 100644 ---- a/drivers/clk/clk-devres.c -+++ b/drivers/clk/clk-devres.c -@@ -4,42 +4,101 @@ - #include - #include - -+struct devm_clk_state { -+ struct clk *clk; -+ void (*exit)(struct clk *clk); -+}; -+ - static void devm_clk_release(struct device *dev, void *res) - { -- clk_put(*(struct clk **)res); -+ struct devm_clk_state *state = res; -+ -+ if (state->exit) -+ state->exit(state->clk); -+ -+ clk_put(state->clk); - } - --struct clk *devm_clk_get(struct device *dev, const char *id) -+static struct clk *__devm_clk_get(struct device *dev, const char *id, -+ struct clk *(*get)(struct device *dev, const char *id), -+ int (*init)(struct clk *clk), -+ void (*exit)(struct clk *clk)) - { -- struct clk **ptr, *clk; -+ struct devm_clk_state *state; -+ struct clk *clk; -+ int ret; - -- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); -- if (!ptr) -+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); -+ if (!state) - return ERR_PTR(-ENOMEM); - -- clk = clk_get(dev, id); -- if (!IS_ERR(clk)) { -- *ptr = clk; -- devres_add(dev, ptr); -- } else { -- devres_free(ptr); -+ clk = get(dev, id); -+ if (IS_ERR(clk)) { -+ ret = PTR_ERR(clk); -+ goto err_clk_get; - } - -+ if (init) { -+ ret = init(clk); -+ if (ret) -+ goto err_clk_init; -+ } -+ -+ state->clk = clk; -+ state->exit = exit; -+ -+ devres_add(dev, state); -+ - return clk; -+ -+err_clk_init: -+ -+ clk_put(clk); -+err_clk_get: -+ -+ devres_free(state); -+ return ERR_PTR(ret); -+} -+ -+struct clk *devm_clk_get(struct device *dev, const char *id) -+{ -+ return __devm_clk_get(dev, id, clk_get, NULL, NULL); - } - EXPORT_SYMBOL(devm_clk_get); - --struct clk *devm_clk_get_optional(struct device *dev, const char *id) -+struct clk *devm_clk_get_prepared(struct device *dev, const char *id) - { -- struct clk *clk = devm_clk_get(dev, id); -+ return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare); -+} -+EXPORT_SYMBOL_GPL(devm_clk_get_prepared); - -- if (clk == 
ERR_PTR(-ENOENT)) -- return NULL; -+struct clk *devm_clk_get_enabled(struct device *dev, const char *id) -+{ -+ return __devm_clk_get(dev, id, clk_get, -+ clk_prepare_enable, clk_disable_unprepare); -+} -+EXPORT_SYMBOL_GPL(devm_clk_get_enabled); - -- return clk; -+struct clk *devm_clk_get_optional(struct device *dev, const char *id) -+{ -+ return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL); - } - EXPORT_SYMBOL(devm_clk_get_optional); - -+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id) -+{ -+ return __devm_clk_get(dev, id, clk_get_optional, -+ clk_prepare, clk_unprepare); -+} -+EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared); -+ -+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id) -+{ -+ return __devm_clk_get(dev, id, clk_get_optional, -+ clk_prepare_enable, clk_disable_unprepare); -+} -+EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled); -+ - struct clk_bulk_devres { - struct clk_bulk_data *clks; - int num_clks; -@@ -146,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put); - struct clk *devm_get_clk_from_child(struct device *dev, - struct device_node *np, const char *con_id) - { -- struct clk **ptr, *clk; -+ struct devm_clk_state *state; -+ struct clk *clk; - -- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); -- if (!ptr) -+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); -+ if (!state) - return ERR_PTR(-ENOMEM); - - clk = of_clk_get_by_name(np, con_id); - if (!IS_ERR(clk)) { -- *ptr = clk; -- devres_add(dev, ptr); -+ state->clk = clk; -+ devres_add(dev, state); - } else { -- devres_free(ptr); -+ devres_free(state); - } - - return clk; -diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c -index 78d5ea669fea7..2fe36f579ac5e 100644 ---- a/drivers/clk/clk-oxnas.c -+++ b/drivers/clk/clk-oxnas.c -@@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = { - - static int oxnas_stdclk_probe(struct platform_device *pdev) - { -- struct device_node *np = pdev->dev.of_node; -+ struct device_node *np = pdev->dev.of_node, *parent_np; - const struct oxnas_stdclk_data *data; - const struct of_device_id *id; - struct regmap *regmap; -@@ -219,7 +219,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev) - return -ENODEV; - data = id->data; - -- regmap = syscon_node_to_regmap(of_get_parent(np)); -+ parent_np = of_get_parent(np); -+ regmap = syscon_node_to_regmap(parent_np); -+ of_node_put(parent_np); - if (IS_ERR(regmap)) { - dev_err(&pdev->dev, "failed to have parent regmap\n"); - return PTR_ERR(regmap); -diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c -index 88898b97a4431..5eddb9f0d6bdb 100644 ---- a/drivers/clk/clk-qoriq.c -+++ b/drivers/clk/clk-qoriq.c -@@ -1063,8 +1063,13 @@ static void __init _clockgen_init(struct device_node *np, bool legacy); - */ - static void __init legacy_init_clockgen(struct device_node *np) - { -- if (!clockgen.node) -- _clockgen_init(of_get_parent(np), true); -+ if (!clockgen.node) { -+ struct device_node *parent_np; -+ -+ parent_np = of_get_parent(np); -+ _clockgen_init(parent_np, true); -+ of_node_put(parent_np); -+ } - } - - /* Legacy node */ -@@ -1159,6 +1164,7 @@ static struct clk * __init create_sysclk(const char *name) - sysclk = of_get_child_by_name(clockgen.node, "sysclk"); - if (sysclk) { - clk = sysclk_from_fixed(sysclk, name); -+ of_node_put(sysclk); - if (!IS_ERR(clk)) - return clk; - } -diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c -index 57ae183982d8c..91a6bc74ebd5a 100644 ---- 
a/drivers/clk/clk-si5341.c -+++ b/drivers/clk/clk-si5341.c -@@ -798,6 +798,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, - u32 r_divider; - u8 r[3]; - -+ err = regmap_read(output->data->regmap, -+ SI5341_OUT_CONFIG(output), &val); -+ if (err < 0) -+ return err; -+ -+ /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */ -+ if (val & SI5341_OUT_CFG_RDIV_FORCE2) -+ return parent_rate / 2; -+ - err = regmap_bulk_read(output->data->regmap, - SI5341_OUT_R_REG(output), r, 3); - if (err < 0) -@@ -814,13 +823,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, - r_divider += 1; - r_divider <<= 1; - -- err = regmap_read(output->data->regmap, -- SI5341_OUT_CONFIG(output), &val); -- if (err < 0) -- return err; -- -- if (val & SI5341_OUT_CFG_RDIV_FORCE2) -- r_divider = 2; - - return parent_rate / r_divider; - } -@@ -1552,7 +1554,7 @@ static int si5341_probe(struct i2c_client *client, - struct clk_init_data init; - struct clk *input; - const char *root_clock_name; -- const char *synth_clock_names[SI5341_NUM_SYNTH]; -+ const char *synth_clock_names[SI5341_NUM_SYNTH] = { NULL }; - int err; - unsigned int i; - struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS]; -@@ -1696,6 +1698,10 @@ static int si5341_probe(struct i2c_client *client, - for (i = 0; i < data->num_synth; ++i) { - synth_clock_names[i] = devm_kasprintf(&client->dev, GFP_KERNEL, - "%s.N%u", client->dev.of_node->name, i); -+ if (!synth_clock_names[i]) { -+ err = -ENOMEM; -+ goto free_clk_names; -+ } - init.name = synth_clock_names[i]; - data->synth[i].index = i; - data->synth[i].data = data; -@@ -1704,6 +1710,7 @@ static int si5341_probe(struct i2c_client *client, - if (err) { - dev_err(&client->dev, - "synth N%u registration failed\n", i); -+ goto free_clk_names; - } - } - -@@ -1713,6 +1720,10 @@ static int si5341_probe(struct i2c_client *client, - for (i = 0; i < data->num_outputs; ++i) { - init.name = kasprintf(GFP_KERNEL, "%s.%d", - client->dev.of_node->name, i); -+ if (!init.name) { -+ err = -ENOMEM; -+ goto free_clk_names; -+ } - init.flags = config[i].synth_master ? 
CLK_SET_RATE_PARENT : 0; - data->clk[i].index = i; - data->clk[i].data = data; -@@ -1734,17 +1745,17 @@ static int si5341_probe(struct i2c_client *client, - if (err) { - dev_err(&client->dev, - "output %u registration failed\n", i); -- goto cleanup; -+ goto free_clk_names; - } - if (config[i].always_on) - clk_prepare(data->clk[i].hw.clk); - } - -- err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get, -+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get, - data); - if (err) { - dev_err(&client->dev, "unable to add clk provider\n"); -- goto cleanup; -+ goto free_clk_names; - } - - if (initialization_required) { -@@ -1752,11 +1763,11 @@ static int si5341_probe(struct i2c_client *client, - regcache_cache_only(data->regmap, false); - err = regcache_sync(data->regmap); - if (err < 0) -- goto cleanup; -+ goto free_clk_names; - - err = si5341_finalize_defaults(data); - if (err < 0) -- goto cleanup; -+ goto free_clk_names; - } - - /* wait for device to report input clock present and PLL lock */ -@@ -1765,32 +1776,31 @@ static int si5341_probe(struct i2c_client *client, - 10000, 250000); - if (err) { - dev_err(&client->dev, "Error waiting for input clock or PLL lock\n"); -- goto cleanup; -+ goto free_clk_names; - } - - /* clear sticky alarm bits from initialization */ - err = regmap_write(data->regmap, SI5341_STATUS_STICKY, 0); - if (err) { - dev_err(&client->dev, "unable to clear sticky status\n"); -- goto cleanup; -+ goto free_clk_names; - } - - err = sysfs_create_files(&client->dev.kobj, si5341_attributes); -- if (err) { -+ if (err) - dev_err(&client->dev, "unable to create sysfs files\n"); -- goto cleanup; -- } - -+free_clk_names: - /* Free the names, clk framework makes copies */ - for (i = 0; i < data->num_synth; ++i) - devm_kfree(&client->dev, (void *)synth_clock_names[i]); - -- return 0; -- - cleanup: -- for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) { -- if (data->clk[i].vddo_reg) -- regulator_disable(data->clk[i].vddo_reg); -+ if (err) { -+ for (i = 0; i < SI5341_MAX_NUM_OUTPUTS; ++i) { -+ if (data->clk[i].vddo_reg) -+ regulator_disable(data->clk[i].vddo_reg); -+ } - } - return err; - } -diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c -index af46176ad0539..473dfe632cc57 100644 ---- a/drivers/clk/clk-stm32f4.c -+++ b/drivers/clk/clk-stm32f4.c -@@ -129,7 +129,6 @@ static const struct stm32f4_gate_data stm32f429_gates[] __initconst = { - { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, -- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, - }; - - static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { -@@ -211,7 +210,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { - { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, -- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, - }; - - static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { -@@ -286,7 +284,6 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { - { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, -- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, - }; - - static const struct stm32f4_gate_data stm32f769_gates[] __initconst = { -@@ -364,7 +361,6 @@ static const struct stm32f4_gate_data stm32f769_gates[] 
__initconst = { - { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, -- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" }, - }; - -diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c -index c6d3b1ab3d55c..c52f02471bb4f 100644 ---- a/drivers/clk/clk-versaclock5.c -+++ b/drivers/clk/clk-versaclock5.c -@@ -992,6 +992,11 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - } - - init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } -+ - init.ops = &vc5_mux_ops; - init.flags = 0; - init.parent_names = parent_names; -@@ -1006,6 +1011,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.dbl", - client->dev.of_node); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_dbl_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1021,6 +1030,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - /* Register PFD */ - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.pfd", client->dev.of_node); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_pfd_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1038,6 +1051,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - /* Register PLL */ - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.pll", client->dev.of_node); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_pll_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1057,6 +1074,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.fod%d", - client->dev.of_node, idx); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_fod_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1075,6 +1096,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.out0_sel_i2cb", - client->dev.of_node); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_clk_out_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1101,6 +1126,10 @@ static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) - memset(&init, 0, sizeof(init)); - init.name = kasprintf(GFP_KERNEL, "%pOFn.out%d", - client->dev.of_node, idx + 1); -+ if (!init.name) { -+ ret = -ENOMEM; -+ goto err_clk; -+ } - init.ops = &vc5_clk_out_ops; - init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; -@@ -1204,7 +1233,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = { - .model = IDT_VC6_5P49V6901, - .clk_fod_cnt = 4, - .clk_out_cnt = 5, -- .flags = VC5_HAS_PFD_FREQ_DBL, -+ .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT, - }; - - static const struct vc5_chip_info idt_5p49v6965_info = { -diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c -index 65508eb89ec99..5eba83745d8de 100644 ---- a/drivers/clk/clk.c -+++ b/drivers/clk/clk.c -@@ 
-631,6 +631,24 @@ static void clk_core_get_boundaries(struct clk_core *core, - *max_rate = min(*max_rate, clk_user->max_rate); - } - -+static bool clk_core_check_boundaries(struct clk_core *core, -+ unsigned long min_rate, -+ unsigned long max_rate) -+{ -+ struct clk *user; -+ -+ lockdep_assert_held(&prepare_lock); -+ -+ if (min_rate > core->max_rate || max_rate < core->min_rate) -+ return false; -+ -+ hlist_for_each_entry(user, &core->clks, clks_node) -+ if (min_rate > user->max_rate || max_rate < user->min_rate) -+ return false; -+ -+ return true; -+} -+ - void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, - unsigned long max_rate) - { -@@ -828,10 +846,9 @@ static void clk_core_unprepare(struct clk_core *core) - if (core->ops->unprepare) - core->ops->unprepare(core->hw); - -- clk_pm_runtime_put(core); -- - trace_clk_unprepare_complete(core); - clk_core_unprepare(core->parent); -+ clk_pm_runtime_put(core); - } - - static void clk_core_unprepare_lock(struct clk_core *core) -@@ -2347,6 +2364,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) - clk->min_rate = min; - clk->max_rate = max; - -+ if (!clk_core_check_boundaries(clk->core, min, max)) { -+ ret = -EINVAL; -+ goto out; -+ } -+ - rate = clk_core_get_rate_nolock(clk->core); - if (rate < min || rate > max) { - /* -@@ -2375,6 +2397,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) - } - } - -+out: - if (clk->exclusive_count) - clk_core_rate_protect(clk->core); - -@@ -3340,6 +3363,24 @@ static int __init clk_debug_init(void) - { - struct clk_core *core; - -+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS -+ pr_warn("\n"); -+ pr_warn("********************************************************************\n"); -+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); -+ pr_warn("** **\n"); -+ pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); -+ pr_warn("** **\n"); -+ pr_warn("** This means that this kernel is built to expose clk operations **\n"); -+ pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); -+ pr_warn("** to userspace, which may compromise security on your system. **\n"); -+ pr_warn("** **\n"); -+ pr_warn("** If you see this message and you are not debugging the **\n"); -+ pr_warn("** kernel, report this immediately to your vendor! **\n"); -+ pr_warn("** **\n"); -+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); -+ pr_warn("********************************************************************\n"); -+#endif -+ - rootdir = debugfs_create_dir("clk", NULL); - - debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, -@@ -3392,6 +3433,19 @@ static void clk_core_reparent_orphans_nolock(void) - __clk_set_parent_after(orphan, parent, NULL); - __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); -+ -+ /* -+ * __clk_init_parent() will set the initial req_rate to -+ * 0 if the clock doesn't have clk_ops::recalc_rate and -+ * is an orphan when it's registered. -+ * -+ * 'req_rate' is used by clk_set_rate_range() and -+ * clk_put() to trigger a clk_set_rate() call whenever -+ * the boundaries are modified. Let's make sure -+ * 'req_rate' is set to something non-zero so that -+ * clk_set_rate_range() doesn't drop the frequency. 
-+ */ -+ orphan->req_rate = orphan->rate; - } - } - } -@@ -3415,6 +3469,14 @@ static int __clk_core_init(struct clk_core *core) - - clk_prepare_lock(); - -+ /* -+ * Set hw->core after grabbing the prepare_lock to synchronize with -+ * callers of clk_core_fill_parent_index() where we treat hw->core -+ * being NULL as the clk not being registered yet. This is crucial so -+ * that clks aren't parented until their parent is fully registered. -+ */ -+ core->hw->core = core; -+ - ret = clk_pm_runtime_get(core); - if (ret) - goto unlock; -@@ -3579,8 +3641,10 @@ static int __clk_core_init(struct clk_core *core) - out: - clk_pm_runtime_put(core); - unlock: -- if (ret) -+ if (ret) { - hlist_del_init(&core->child_node); -+ core->hw->core = NULL; -+ } - - clk_prepare_unlock(); - -@@ -3702,8 +3766,9 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, - struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id) - { - struct device *dev = hw->core->dev; -+ const char *name = dev ? dev_name(dev) : NULL; - -- return clk_hw_create_clk(dev, hw, dev_name(dev), con_id); -+ return clk_hw_create_clk(dev, hw, name, con_id); - } - EXPORT_SYMBOL(clk_hw_get_clk); - -@@ -3844,7 +3909,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) - core->num_parents = init->num_parents; - core->min_rate = 0; - core->max_rate = ULONG_MAX; -- hw->core = core; - - ret = clk_core_populate_parent_map(core, init); - if (ret) -@@ -3862,7 +3926,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw) - goto fail_create_clk; - } - -- clk_core_link_consumer(hw->core, hw->clk); -+ clk_core_link_consumer(core, hw->clk); - - ret = __clk_core_init(core); - if (!ret) -@@ -4440,6 +4504,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk, - if (!ret) { - devres->clk = clk; - devres->nb = nb; -+ devres_add(dev, devres); - } else { - devres_free(devres); - } -diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c -index 56012a3d02192..9ea1a80acbe8b 100644 ---- a/drivers/clk/hisilicon/clk-hi3559a.c -+++ b/drivers/clk/hisilicon/clk-hi3559a.c -@@ -611,8 +611,8 @@ static struct hisi_mux_clock hi3559av100_shub_mux_clks[] = { - - - /* shub div clk */ --static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}}; --static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}}; -+static struct clk_div_table shub_spi_clk_table[] = {{0, 8}, {1, 4}, {2, 2}, {/*sentinel*/}}; -+static struct clk_div_table shub_uart_div_clk_table[] = {{1, 8}, {2, 4}, {/*sentinel*/}}; - - static struct hisi_divider_clock hi3559av100_shub_div_clks[] = { - { HI3559AV100_SHUB_SPI_SOURCE_CLK, "clk_spi_clk", "shub_clk", 0, 0x20, 24, 2, -diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c -index 04e728538cefe..75e05582cb24f 100644 ---- a/drivers/clk/imx/clk-composite-8m.c -+++ b/drivers/clk/imx/clk-composite-8m.c -@@ -97,7 +97,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw, - int prediv_value; - int div_value; - int ret; -- u32 val; -+ u32 orig, val; - - ret = imx8m_clk_composite_compute_dividers(rate, parent_rate, - &prediv_value, &div_value); -@@ -106,13 +106,15 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw, - - spin_lock_irqsave(divider->lock, flags); - -- val = readl(divider->reg); -- val &= ~((clk_div_mask(divider->width) << divider->shift) | -- (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT)); -+ orig = readl(divider->reg); -+ val = orig & 
~((clk_div_mask(divider->width) << divider->shift) | -+ (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT)); - - val |= (u32)(prediv_value - 1) << divider->shift; - val |= (u32)(div_value - 1) << PCG_DIV_SHIFT; -- writel(val, divider->reg); -+ -+ if (val != orig) -+ writel(val, divider->reg); - - spin_unlock_irqrestore(divider->lock, flags); - -diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c -index fc1bd23d45834..598f3cf4eba49 100644 ---- a/drivers/clk/imx/clk-imx6sx.c -+++ b/drivers/clk/imx/clk-imx6sx.c -@@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) - hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); -- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); -+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); - hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); - hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); - hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); - hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); - hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); -- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); -+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels)); - hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); - hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); - hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); -diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c -index 5dbb6a9377324..206e4c43f68f8 100644 ---- a/drivers/clk/imx/clk-imx6ul.c -+++ b/drivers/clk/imx/clk-imx6ul.c -@@ -161,7 +161,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) - hws[IMX6UL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX6UL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX6UL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); -- hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT); - - /* Do not bypass PLLs initially */ - clk_set_parent(hws[IMX6UL_PLL1_BYPASS]->clk, hws[IMX6UL_CLK_PLL1]->clk); -@@ -270,6 +269,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) - hws[IMX6UL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, 
ARRAY_SIZE(ecspi_sels)); - hws[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT); - hws[IMX6UL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); -+ hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); - - hws[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); - hws[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); -diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c -index c4e0f1c07192f..3f6fd7ef2a68f 100644 ---- a/drivers/clk/imx/clk-imx7d.c -+++ b/drivers/clk/imx/clk-imx7d.c -@@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) - hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); - hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); - hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); -- hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); - hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); - hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); - hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); -diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c -index c55577604e16a..4499da4154f06 100644 ---- a/drivers/clk/imx/clk-imx8mn.c -+++ b/drivers/clk/imx/clk-imx8mn.c -@@ -30,7 +30,7 @@ static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ - static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; - static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", }; - static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; --static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; -+static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", }; - static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; - static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; - -@@ -40,7 +40,7 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl - - static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; - --static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out", -+static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out", - "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; - - static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", -@@ -108,27 +108,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out - "sys_pll3_out", "clk_ext4", }; - - static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -- "clk_ext3", "clk_ext4", }; -+ "video_pll1_out", "sys_pll1_133m", "dummy", -+ "clk_ext2", 
"clk_ext3", }; - - static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -+ "video_pll1_out", "sys_pll1_133m", "dummy", - "clk_ext3", "clk_ext4", }; - - static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -+ "video_pll1_out", "sys_pll1_133m", "dummy", - "clk_ext2", "clk_ext3", }; - - static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -+ "video_pll1_out", "sys_pll1_133m", "dummy", - "clk_ext3", "clk_ext4", }; - - static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -+ "video_pll1_out", "sys_pll1_133m", "dummy", - "clk_ext3", "clk_ext4", }; - - static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -+ "video_pll1_out", "sys_pll1_133m", "dummy", - "clk_ext2", "clk_ext3", }; - - static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", -@@ -140,8 +140,8 @@ static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m" - "clk_ext4", "video_pll1_out", }; - - static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", -- "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out", -- "audio_pll2_out", }; -+ "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out", -+ "video_pll_out", "audio_pll2_out", }; - - static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", - "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", -@@ -228,10 +228,10 @@ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys - "sys_pll1_80m", "video_pll1_out", }; - - static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", -- "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", -+ "m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out", - "sys_pll1_80m", "sys_pll2_166m", }; - --static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", -+static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out", - "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m", - "sys_pll2_500m", "sys_pll1_100m", }; - -@@ -277,9 +277,9 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi - - static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; - --static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", -- "sys_pll1_200m", "audio_pll2_out", "vpu_pll", -- "sys_pll1_80m", }; -+static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", -+ "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m", -+ "dummy", "sys_pll1_80m", }; - static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", - "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", - "video_pll1_out", "osc_32k", }; -@@ -299,7 +299,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) - void __iomem *base; - int ret; - -- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, -+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, - IMX8MN_CLK_END), GFP_KERNEL); - if (WARN_ON(!clk_hw_data)) - return -ENOMEM; -@@ -316,10 +316,10 @@ static int 
imx8mn_clocks_probe(struct platform_device *pdev) - hws[IMX8MN_CLK_EXT4] = imx_obtain_fixed_clk_hw(np, "clk_ext4"); - - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop"); -- base = of_iomap(np, 0); -+ base = devm_of_iomap(dev, np, 0, NULL); - of_node_put(np); -- if (WARN_ON(!base)) { -- ret = -ENOMEM; -+ if (WARN_ON(IS_ERR(base))) { -+ ret = PTR_ERR(base); - goto unregister_hws; - } - -@@ -328,7 +328,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) - hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); -- hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); -+ hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - -@@ -337,7 +337,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) - hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll); - hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll); - hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll); -- hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll); -+ hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll); - hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll); - hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000); - hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000); -@@ -349,7 +349,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) - hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); -- hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); -+ hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); - -@@ -359,7 +359,7 @@ static 
int imx8mn_clocks_probe(struct platform_device *pdev) - hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); - hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); - hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11); -- hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11); -+ hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11); - hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11); - hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11); - -diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c -index 12837304545d5..2f898c0bc867c 100644 ---- a/drivers/clk/imx/clk-imx8mp.c -+++ b/drivers/clk/imx/clk-imx8mp.c -@@ -176,10 +176,6 @@ static const char * const imx8mp_sai3_sels[] = {"osc_24m", "audio_pll1_out", "au - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", - "clk_ext3", "clk_ext4", }; - --static const char * const imx8mp_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", -- "video_pll1_out", "sys_pll1_133m", "osc_hdmi", -- "clk_ext1", "clk_ext2", }; -- - static const char * const imx8mp_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", - "clk_ext2", "clk_ext3", }; -@@ -407,25 +403,22 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) - struct device *dev = &pdev->dev; - struct device_node *np; - void __iomem *anatop_base, *ccm_base; -+ int err; - - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop"); -- anatop_base = of_iomap(np, 0); -+ anatop_base = devm_of_iomap(dev, np, 0, NULL); - of_node_put(np); -- if (WARN_ON(!anatop_base)) -- return -ENOMEM; -+ if (WARN_ON(IS_ERR(anatop_base))) -+ return PTR_ERR(anatop_base); - - np = dev->of_node; - ccm_base = devm_platform_ioremap_resource(pdev, 0); -- if (WARN_ON(IS_ERR(ccm_base))) { -- iounmap(anatop_base); -+ if (WARN_ON(IS_ERR(ccm_base))) - return PTR_ERR(ccm_base); -- } - -- clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL); -- if (WARN_ON(!clk_hw_data)) { -- iounmap(anatop_base); -+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, IMX8MP_CLK_END), GFP_KERNEL); -+ if (WARN_ON(!clk_hw_data)) - return -ENOMEM; -- } - - clk_hw_data->num = IMX8MP_CLK_END; - hws = clk_hw_data->hws; -@@ -569,7 +562,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) - hws[IMX8MP_CLK_SAI1] = imx8m_clk_hw_composite("sai1", imx8mp_sai1_sels, ccm_base + 0xa580); - hws[IMX8MP_CLK_SAI2] = imx8m_clk_hw_composite("sai2", imx8mp_sai2_sels, ccm_base + 0xa600); - hws[IMX8MP_CLK_SAI3] = imx8m_clk_hw_composite("sai3", imx8mp_sai3_sels, ccm_base + 0xa680); -- hws[IMX8MP_CLK_SAI4] = imx8m_clk_hw_composite("sai4", imx8mp_sai4_sels, ccm_base + 0xa700); - hws[IMX8MP_CLK_SAI5] = imx8m_clk_hw_composite("sai5", imx8mp_sai5_sels, ccm_base + 0xa780); - hws[IMX8MP_CLK_SAI6] = imx8m_clk_hw_composite("sai6", imx8mp_sai6_sels, ccm_base + 0xa800); - hws[IMX8MP_CLK_ENET_QOS] = imx8m_clk_hw_composite("enet_qos", imx8mp_enet_qos_sels, ccm_base + 0xa880); -@@ -675,7 +667,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) - hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); - hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); - 
hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); -- hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0); -+ hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); - hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); - hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); - hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); -@@ -710,7 +702,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) - - imx_check_clk_hws(hws, IMX8MP_CLK_END); - -- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); -+ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); -+ if (err < 0) { -+ dev_err(dev, "failed to register hws for i.MX8MP\n"); -+ imx_unregister_hw_clocks(hws, IMX8MP_CLK_END); -+ return err; -+ } - - imx_register_uart_clocks(4); - -diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c -index d3e905cf867d7..5e31a6a24b3a3 100644 ---- a/drivers/clk/imx/clk-imx8qxp-lpcg.c -+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c -@@ -248,7 +248,7 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev, - - for (i = 0; i < count; i++) { - idx = bit_offset[i] / 4; -- if (idx > IMX_LPCG_MAX_CLKS) { -+ if (idx >= IMX_LPCG_MAX_CLKS) { - dev_warn(&pdev->dev, "invalid bit offset of clock %d\n", - i); - ret = -EINVAL; -@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = { - .probe = imx8qxp_lpcg_clk_probe, - }; - --builtin_platform_driver(imx8qxp_lpcg_clk_driver); -+module_platform_driver(imx8qxp_lpcg_clk_driver); - - MODULE_AUTHOR("Aisheng Dong "); - MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver"); -diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c -index c53a688d8ccca..40a2efb1329be 100644 ---- a/drivers/clk/imx/clk-imx8qxp.c -+++ b/drivers/clk/imx/clk-imx8qxp.c -@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = { - }, - .probe = imx8qxp_clk_probe, - }; --builtin_platform_driver(imx8qxp_clk_driver); -+module_platform_driver(imx8qxp_clk_driver); - - MODULE_AUTHOR("Aisheng Dong "); - MODULE_DESCRIPTION("NXP i.MX8QXP clock driver"); -diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c -index 2b5ed86b9dbbb..483f496f437a8 100644 ---- a/drivers/clk/imx/clk-pll14xx.c -+++ b/drivers/clk/imx/clk-pll14xx.c -@@ -60,8 +60,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = { - PLL_1443X_RATE(650000000U, 325, 3, 2, 0), - PLL_1443X_RATE(594000000U, 198, 2, 2, 0), - PLL_1443X_RATE(519750000U, 173, 2, 2, 16384), -- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), -- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), - }; - - struct imx_pll14xx_clk imx_1443x_pll = { -diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c -index 083da31dc3ead..1cee88b073fa2 100644 ---- a/drivers/clk/imx/clk-scu.c -+++ b/drivers/clk/imx/clk-scu.c -@@ -690,7 +690,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name, - pr_warn("%s: failed to attached the power domain %d\n", - name, ret); - -- platform_device_add(pdev); -+ ret = platform_device_add(pdev); -+ if (ret) { -+ platform_device_put(pdev); -+ return ERR_PTR(ret); -+ } - - /* For API backwards compatiblilty, simply return NULL for success */ - return NULL; -@@ -698,11 +702,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const 
char *name, - - void imx_clk_scu_unregister(void) - { -- struct imx_scu_clk_node *clk; -+ struct imx_scu_clk_node *clk, *n; - int i; - - for (i = 0; i < IMX_SC_R_LAST; i++) { -- list_for_each_entry(clk, &imx_scu_clks[i], node) { -+ list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) { - clk_hw_unregister(clk->hw); - kfree(clk); - } -diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c -index 266c7595d3302..af31633a8862e 100644 ---- a/drivers/clk/ingenic/cgu.c -+++ b/drivers/clk/ingenic/cgu.c -@@ -453,15 +453,15 @@ ingenic_clk_calc_div(struct clk_hw *hw, - } - - /* Impose hardware constraints */ -- div = min_t(unsigned, div, 1 << clk_info->div.bits); -- div = max_t(unsigned, div, 1); -+ div = clamp_t(unsigned int, div, clk_info->div.div, -+ clk_info->div.div << clk_info->div.bits); - - /* - * If the divider value itself must be divided before being written to - * the divider register, we must ensure we don't have any bits set that - * would be lost as a result of doing so. - */ -- div /= clk_info->div.div; -+ div = DIV_ROUND_UP(div, clk_info->div.div); - div *= clk_info->div.div; - - return div; -diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c -index 5154b0cf8ad6c..66ff141da0a42 100644 ---- a/drivers/clk/ingenic/jz4725b-cgu.c -+++ b/drivers/clk/ingenic/jz4725b-cgu.c -@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { - }, - - [JZ4725B_CLK_I2S] = { -- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, -+ "i2s", CGU_CLK_MUX | CGU_CLK_DIV, - .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, - .mux = { CGU_REG_CPCCR, 31, 1 }, - .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, -- .gate = { CGU_REG_CLKGR, 6 }, - }, - - [JZ4725B_CLK_SPI] = { -diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c -index 14483797a4dbf..11906242e1d3d 100644 ---- a/drivers/clk/ingenic/jz4760-cgu.c -+++ b/drivers/clk/ingenic/jz4760-cgu.c -@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info, - unsigned long rate, unsigned long parent_rate, - unsigned int *pm, unsigned int *pn, unsigned int *pod) - { -- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2; -+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1; - - /* The frequency after the N divider must be between 1 and 50 MHz. */ - n = parent_rate / (1 * MHZ); -@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info, - /* The N divider must be >= 2. */ - n = clamp_val(n, 2, 1 << pll_info->n_bits); - -- for (;; n >>= 1) { -- od = (unsigned int)-1; -+ rate /= MHZ; -+ parent_rate /= MHZ; - -- do { -- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ); -- } while ((m > m_max || m & 1) && (od < 4)); -- -- if (od < 4 && m >= 4 && m <= m_max) -- break; -+ for (m = m_max; m >= m_max && n >= 2; n--) { -+ m = rate * n / parent_rate; -+ od = m & 1; -+ m <<= od; - } - - *pm = m; -- *pn = n; -+ *pn = n + 1; - *pod = 1 << od; - } - -diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c -index 77acfbeb48300..11fc395618365 100644 ---- a/drivers/clk/ingenic/tcu.c -+++ b/drivers/clk/ingenic/tcu.c -@@ -100,15 +100,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw) - bool enabled = false; - - /* -- * If the SoC has no global TCU clock, we must ungate the channel's -- * clock to be able to access its registers. -- * If we have a TCU clock, it will be enabled automatically as it has -- * been attached to the regmap. 
-+ * According to the programming manual, a timer channel's registers can -+ * only be accessed when the channel's stop bit is clear. - */ -- if (!tcu->clk) { -- enabled = !!ingenic_tcu_is_enabled(hw); -- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); -- } -+ enabled = !!ingenic_tcu_is_enabled(hw); -+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); - - return enabled; - } -@@ -119,8 +115,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw) - const struct ingenic_tcu_clk_info *info = tcu_clk->info; - struct ingenic_tcu *tcu = tcu_clk->tcu; - -- if (!tcu->clk) -- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); -+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); - } - - static u8 ingenic_tcu_get_parent(struct clk_hw *hw) -diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c -index d59a7621bb204..ee5c72369334f 100644 ---- a/drivers/clk/keystone/pll.c -+++ b/drivers/clk/keystone/pll.c -@@ -209,7 +209,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) - } - - clk = clk_register_pll(NULL, node->name, parent_name, pll_data); -- if (clk) { -+ if (!IS_ERR_OR_NULL(clk)) { - of_clk_add_provider(node, of_clk_src_simple_get, clk); - return; - } -diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c -index 7e1b136e71ae0..8af2a9faa805a 100644 ---- a/drivers/clk/keystone/sci-clk.c -+++ b/drivers/clk/keystone/sci-clk.c -@@ -302,6 +302,8 @@ static int _sci_clk_build(struct sci_clk_provider *provider, - - name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id, - sci_clk->clk_id); -+ if (!name) -+ return -ENOMEM; - - init.name = name; - -diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c -index 703f87622cf5f..1ebf740380efb 100644 ---- a/drivers/clk/loongson1/clk-loongson1c.c -+++ b/drivers/clk/loongson1/clk-loongson1c.c -@@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = { - [1] = { .val = 1, .div = 4 }, - [2] = { .val = 2, .div = 3 }, - [3] = { .val = 3, .div = 3 }, -+ [4] = { /* sentinel */ } - }; - - void __init ls1x_clk_init(void) -diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c -index 37b4162c58820..3a33014eee7f7 100644 ---- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c -+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c -@@ -18,9 +18,9 @@ static const struct mtk_gate_regs mfg_cg_regs = { - .sta_ofs = 0x0, - }; - --#define GATE_MFG(_id, _name, _parent, _shift) \ -- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \ -- &mtk_clk_gate_ops_setclr) -+#define GATE_MFG(_id, _name, _parent, _shift) \ -+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \ -+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT) - - static const struct mtk_gate mfg_clks[] = { - GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0) -diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c -index e562dc3c10a4b..d311da574499f 100644 ---- a/drivers/clk/mediatek/reset.c -+++ b/drivers/clk/mediatek/reset.c -@@ -25,7 +25,7 @@ static int mtk_reset_assert_set_clr(struct reset_controller_dev *rcdev, - struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); - unsigned int reg = data->regofs + ((id / 32) << 4); - -- return regmap_write(data->regmap, reg, 1); -+ return regmap_write(data->regmap, reg, BIT(id % 32)); - } - - static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev, -@@ -34,7 +34,7 @@ static int mtk_reset_deassert_set_clr(struct reset_controller_dev *rcdev, - 
struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); - unsigned int reg = data->regofs + ((id / 32) << 4) + 0x4; - -- return regmap_write(data->regmap, reg, 1); -+ return regmap_write(data->regmap, reg, BIT(id % 32)); - } - - static int mtk_reset_assert(struct reset_controller_dev *rcdev, -diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c -index d6eed760327d0..608e0e8ca49a8 100644 ---- a/drivers/clk/meson/gxbb.c -+++ b/drivers/clk/meson/gxbb.c -@@ -713,6 +713,35 @@ static struct clk_regmap gxbb_mpll_prediv = { - }; - - static struct clk_regmap gxbb_mpll0_div = { -+ .data = &(struct meson_clk_mpll_data){ -+ .sdm = { -+ .reg_off = HHI_MPLL_CNTL7, -+ .shift = 0, -+ .width = 14, -+ }, -+ .sdm_en = { -+ .reg_off = HHI_MPLL_CNTL, -+ .shift = 25, -+ .width = 1, -+ }, -+ .n2 = { -+ .reg_off = HHI_MPLL_CNTL7, -+ .shift = 16, -+ .width = 9, -+ }, -+ .lock = &meson_clk_lock, -+ }, -+ .hw.init = &(struct clk_init_data){ -+ .name = "mpll0_div", -+ .ops = &meson_clk_mpll_ops, -+ .parent_hws = (const struct clk_hw *[]) { -+ &gxbb_mpll_prediv.hw -+ }, -+ .num_parents = 1, -+ }, -+}; -+ -+static struct clk_regmap gxl_mpll0_div = { - .data = &(struct meson_clk_mpll_data){ - .sdm = { - .reg_off = HHI_MPLL_CNTL7, -@@ -749,7 +778,16 @@ static struct clk_regmap gxbb_mpll0 = { - .hw.init = &(struct clk_init_data){ - .name = "mpll0", - .ops = &clk_regmap_gate_ops, -- .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw }, -+ .parent_data = &(const struct clk_parent_data) { -+ /* -+ * Note: -+ * GXL and GXBB have different SDM_EN registers. We -+ * fallback to the global naming string mechanism so -+ * mpll0_div picks up the appropriate one. -+ */ -+ .name = "mpll0_div", -+ .index = -1, -+ }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - }, -@@ -3044,7 +3082,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { - [CLKID_VAPB_1] = &gxbb_vapb_1.hw, - [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw, - [CLKID_VAPB] = &gxbb_vapb.hw, -- [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw, -+ [CLKID_MPLL0_DIV] = &gxl_mpll0_div.hw, - [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw, - [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw, - [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw, -@@ -3439,7 +3477,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = { - &gxbb_mpll0, - &gxbb_mpll1, - &gxbb_mpll2, -- &gxbb_mpll0_div, -+ &gxl_mpll0_div, - &gxbb_mpll1_div, - &gxbb_mpll2_div, - &gxbb_cts_amclk_div, -diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c -index 27cd2c1f3f612..434cd8f9de826 100644 ---- a/drivers/clk/meson/meson-aoclk.c -+++ b/drivers/clk/meson/meson-aoclk.c -@@ -38,6 +38,7 @@ int meson_aoclkc_probe(struct platform_device *pdev) - struct meson_aoclk_reset_controller *rstc; - struct meson_aoclk_data *data; - struct device *dev = &pdev->dev; -+ struct device_node *np; - struct regmap *regmap; - int ret, clkid; - -@@ -49,7 +50,9 @@ int meson_aoclkc_probe(struct platform_device *pdev) - if (!rstc) - return -ENOMEM; - -- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); -+ np = of_get_parent(dev->of_node); -+ regmap = syscon_node_to_regmap(np); -+ of_node_put(np); - if (IS_ERR(regmap)) { - dev_err(dev, "failed to get regmap\n"); - return PTR_ERR(regmap); -diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c -index 8d5a5dab955a8..0e5e6b57eb20e 100644 ---- a/drivers/clk/meson/meson-eeclk.c -+++ b/drivers/clk/meson/meson-eeclk.c -@@ -18,6 +18,7 @@ int meson_eeclkc_probe(struct platform_device *pdev) - { - const struct meson_eeclkc_data *data; - 
struct device *dev = &pdev->dev; -+ struct device_node *np; - struct regmap *map; - int ret, i; - -@@ -26,7 +27,9 @@ int meson_eeclkc_probe(struct platform_device *pdev) - return -EINVAL; - - /* Get the hhi system controller node */ -- map = syscon_node_to_regmap(of_get_parent(dev->of_node)); -+ np = of_get_parent(dev->of_node); -+ map = syscon_node_to_regmap(np); -+ of_node_put(np); - if (IS_ERR(map)) { - dev_err(dev, - "failed to get HHI regmap\n"); -diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c -index a844d35b553a2..809a0bfb670db 100644 ---- a/drivers/clk/meson/meson8b.c -+++ b/drivers/clk/meson/meson8b.c -@@ -3717,12 +3717,15 @@ static void __init meson8b_clkc_init_common(struct device_node *np, - struct clk_hw_onecell_data *clk_hw_onecell_data) - { - struct meson8b_clk_reset *rstc; -+ struct device_node *parent_np; - const char *notifier_clk_name; - struct clk *notifier_clk; - struct regmap *map; - int i, ret; - -- map = syscon_node_to_regmap(of_get_parent(np)); -+ parent_np = of_get_parent(np); -+ map = syscon_node_to_regmap(parent_np); -+ of_node_put(parent_np); - if (IS_ERR(map)) { - pr_err("failed to get HHI regmap - Trying obsolete regs\n"); - return; -diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c -index 08ba59ec3fb17..71bdd7c3ff034 100644 ---- a/drivers/clk/mvebu/ap-cpu-clk.c -+++ b/drivers/clk/mvebu/ap-cpu-clk.c -@@ -256,12 +256,15 @@ static int ap_cpu_clock_probe(struct platform_device *pdev) - int cpu, err; - - err = of_property_read_u32(dn, "reg", &cpu); -- if (WARN_ON(err)) -+ if (WARN_ON(err)) { -+ of_node_put(dn); - return err; -+ } - - /* If cpu2 or cpu3 is enabled */ - if (cpu & APN806_CLUSTER_NUM_MASK) { - nclusters = 2; -+ of_node_put(dn); - break; - } - } -@@ -288,8 +291,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev) - int cpu, err; - - err = of_property_read_u32(dn, "reg", &cpu); -- if (WARN_ON(err)) -+ if (WARN_ON(err)) { -+ of_node_put(dn); - return err; -+ } - - cluster_index = cpu & APN806_CLUSTER_NUM_MASK; - cluster_index >>= APN806_CLUSTER_NUM_OFFSET; -@@ -301,6 +306,7 @@ static int ap_cpu_clock_probe(struct platform_device *pdev) - parent = of_clk_get(np, cluster_index); - if (IS_ERR(parent)) { - dev_err(dev, "Could not get the clock parent\n"); -+ of_node_put(dn); - return -EINVAL; - } - parent_name = __clk_get_name(parent); -@@ -319,8 +325,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev) - init.parent_names = &parent_name; - - ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw); -- if (ret) -+ if (ret) { -+ of_node_put(dn); - return ret; -+ } - ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw; - } - -diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig -index 9ef007b3cf9b4..6ba86cffc4135 100644 ---- a/drivers/clk/qcom/Kconfig -+++ b/drivers/clk/qcom/Kconfig -@@ -550,6 +550,7 @@ config SM_DISPCC_8250 - - config SM_GCC_6115 - tristate "SM6115 and SM4250 Global Clock Controller" -+ select QCOM_GDSC - help - Support for the global clock controller on SM6115 and SM4250 devices. 
- Say Y if you want to use peripheral devices such as UART, SPI, -diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile -index 9825ef843f4a0..63c356ae32f23 100644 ---- a/drivers/clk/qcom/Makefile -+++ b/drivers/clk/qcom/Makefile -@@ -11,6 +11,7 @@ clk-qcom-y += clk-branch.o - clk-qcom-y += clk-regmap-divider.o - clk-qcom-y += clk-regmap-mux.o - clk-qcom-y += clk-regmap-mux-div.o -+clk-qcom-y += clk-regmap-phy-mux.o - clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o - clk-qcom-y += clk-hfpll.o - clk-qcom-y += reset.o -diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c -index d78ff2f310bfa..b5d93657e1ee3 100644 ---- a/drivers/clk/qcom/apss-ipq6018.c -+++ b/drivers/clk/qcom/apss-ipq6018.c -@@ -57,7 +57,7 @@ static struct clk_branch apcs_alias0_core_clk = { - .parent_hws = (const struct clk_hw *[]){ - &apcs_alias0_clk_src.clkr.hw }, - .num_parents = 1, -- .flags = CLK_SET_RATE_PARENT, -+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, - .ops = &clk_branch2_ops, - }, - }, -diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c -index ce73ee9037cb0..ee99e3a853125 100644 ---- a/drivers/clk/qcom/camcc-sc7180.c -+++ b/drivers/clk/qcom/camcc-sc7180.c -@@ -1493,12 +1493,21 @@ static struct clk_branch cam_cc_sys_tmr_clk = { - }, - }; - -+static struct gdsc titan_top_gdsc = { -+ .gdscr = 0xb134, -+ .pd = { -+ .name = "titan_top_gdsc", -+ }, -+ .pwrsts = PWRSTS_OFF_ON, -+}; -+ - static struct gdsc bps_gdsc = { - .gdscr = 0x6004, - .pd = { - .name = "bps_gdsc", - }, - .pwrsts = PWRSTS_OFF_ON, -+ .parent = &titan_top_gdsc.pd, - .flags = HW_CTRL, - }; - -@@ -1508,6 +1517,7 @@ static struct gdsc ife_0_gdsc = { - .name = "ife_0_gdsc", - }, - .pwrsts = PWRSTS_OFF_ON, -+ .parent = &titan_top_gdsc.pd, - }; - - static struct gdsc ife_1_gdsc = { -@@ -1516,6 +1526,7 @@ static struct gdsc ife_1_gdsc = { - .name = "ife_1_gdsc", - }, - .pwrsts = PWRSTS_OFF_ON, -+ .parent = &titan_top_gdsc.pd, - }; - - static struct gdsc ipe_0_gdsc = { -@@ -1525,15 +1536,9 @@ static struct gdsc ipe_0_gdsc = { - }, - .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL, -+ .parent = &titan_top_gdsc.pd, - }; - --static struct gdsc titan_top_gdsc = { -- .gdscr = 0xb134, -- .pd = { -- .name = "titan_top_gdsc", -- }, -- .pwrsts = PWRSTS_OFF_ON, --}; - - static struct clk_hw *cam_cc_sc7180_hws[] = { - [CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw, -@@ -1672,7 +1677,7 @@ static int cam_cc_sc7180_probe(struct platform_device *pdev) - return ret; - } - -- ret = pm_runtime_get(&pdev->dev); -+ ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret) - return ret; - -diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c -index 1b2cefef7431d..a8a2cfa83290a 100644 ---- a/drivers/clk/qcom/camcc-sdm845.c -+++ b/drivers/clk/qcom/camcc-sdm845.c -@@ -1521,6 +1521,8 @@ static struct clk_branch cam_cc_sys_tmr_clk = { - }, - }; - -+static struct gdsc titan_top_gdsc; -+ - static struct gdsc bps_gdsc = { - .gdscr = 0x6004, - .pd = { -@@ -1554,6 +1556,7 @@ static struct gdsc ife_0_gdsc = { - .name = "ife_0_gdsc", - }, - .flags = POLL_CFG_GDSCR, -+ .parent = &titan_top_gdsc.pd, - .pwrsts = PWRSTS_OFF_ON, - }; - -@@ -1563,6 +1566,7 @@ static struct gdsc ife_1_gdsc = { - .name = "ife_1_gdsc", - }, - .flags = POLL_CFG_GDSCR, -+ .parent = &titan_top_gdsc.pd, - .pwrsts = PWRSTS_OFF_ON, - }; - -diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c -index 439eaafdcc862..9b32c56a5bc5a 100644 ---- a/drivers/clk/qcom/camcc-sm8250.c -+++ 
b/drivers/clk/qcom/camcc-sm8250.c -@@ -2205,6 +2205,8 @@ static struct clk_branch cam_cc_sleep_clk = { - }, - }; - -+static struct gdsc titan_top_gdsc; -+ - static struct gdsc bps_gdsc = { - .gdscr = 0x7004, - .pd = { -@@ -2238,6 +2240,7 @@ static struct gdsc ife_0_gdsc = { - .name = "ife_0_gdsc", - }, - .flags = POLL_CFG_GDSCR, -+ .parent = &titan_top_gdsc.pd, - .pwrsts = PWRSTS_OFF_ON, - }; - -@@ -2247,6 +2250,7 @@ static struct gdsc ife_1_gdsc = { - .name = "ife_1_gdsc", - }, - .flags = POLL_CFG_GDSCR, -+ .parent = &titan_top_gdsc.pd, - .pwrsts = PWRSTS_OFF_ON, - }; - -@@ -2440,17 +2444,7 @@ static struct platform_driver cam_cc_sm8250_driver = { - }, - }; - --static int __init cam_cc_sm8250_init(void) --{ -- return platform_driver_register(&cam_cc_sm8250_driver); --} --subsys_initcall(cam_cc_sm8250_init); -- --static void __exit cam_cc_sm8250_exit(void) --{ -- platform_driver_unregister(&cam_cc_sm8250_driver); --} --module_exit(cam_cc_sm8250_exit); -+module_platform_driver(cam_cc_sm8250_driver); - - MODULE_DESCRIPTION("QTI CAMCC SM8250 Driver"); - MODULE_LICENSE("GPL v2"); -diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c -index eaedcceb766f9..5e44ceb730ad1 100644 ---- a/drivers/clk/qcom/clk-alpha-pll.c -+++ b/drivers/clk/qcom/clk-alpha-pll.c -@@ -1420,7 +1420,7 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = { - EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); - - /** -- * clk_lucid_pll_configure - configure the lucid pll -+ * clk_trion_pll_configure - configure the trion pll - * - * @pll: clk alpha pll - * @regmap: register map -@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); - void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, - const struct alpha_pll_config *config) - { -+ /* -+ * If the bootloader left the PLL enabled it's likely that there are -+ * RCGs that will lock up if we disable the PLL below. -+ */ -+ if (trion_pll_is_enabled(pll, regmap)) { -+ pr_debug("Trion PLL is already enabled, skipping configuration\n"); -+ return; -+ } -+ - clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); - regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); - clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); -diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c -index 59f1af415b580..e74fc81a14d00 100644 ---- a/drivers/clk/qcom/clk-krait.c -+++ b/drivers/clk/qcom/clk-krait.c -@@ -32,11 +32,16 @@ static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel) - regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT); - } - krait_set_l2_indirect_reg(mux->offset, regval); -- spin_unlock_irqrestore(&krait_clock_reg_lock, flags); - - /* Wait for switch to complete. */ - mb(); - udelay(1); -+ -+ /* -+ * Unlock now to make sure the mux register is not -+ * modified while switching to the new parent. 
-+ */ -+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags); - } - - static int krait_mux_set_parent(struct clk_hw *hw, u8 index) -@@ -93,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, - - if (d->lpl) - mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; -+ else -+ mask <<= d->shift; - - spin_lock_irqsave(&krait_clock_reg_lock, flags); - val = krait_get_l2_indirect_reg(d->offset); -diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c -index e1b1b426fae4b..c3823cc32edc6 100644 ---- a/drivers/clk/qcom/clk-rcg2.c -+++ b/drivers/clk/qcom/clk-rcg2.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -264,7 +265,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, - - static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) - { -- u32 cfg, mask; -+ u32 cfg, mask, d_val, not2d_val, n_minus_m; - struct clk_hw *hw = &rcg->clkr.hw; - int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src); - -@@ -283,8 +284,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) - if (ret) - return ret; - -+ /* Calculate 2d value */ -+ d_val = f->n; -+ -+ n_minus_m = f->n - f->m; -+ n_minus_m *= 2; -+ -+ d_val = clamp_t(u32, d_val, f->m, n_minus_m); -+ not2d_val = ~d_val & mask; -+ - ret = regmap_update_bits(rcg->clkr.regmap, -- RCG_D_OFFSET(rcg), mask, ~f->n); -+ RCG_D_OFFSET(rcg), mask, not2d_val); - if (ret) - return ret; - } -@@ -396,7 +406,7 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) - static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) - { - struct clk_rcg2 *rcg = to_clk_rcg2(hw); -- u32 notn_m, n, m, d, not2d, mask, duty_per; -+ u32 notn_m, n, m, d, not2d, mask, duty_per, cfg; - int ret; - - /* Duty-cycle cannot be modified for non-MND RCGs */ -@@ -407,6 +417,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) - - regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), ¬n_m); - regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m); -+ regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); -+ -+ /* Duty-cycle cannot be modified if MND divider is in bypass mode. */ -+ if (!(cfg & CFG_MODE_MASK)) -+ return -EINVAL; - - n = (~(notn_m) + m) & mask; - -@@ -415,9 +430,11 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) - /* Calculate 2d value */ - d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100); - -- /* Check bit widths of 2d. If D is too big reduce duty cycle. */ -- if (d > mask) -- d = mask; -+ /* -+ * Check bit widths of 2d. If D is too big reduce duty cycle. -+ * Also make sure it is never zero. 
-+ */ -+ d = clamp_val(d, 1, mask); - - if ((d / 2) > (n - m)) - d = (n - m) * 2; -@@ -720,6 +737,7 @@ static const struct frac_entry frac_table_pixel[] = { - { 2, 9 }, - { 4, 9 }, - { 1, 1 }, -+ { 2, 3 }, - { } - }; - -diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c -index b2d00b4519634..45d9cca28064f 100644 ---- a/drivers/clk/qcom/clk-regmap-mux.c -+++ b/drivers/clk/qcom/clk-regmap-mux.c -@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw) - val &= mask; - - if (mux->parent_map) -- return qcom_find_src_index(hw, mux->parent_map, val); -+ return qcom_find_cfg_index(hw, mux->parent_map, val); - - return val; - } -diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.c b/drivers/clk/qcom/clk-regmap-phy-mux.c -new file mode 100644 -index 0000000000000..7b7243b7107dc ---- /dev/null -+++ b/drivers/clk/qcom/clk-regmap-phy-mux.c -@@ -0,0 +1,62 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* -+ * Copyright (c) 2022, Linaro Ltd. -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "clk-regmap.h" -+#include "clk-regmap-phy-mux.h" -+ -+#define PHY_MUX_MASK GENMASK(1, 0) -+#define PHY_MUX_PHY_SRC 0 -+#define PHY_MUX_REF_SRC 2 -+ -+static inline struct clk_regmap_phy_mux *to_clk_regmap_phy_mux(struct clk_regmap *clkr) -+{ -+ return container_of(clkr, struct clk_regmap_phy_mux, clkr); -+} -+ -+static int phy_mux_is_enabled(struct clk_hw *hw) -+{ -+ struct clk_regmap *clkr = to_clk_regmap(hw); -+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr); -+ unsigned int val; -+ -+ regmap_read(clkr->regmap, phy_mux->reg, &val); -+ val = FIELD_GET(PHY_MUX_MASK, val); -+ -+ WARN_ON(val != PHY_MUX_PHY_SRC && val != PHY_MUX_REF_SRC); -+ -+ return val == PHY_MUX_PHY_SRC; -+} -+ -+static int phy_mux_enable(struct clk_hw *hw) -+{ -+ struct clk_regmap *clkr = to_clk_regmap(hw); -+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr); -+ -+ return regmap_update_bits(clkr->regmap, phy_mux->reg, -+ PHY_MUX_MASK, -+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_PHY_SRC)); -+} -+ -+static void phy_mux_disable(struct clk_hw *hw) -+{ -+ struct clk_regmap *clkr = to_clk_regmap(hw); -+ struct clk_regmap_phy_mux *phy_mux = to_clk_regmap_phy_mux(clkr); -+ -+ regmap_update_bits(clkr->regmap, phy_mux->reg, -+ PHY_MUX_MASK, -+ FIELD_PREP(PHY_MUX_MASK, PHY_MUX_REF_SRC)); -+} -+ -+const struct clk_ops clk_regmap_phy_mux_ops = { -+ .enable = phy_mux_enable, -+ .disable = phy_mux_disable, -+ .is_enabled = phy_mux_is_enabled, -+}; -+EXPORT_SYMBOL_GPL(clk_regmap_phy_mux_ops); -diff --git a/drivers/clk/qcom/clk-regmap-phy-mux.h b/drivers/clk/qcom/clk-regmap-phy-mux.h -new file mode 100644 -index 0000000000000..614dd384695ca ---- /dev/null -+++ b/drivers/clk/qcom/clk-regmap-phy-mux.h -@@ -0,0 +1,33 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* -+ * Copyright (c) 2022, Linaro Ltd. -+ */ -+ -+#ifndef __QCOM_CLK_REGMAP_PHY_MUX_H__ -+#define __QCOM_CLK_REGMAP_PHY_MUX_H__ -+ -+#include "clk-regmap.h" -+ -+/* -+ * A clock implementation for PHY pipe and symbols clock muxes. -+ * -+ * If the clock is running off the from-PHY source, report it as enabled. -+ * Report it as disabled otherwise (if it uses reference source). -+ * -+ * This way the PHY will disable the pipe clock before turning off the GDSC, -+ * which in turn would lead to disabling corresponding pipe_clk_src (and thus -+ * it being parked to a safe, reference clock source). 
And vice versa, after -+ * enabling the GDSC the PHY will enable the pipe clock, which would cause -+ * pipe_clk_src to be switched from a safe source to the working one. -+ * -+ * For some platforms this should be used for the UFS symbol_clk_src clocks -+ * too. -+ */ -+struct clk_regmap_phy_mux { -+ u32 reg; -+ struct clk_regmap clkr; -+}; -+ -+extern const struct clk_ops clk_regmap_phy_mux_ops; -+ -+#endif -diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c -index 60d2a78d13950..2af04fc4abfa9 100644 ---- a/drivers/clk/qcom/common.c -+++ b/drivers/clk/qcom/common.c -@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) - } - EXPORT_SYMBOL_GPL(qcom_find_src_index); - -+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg) -+{ -+ int i, num_parents = clk_hw_get_num_parents(hw); -+ -+ for (i = 0; i < num_parents; i++) -+ if (cfg == map[i].cfg) -+ return i; -+ -+ return -ENOENT; -+} -+EXPORT_SYMBOL_GPL(qcom_find_cfg_index); -+ - struct regmap * - qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) - { -diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h -index bb39a7e106d8a..9c8f7b798d9fc 100644 ---- a/drivers/clk/qcom/common.h -+++ b/drivers/clk/qcom/common.h -@@ -49,6 +49,8 @@ extern void - qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count); - extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, - u8 src); -+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, -+ u8 cfg); - - extern int qcom_cc_register_board_clk(struct device *dev, const char *path, - const char *name, unsigned long rate); -diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c -index 538e4963c9152..5d2ae297e7413 100644 ---- a/drivers/clk/qcom/dispcc-sc7180.c -+++ b/drivers/clk/qcom/dispcc-sc7180.c -@@ -1,6 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0-only - /* -- * Copyright (c) 2019, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2019, 2022, The Linux Foundation. All rights reserved. - */ - - #include -@@ -625,6 +625,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { - - static struct gdsc mdss_gdsc = { - .gdscr = 0x3000, -+ .en_rest_wait_val = 0x2, -+ .en_few_wait_val = 0x2, -+ .clk_dis_wait_val = 0xf, - .pd = { - .name = "mdss_gdsc", - }, -diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c -index 4ef4ae231794b..ad596d567f6ab 100644 ---- a/drivers/clk/qcom/dispcc-sc7280.c -+++ b/drivers/clk/qcom/dispcc-sc7280.c -@@ -1,6 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0-only - /* -- * Copyright (c) 2021, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2021-2022, The Linux Foundation. All rights reserved. - */ - - #include -@@ -787,6 +787,9 @@ static struct clk_branch disp_cc_sleep_clk = { - - static struct gdsc disp_cc_mdss_core_gdsc = { - .gdscr = 0x1004, -+ .en_rest_wait_val = 0x2, -+ .en_few_wait_val = 0x2, -+ .clk_dis_wait_val = 0xf, - .pd = { - .name = "disp_cc_mdss_core_gdsc", - }, -diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c -index bf9ffe1a1cf47..73c5feea9818b 100644 ---- a/drivers/clk/qcom/dispcc-sm8250.c -+++ b/drivers/clk/qcom/dispcc-sm8250.c -@@ -1,6 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0 - /* -- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2018-2020, 2022, The Linux Foundation. All rights reserved. 
- */ - - #include -@@ -1125,6 +1125,9 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { - - static struct gdsc mdss_gdsc = { - .gdscr = 0x3000, -+ .en_rest_wait_val = 0x2, -+ .en_few_wait_val = 0x2, -+ .clk_dis_wait_val = 0xf, - .pd = { - .name = "mdss_gdsc", - }, -diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c -index 3f9c2f61a5d93..cde62a11f5736 100644 ---- a/drivers/clk/qcom/gcc-ipq6018.c -+++ b/drivers/clk/qcom/gcc-ipq6018.c -@@ -1654,7 +1654,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { - .name = "sdcc1_apps_clk_src", - .parent_data = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, - .num_parents = 4, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_floor_ops, - }, - }; - -@@ -4517,24 +4517,24 @@ static const struct qcom_reset_map gcc_ipq6018_resets[] = { - [GCC_PCIE0_AHB_ARES] = { 0x75040, 5 }, - [GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 }, - [GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 }, -- [GCC_PPE_FULL_RESET] = { 0x68014, 0 }, -- [GCC_UNIPHY0_SOFT_RESET] = { 0x56004, 0 }, -+ [GCC_PPE_FULL_RESET] = { .reg = 0x68014, .bitmask = 0xf0000 }, -+ [GCC_UNIPHY0_SOFT_RESET] = { .reg = 0x56004, .bitmask = 0x3ff2 }, - [GCC_UNIPHY0_XPCS_RESET] = { 0x56004, 2 }, -- [GCC_UNIPHY1_SOFT_RESET] = { 0x56104, 0 }, -+ [GCC_UNIPHY1_SOFT_RESET] = { .reg = 0x56104, .bitmask = 0x32 }, - [GCC_UNIPHY1_XPCS_RESET] = { 0x56104, 2 }, -- [GCC_EDMA_HW_RESET] = { 0x68014, 0 }, -- [GCC_NSSPORT1_RESET] = { 0x68014, 0 }, -- [GCC_NSSPORT2_RESET] = { 0x68014, 0 }, -- [GCC_NSSPORT3_RESET] = { 0x68014, 0 }, -- [GCC_NSSPORT4_RESET] = { 0x68014, 0 }, -- [GCC_NSSPORT5_RESET] = { 0x68014, 0 }, -- [GCC_UNIPHY0_PORT1_ARES] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT2_ARES] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT3_ARES] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT4_ARES] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT5_ARES] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT_4_5_RESET] = { 0x56004, 0 }, -- [GCC_UNIPHY0_PORT_4_RESET] = { 0x56004, 0 }, -+ [GCC_EDMA_HW_RESET] = { .reg = 0x68014, .bitmask = 0x300000 }, -+ [GCC_NSSPORT1_RESET] = { .reg = 0x68014, .bitmask = 0x1000003 }, -+ [GCC_NSSPORT2_RESET] = { .reg = 0x68014, .bitmask = 0x200000c }, -+ [GCC_NSSPORT3_RESET] = { .reg = 0x68014, .bitmask = 0x4000030 }, -+ [GCC_NSSPORT4_RESET] = { .reg = 0x68014, .bitmask = 0x8000300 }, -+ [GCC_NSSPORT5_RESET] = { .reg = 0x68014, .bitmask = 0x10000c00 }, -+ [GCC_UNIPHY0_PORT1_ARES] = { .reg = 0x56004, .bitmask = 0x30 }, -+ [GCC_UNIPHY0_PORT2_ARES] = { .reg = 0x56004, .bitmask = 0xc0 }, -+ [GCC_UNIPHY0_PORT3_ARES] = { .reg = 0x56004, .bitmask = 0x300 }, -+ [GCC_UNIPHY0_PORT4_ARES] = { .reg = 0x56004, .bitmask = 0xc00 }, -+ [GCC_UNIPHY0_PORT5_ARES] = { .reg = 0x56004, .bitmask = 0x3000 }, -+ [GCC_UNIPHY0_PORT_4_5_RESET] = { .reg = 0x56004, .bitmask = 0x3c02 }, -+ [GCC_UNIPHY0_PORT_4_RESET] = { .reg = 0x56004, .bitmask = 0xc02 }, - [GCC_LPASS_BCR] = {0x1F000, 0}, - [GCC_UBI32_TBU_BCR] = {0x65000, 0}, - [GCC_LPASS_TBU_BCR] = {0x6C000, 0}, -diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c -index 108fe27bee10f..d6d5defb82c9f 100644 ---- a/drivers/clk/qcom/gcc-ipq8074.c -+++ b/drivers/clk/qcom/gcc-ipq8074.c -@@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = { - { P_GPLL0_DIV2, 4 }, - }; - --static const char * const gcc_xo_gpll0[] = { -- "xo", -- "gpll0", --}; -- - static const struct parent_map gcc_xo_gpll0_map[] = { - { P_XO, 0 }, - { P_GPLL0, 1 }, -@@ -667,6 +662,7 @@ static struct clk_branch gcc_sleep_clk_src = { - }, - .num_parents = 1, - .ops = 
&clk_branch2_ops, -+ .flags = CLK_IS_CRITICAL, - }, - }, - }; -@@ -956,6 +952,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = { - }, - }; - -+static const struct clk_parent_data gcc_xo_gpll0[] = { -+ { .fw_name = "xo" }, -+ { .hw = &gpll0.clkr.hw }, -+}; -+ - static const struct freq_tbl ftbl_pcie_axi_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(200000000, P_GPLL0, 4, 0, 0), -@@ -969,7 +970,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "pcie0_axi_clk_src", -- .parent_names = gcc_xo_gpll0, -+ .parent_data = gcc_xo_gpll0, - .num_parents = 2, - .ops = &clk_rcg2_ops, - }, -@@ -1016,7 +1017,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = { - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "pcie1_axi_clk_src", -- .parent_names = gcc_xo_gpll0, -+ .parent_data = gcc_xo_gpll0, - .num_parents = 2, - .ops = &clk_rcg2_ops, - }, -@@ -1074,7 +1075,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { - .name = "sdcc1_apps_clk_src", - .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, - .num_parents = 4, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_floor_ops, - }, - }; - -@@ -1330,7 +1331,7 @@ static struct clk_rcg2 nss_ce_clk_src = { - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "nss_ce_clk_src", -- .parent_names = gcc_xo_gpll0, -+ .parent_data = gcc_xo_gpll0, - .num_parents = 2, - .ops = &clk_rcg2_ops, - }, -@@ -1788,8 +1789,10 @@ static struct clk_regmap_div nss_port4_tx_div_clk_src = { - static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY1_RX, 12.5, 0, 0), -+ F(25000000, P_UNIPHY0_RX, 5, 0, 0), - F(78125000, P_UNIPHY1_RX, 4, 0, 0), - F(125000000, P_UNIPHY1_RX, 2.5, 0, 0), -+ F(125000000, P_UNIPHY0_RX, 1, 0, 0), - F(156250000, P_UNIPHY1_RX, 2, 0, 0), - F(312500000, P_UNIPHY1_RX, 1, 0, 0), - { } -@@ -1828,8 +1831,10 @@ static struct clk_regmap_div nss_port5_rx_div_clk_src = { - static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), - F(25000000, P_UNIPHY1_TX, 12.5, 0, 0), -+ F(25000000, P_UNIPHY0_TX, 5, 0, 0), - F(78125000, P_UNIPHY1_TX, 4, 0, 0), - F(125000000, P_UNIPHY1_TX, 2.5, 0, 0), -+ F(125000000, P_UNIPHY0_TX, 1, 0, 0), - F(156250000, P_UNIPHY1_TX, 2, 0, 0), - F(312500000, P_UNIPHY1_TX, 1, 0, 0), - { } -@@ -1867,8 +1872,10 @@ static struct clk_regmap_div nss_port5_tx_div_clk_src = { - - static const struct freq_tbl ftbl_nss_port6_rx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), -+ F(25000000, P_UNIPHY2_RX, 5, 0, 0), - F(25000000, P_UNIPHY2_RX, 12.5, 0, 0), - F(78125000, P_UNIPHY2_RX, 4, 0, 0), -+ F(125000000, P_UNIPHY2_RX, 1, 0, 0), - F(125000000, P_UNIPHY2_RX, 2.5, 0, 0), - F(156250000, P_UNIPHY2_RX, 2, 0, 0), - F(312500000, P_UNIPHY2_RX, 1, 0, 0), -@@ -1907,8 +1914,10 @@ static struct clk_regmap_div nss_port6_rx_div_clk_src = { - - static const struct freq_tbl ftbl_nss_port6_tx_clk_src[] = { - F(19200000, P_XO, 1, 0, 0), -+ F(25000000, P_UNIPHY2_TX, 5, 0, 0), - F(25000000, P_UNIPHY2_TX, 12.5, 0, 0), - F(78125000, P_UNIPHY2_TX, 4, 0, 0), -+ F(125000000, P_UNIPHY2_TX, 1, 0, 0), - F(125000000, P_UNIPHY2_TX, 2.5, 0, 0), - F(156250000, P_UNIPHY2_TX, 2, 0, 0), - F(312500000, P_UNIPHY2_TX, 1, 0, 0), -@@ -3346,6 +3355,7 @@ static struct clk_branch gcc_nssnoc_ubi1_ahb_clk = { - - static struct clk_branch gcc_ubi0_ahb_clk = { - .halt_reg = 0x6820c, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x6820c, - 
.enable_mask = BIT(0), -@@ -3363,6 +3373,7 @@ static struct clk_branch gcc_ubi0_ahb_clk = { - - static struct clk_branch gcc_ubi0_axi_clk = { - .halt_reg = 0x68200, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68200, - .enable_mask = BIT(0), -@@ -3380,6 +3391,7 @@ static struct clk_branch gcc_ubi0_axi_clk = { - - static struct clk_branch gcc_ubi0_nc_axi_clk = { - .halt_reg = 0x68204, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68204, - .enable_mask = BIT(0), -@@ -3397,6 +3409,7 @@ static struct clk_branch gcc_ubi0_nc_axi_clk = { - - static struct clk_branch gcc_ubi0_core_clk = { - .halt_reg = 0x68210, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68210, - .enable_mask = BIT(0), -@@ -3414,6 +3427,7 @@ static struct clk_branch gcc_ubi0_core_clk = { - - static struct clk_branch gcc_ubi0_mpt_clk = { - .halt_reg = 0x68208, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68208, - .enable_mask = BIT(0), -@@ -3431,6 +3445,7 @@ static struct clk_branch gcc_ubi0_mpt_clk = { - - static struct clk_branch gcc_ubi1_ahb_clk = { - .halt_reg = 0x6822c, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x6822c, - .enable_mask = BIT(0), -@@ -3448,6 +3463,7 @@ static struct clk_branch gcc_ubi1_ahb_clk = { - - static struct clk_branch gcc_ubi1_axi_clk = { - .halt_reg = 0x68220, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68220, - .enable_mask = BIT(0), -@@ -3465,6 +3481,7 @@ static struct clk_branch gcc_ubi1_axi_clk = { - - static struct clk_branch gcc_ubi1_nc_axi_clk = { - .halt_reg = 0x68224, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68224, - .enable_mask = BIT(0), -@@ -3482,6 +3499,7 @@ static struct clk_branch gcc_ubi1_nc_axi_clk = { - - static struct clk_branch gcc_ubi1_core_clk = { - .halt_reg = 0x68230, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68230, - .enable_mask = BIT(0), -@@ -3499,6 +3517,7 @@ static struct clk_branch gcc_ubi1_core_clk = { - - static struct clk_branch gcc_ubi1_mpt_clk = { - .halt_reg = 0x68228, -+ .halt_check = BRANCH_HALT_DELAY, - .clkr = { - .enable_reg = 0x68228, - .enable_mask = BIT(0), -@@ -4329,8 +4348,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = { - .parent_map = gcc_xo_gpll0_map, - .clkr.hw.init = &(struct clk_init_data){ - .name = "pcie0_rchng_clk_src", -- .parent_hws = (const struct clk_hw *[]) { -- &gpll0.clkr.hw }, -+ .parent_data = gcc_xo_gpll0, - .num_parents = 2, - .ops = &clk_rcg2_ops, - }, -@@ -4372,6 +4390,33 @@ static struct clk_branch gcc_pcie0_axi_s_bridge_clk = { - }, - }; - -+static const struct alpha_pll_config ubi32_pll_config = { -+ .l = 0x4e, -+ .config_ctl_val = 0x200d4aa8, -+ .config_ctl_hi_val = 0x3c2, -+ .main_output_mask = BIT(0), -+ .aux_output_mask = BIT(1), -+ .pre_div_val = 0x0, -+ .pre_div_mask = BIT(12), -+ .post_div_val = 0x0, -+ .post_div_mask = GENMASK(9, 8), -+}; -+ -+static const struct alpha_pll_config nss_crypto_pll_config = { -+ .l = 0x3e, -+ .alpha = 0x0, -+ .alpha_hi = 0x80, -+ .config_ctl_val = 0x4001055b, -+ .main_output_mask = BIT(0), -+ .pre_div_val = 0x0, -+ .pre_div_mask = GENMASK(14, 12), -+ .post_div_val = 0x1 << 8, -+ .post_div_mask = GENMASK(11, 8), -+ .vco_mask = GENMASK(21, 20), -+ .vco_val = 0x0, -+ .alpha_en_mask = BIT(24), -+}; -+ - static struct clk_hw *gcc_ipq8074_hws[] = { - &gpll0_out_main_div2.hw, - &gpll6_out_main_div2.hw, -@@ -4773,7 +4818,20 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = { - - static int gcc_ipq8074_probe(struct 
platform_device *pdev) - { -- return qcom_cc_probe(pdev, &gcc_ipq8074_desc); -+ struct regmap *regmap; -+ -+ regmap = qcom_cc_map(pdev, &gcc_ipq8074_desc); -+ if (IS_ERR(regmap)) -+ return PTR_ERR(regmap); -+ -+ /* SW Workaround for UBI32 Huayra PLL */ -+ regmap_update_bits(regmap, 0x2501c, BIT(26), BIT(26)); -+ -+ clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config); -+ clk_alpha_pll_configure(&nss_crypto_pll_main, regmap, -+ &nss_crypto_pll_config); -+ -+ return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap); - } - - static struct platform_driver gcc_ipq8074_driver = { -diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c -index 8bed02a748aba..470a277603a92 100644 ---- a/drivers/clk/qcom/gcc-mdm9615.c -+++ b/drivers/clk/qcom/gcc-mdm9615.c -@@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = { - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "pll0_vote", -- .parent_names = (const char *[]){ "pll8" }, -+ .parent_names = (const char *[]){ "pll0" }, - .num_parents = 1, - .ops = &clk_pll_vote_ops, - }, -diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c -index 39ebb443ae3d5..de0022e5450de 100644 ---- a/drivers/clk/qcom/gcc-msm8939.c -+++ b/drivers/clk/qcom/gcc-msm8939.c -@@ -632,7 +632,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = { - }; - - static struct clk_rcg2 bimc_ddr_clk_src = { -- .cmd_rcgr = 0x32004, -+ .cmd_rcgr = 0x32024, - .hid_width = 5, - .parent_map = gcc_xo_gpll0_bimc_map, - .clkr.hw.init = &(struct clk_init_data){ -@@ -644,6 +644,18 @@ static struct clk_rcg2 bimc_ddr_clk_src = { - }, - }; - -+static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = { -+ .cmd_rcgr = 0x2600c, -+ .hid_width = 5, -+ .parent_map = gcc_xo_gpll0_gpll6a_map, -+ .clkr.hw.init = &(struct clk_init_data){ -+ .name = "system_mm_noc_bfdcd_clk_src", -+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data, -+ .num_parents = 3, -+ .ops = &clk_rcg2_ops, -+ }, -+}; -+ - static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = { - F(40000000, P_GPLL0, 10, 1, 2), - F(80000000, P_GPLL0, 10, 0, 0), -@@ -1002,7 +1014,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = { - }; - - static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = { -- F(19200000, P_XO, 1, 0, 0), -+ F(19200000, P_XO, 1, 0, 0), - { } - }; - -@@ -2441,7 +2453,7 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_camss_jpeg_axi_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -2645,7 +2657,7 @@ static struct clk_branch gcc_camss_vfe_axi_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_camss_vfe_axi_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -2801,7 +2813,7 @@ static struct clk_branch gcc_mdss_axi_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_mdss_axi_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3193,7 +3205,7 @@ static struct clk_branch gcc_mdp_tbu_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_mdp_tbu_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = 
&system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3211,7 +3223,7 @@ static struct clk_branch gcc_venus_tbu_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_venus_tbu_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3229,7 +3241,7 @@ static struct clk_branch gcc_vfe_tbu_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_vfe_tbu_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3247,7 +3259,7 @@ static struct clk_branch gcc_jpeg_tbu_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_jpeg_tbu_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3484,7 +3496,7 @@ static struct clk_branch gcc_venus0_axi_clk = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_venus0_axi_clk", - .parent_data = &(const struct clk_parent_data){ -- .hw = &system_noc_bfdcd_clk_src.clkr.hw, -+ .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw, - }, - .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, -@@ -3623,6 +3635,7 @@ static struct clk_regmap *gcc_msm8939_clocks[] = { - [GPLL2_VOTE] = &gpll2_vote, - [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr, - [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr, -+ [SYSTEM_MM_NOC_BFDCD_CLK_SRC] = &system_mm_noc_bfdcd_clk_src.clkr, - [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr, - [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr, - [CSI0_CLK_SRC] = &csi0_clk_src.clkr, -diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c -index 144d2ba7a9bef..463a444c8a7e4 100644 ---- a/drivers/clk/qcom/gcc-msm8994.c -+++ b/drivers/clk/qcom/gcc-msm8994.c -@@ -108,6 +108,7 @@ static struct clk_alpha_pll gpll4_early = { - - static struct clk_alpha_pll_postdiv gpll4 = { - .offset = 0x1dc0, -+ .width = 4, - .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data) - { -diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c -index 3c3a7ff045621..9b1674b28d45d 100644 ---- a/drivers/clk/qcom/gcc-msm8996.c -+++ b/drivers/clk/qcom/gcc-msm8996.c -@@ -2937,20 +2937,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = { - }, - }; - --static struct clk_branch gcc_aggre1_pnoc_ahb_clk = { -- .halt_reg = 0x82014, -- .clkr = { -- .enable_reg = 0x82014, -- .enable_mask = BIT(0), -- .hw.init = &(struct clk_init_data){ -- .name = "gcc_aggre1_pnoc_ahb_clk", -- .parent_names = (const char *[]){ "periph_noc_clk_src" }, -- .num_parents = 1, -- .ops = &clk_branch2_ops, -- }, -- }, --}; -- - static struct clk_branch gcc_aggre2_ufs_axi_clk = { - .halt_reg = 0x83014, - .clkr = { -@@ -3474,7 +3460,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { - [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr, - [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr, - [GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr, -- [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr, - [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr, - [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr, - [GCC_QSPI_AHB_CLK] = 
&gcc_qspi_ahb_clk.clkr, -diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c -index c2ea09945c472..a38394b4739a2 100644 ---- a/drivers/clk/qcom/gcc-sc7180.c -+++ b/drivers/clk/qcom/gcc-sc7180.c -@@ -667,6 +667,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { - .name = "gcc_sdcc2_apps_clk_src", - .parent_data = gcc_parent_data_5, - .num_parents = ARRAY_SIZE(gcc_parent_data_5), -+ .flags = CLK_OPS_PARENT_ENABLE, - .ops = &clk_rcg2_floor_ops, - }, - }; -diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c -index 6cefcdc869905..d10efbf260b7a 100644 ---- a/drivers/clk/qcom/gcc-sc7280.c -+++ b/drivers/clk/qcom/gcc-sc7280.c -@@ -2998,7 +2998,7 @@ static struct clk_branch gcc_cfg_noc_lpass_clk = { - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_cfg_noc_lpass_clk", -- .ops = &clk_branch2_ops, -+ .ops = &clk_branch2_aon_ops, - }, - }, - }; -@@ -3571,6 +3571,7 @@ static int gcc_sc7280_probe(struct platform_device *pdev) - regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0)); - regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0)); - regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); -+ regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13)); - - ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, - ARRAY_SIZE(gcc_dfs_clocks)); -diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c -index 68fe9f6f0d2f3..1c3be4e07d5bc 100644 ---- a/drivers/clk/qcom/gcc-sm6115.c -+++ b/drivers/clk/qcom/gcc-sm6115.c -@@ -53,11 +53,25 @@ static struct pll_vco gpll10_vco[] = { - { 750000000, 1500000000, 1 }, - }; - -+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = { -+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = { -+ [PLL_OFF_L_VAL] = 0x04, -+ [PLL_OFF_ALPHA_VAL] = 0x08, -+ [PLL_OFF_ALPHA_VAL_U] = 0x0c, -+ [PLL_OFF_TEST_CTL] = 0x10, -+ [PLL_OFF_TEST_CTL_U] = 0x14, -+ [PLL_OFF_USER_CTL] = 0x18, -+ [PLL_OFF_USER_CTL_U] = 0x1c, -+ [PLL_OFF_CONFIG_CTL] = 0x20, -+ [PLL_OFF_STATUS] = 0x24, -+ }, -+}; -+ - static struct clk_alpha_pll gpll0 = { - .offset = 0x0, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(0), -@@ -83,7 +97,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = { - .post_div_table = post_div_table_gpll0_out_aux2, - .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll0_out_aux2", - .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw }, -@@ -115,7 +129,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_main = { - .post_div_table = post_div_table_gpll0_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll0_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw }, -@@ -137,7 +151,7 @@ static struct clk_alpha_pll gpll10 = { - .offset = 0xa000, - .vco_table = gpll10_vco, - .num_vco = ARRAY_SIZE(gpll10_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = 
BIT(10), -@@ -163,7 +177,7 @@ static struct clk_alpha_pll_postdiv gpll10_out_main = { - .post_div_table = post_div_table_gpll10_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll10_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll10.clkr.hw }, -@@ -189,7 +203,7 @@ static struct clk_alpha_pll gpll11 = { - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), - .flags = SUPPORTS_DYNAMIC_UPDATE, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(11), -@@ -215,7 +229,7 @@ static struct clk_alpha_pll_postdiv gpll11_out_main = { - .post_div_table = post_div_table_gpll11_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll11_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll11.clkr.hw }, -@@ -229,7 +243,7 @@ static struct clk_alpha_pll gpll3 = { - .offset = 0x3000, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(3), -@@ -248,7 +262,7 @@ static struct clk_alpha_pll gpll4 = { - .offset = 0x4000, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(4), -@@ -274,7 +288,7 @@ static struct clk_alpha_pll_postdiv gpll4_out_main = { - .post_div_table = post_div_table_gpll4_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll4_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll4.clkr.hw }, -@@ -287,7 +301,7 @@ static struct clk_alpha_pll gpll6 = { - .offset = 0x6000, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(6), -@@ -313,7 +327,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = { - .post_div_table = post_div_table_gpll6_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll6_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw }, -@@ -326,7 +340,7 @@ static struct clk_alpha_pll gpll7 = { - .offset = 0x7000, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr = { - .enable_reg = 0x79000, - .enable_mask = BIT(7), -@@ -352,7 +366,7 @@ 
static struct clk_alpha_pll_postdiv gpll7_out_main = { - .post_div_table = post_div_table_gpll7_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll7_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll7.clkr.hw }, -@@ -380,7 +394,7 @@ static struct clk_alpha_pll gpll8 = { - .offset = 0x8000, - .vco_table = default_vco, - .num_vco = ARRAY_SIZE(default_vco), -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .flags = SUPPORTS_DYNAMIC_UPDATE, - .clkr = { - .enable_reg = 0x79000, -@@ -407,7 +421,7 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = { - .post_div_table = post_div_table_gpll8_out_main, - .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main), - .width = 4, -- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], -+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT], - .clkr.hw.init = &(struct clk_init_data){ - .name = "gpll8_out_main", - .parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw }, -@@ -706,7 +720,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = { - .parent_data = gcc_parents_7, - .num_parents = ARRAY_SIZE(gcc_parents_7), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -727,7 +741,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = { - .parent_data = gcc_parents_9, - .num_parents = ARRAY_SIZE(gcc_parents_9), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -750,7 +764,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = { - .parent_data = gcc_parents_4, - .num_parents = ARRAY_SIZE(gcc_parents_4), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -765,7 +779,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = { - .parent_data = gcc_parents_4, - .num_parents = ARRAY_SIZE(gcc_parents_4), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -780,7 +794,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = { - .parent_data = gcc_parents_4, - .num_parents = ARRAY_SIZE(gcc_parents_4), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -802,7 +816,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = { - .parent_data = gcc_parents_3, - .num_parents = ARRAY_SIZE(gcc_parents_3), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -817,7 +831,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = { - .parent_data = gcc_parents_3, - .num_parents = ARRAY_SIZE(gcc_parents_3), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -832,7 +846,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = { - .parent_data = gcc_parents_3, - .num_parents = ARRAY_SIZE(gcc_parents_3), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -847,7 +861,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = { - .parent_data = gcc_parents_3, - .num_parents = 
ARRAY_SIZE(gcc_parents_3), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -869,7 +883,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = { - .parent_data = gcc_parents_8, - .num_parents = ARRAY_SIZE(gcc_parents_8), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -893,7 +907,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = { - .parent_data = gcc_parents_8, - .num_parents = ARRAY_SIZE(gcc_parents_8), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -928,7 +942,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = { - .parent_data = gcc_parents_5, - .num_parents = ARRAY_SIZE(gcc_parents_5), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -953,7 +967,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = { - .parent_data = gcc_parents_6, - .num_parents = ARRAY_SIZE(gcc_parents_6), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -968,7 +982,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = { - .parent_data = gcc_parents_5, - .num_parents = ARRAY_SIZE(gcc_parents_5), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -983,7 +997,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = { - .parent_data = gcc_parents_6, - .num_parents = ARRAY_SIZE(gcc_parents_6), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -998,7 +1012,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_clk_src = { - .parent_data = gcc_parents_5, - .num_parents = ARRAY_SIZE(gcc_parents_5), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1013,7 +1027,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = { - .parent_data = gcc_parents_6, - .num_parents = ARRAY_SIZE(gcc_parents_6), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1036,7 +1050,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = { - .parent_data = gcc_parents_10, - .num_parents = ARRAY_SIZE(gcc_parents_10), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1058,7 +1072,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = { - .parent_data = gcc_parents_7, - .num_parents = ARRAY_SIZE(gcc_parents_7), - .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1128,7 +1142,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = { - .name = "gcc_pdm2_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1341,7 +1355,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { - .name = "gcc_ufs_phy_axi_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1363,7 +1377,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { - .name = "gcc_ufs_phy_ice_core_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1404,7 +1418,7 @@ static struct clk_rcg2 
gcc_ufs_phy_unipro_core_clk_src = { - .name = "gcc_ufs_phy_unipro_core_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1426,7 +1440,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { - .name = "gcc_usb30_prim_master_clk_src", - .parent_data = gcc_parents_0, - .num_parents = ARRAY_SIZE(gcc_parents_0), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -@@ -1495,7 +1509,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = { - .parent_data = gcc_parents_13, - .num_parents = ARRAY_SIZE(gcc_parents_13), - .flags = CLK_SET_RATE_PARENT, -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_shared_ops, - }, - }; - -diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c -index 543cfab7561f9..431b55bb0d2f7 100644 ---- a/drivers/clk/qcom/gcc-sm6125.c -+++ b/drivers/clk/qcom/gcc-sm6125.c -@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { - .name = "gcc_sdcc1_apps_clk_src", - .parent_data = gcc_parent_data_1, - .num_parents = ARRAY_SIZE(gcc_parent_data_1), -- .ops = &clk_rcg2_ops, -+ .ops = &clk_rcg2_floor_ops, - }, - }; - -@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { - .name = "gcc_sdcc1_ice_core_clk_src", - .parent_data = gcc_parent_data_0, - .num_parents = ARRAY_SIZE(gcc_parent_data_0), -- .ops = &clk_rcg2_floor_ops, -+ .ops = &clk_rcg2_ops, - }, - }; - -diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c -index 3236706771b11..e32ad7499285f 100644 ---- a/drivers/clk/qcom/gcc-sm6350.c -+++ b/drivers/clk/qcom/gcc-sm6350.c -@@ -640,6 +640,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { - .name = "gcc_sdcc2_apps_clk_src", - .parent_data = gcc_parent_data_8, - .num_parents = ARRAY_SIZE(gcc_parent_data_8), -+ .flags = CLK_OPS_PARENT_ENABLE, - .ops = &clk_rcg2_floor_ops, - }, - }; -diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c -index 9755ef4888c19..30bd561461074 100644 ---- a/drivers/clk/qcom/gcc-sm8250.c -+++ b/drivers/clk/qcom/gcc-sm8250.c -@@ -721,6 +721,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { - .name = "gcc_sdcc2_apps_clk_src", - .parent_data = gcc_parent_data_4, - .num_parents = ARRAY_SIZE(gcc_parent_data_4), -+ .flags = CLK_OPS_PARENT_ENABLE, - .ops = &clk_rcg2_floor_ops, - }, - }; -@@ -3267,7 +3268,7 @@ static struct gdsc usb30_prim_gdsc = { - .pd = { - .name = "usb30_prim_gdsc", - }, -- .pwrsts = PWRSTS_OFF_ON, -+ .pwrsts = PWRSTS_RET_ON, - }; - - static struct gdsc usb30_sec_gdsc = { -@@ -3275,7 +3276,7 @@ static struct gdsc usb30_sec_gdsc = { - .pd = { - .name = "usb30_sec_gdsc", - }, -- .pwrsts = PWRSTS_OFF_ON, -+ .pwrsts = PWRSTS_RET_ON, - }; - - static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { -diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c -index 6d0a9e2d51041..87d03b1684ed0 100644 ---- a/drivers/clk/qcom/gcc-sm8350.c -+++ b/drivers/clk/qcom/gcc-sm8350.c -@@ -16,6 +16,7 @@ - #include "clk-regmap.h" - #include "clk-regmap-divider.h" - #include "clk-regmap-mux.h" -+#include "clk-regmap-phy-mux.h" - #include "gdsc.h" - #include "reset.h" - -@@ -166,26 +167,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = { - { .fw_name = "core_bi_pll_test_se" }, - }; - --static const struct parent_map gcc_parent_map_4[] = { -- { P_PCIE_0_PIPE_CLK, 0 }, -- { P_BI_TCXO, 2 }, --}; -- --static const struct clk_parent_data gcc_parent_data_4[] = { -- { .fw_name = "pcie_0_pipe_clk", }, -- { 
.fw_name = "bi_tcxo" }, --}; -- --static const struct parent_map gcc_parent_map_5[] = { -- { P_PCIE_1_PIPE_CLK, 0 }, -- { P_BI_TCXO, 2 }, --}; -- --static const struct clk_parent_data gcc_parent_data_5[] = { -- { .fw_name = "pcie_1_pipe_clk" }, -- { .fw_name = "bi_tcxo" }, --}; -- - static const struct parent_map gcc_parent_map_6[] = { - { P_BI_TCXO, 0 }, - { P_GCC_GPLL0_OUT_MAIN, 1 }, -@@ -288,32 +269,30 @@ static const struct clk_parent_data gcc_parent_data_14[] = { - { .fw_name = "bi_tcxo" }, - }; - --static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = { -+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = { - .reg = 0x6b054, -- .shift = 0, -- .width = 2, -- .parent_map = gcc_parent_map_4, - .clkr = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_pcie_0_pipe_clk_src", -- .parent_data = gcc_parent_data_4, -- .num_parents = ARRAY_SIZE(gcc_parent_data_4), -- .ops = &clk_regmap_mux_closest_ops, -+ .parent_data = &(const struct clk_parent_data){ -+ .fw_name = "pcie_0_pipe_clk", -+ }, -+ .num_parents = 1, -+ .ops = &clk_regmap_phy_mux_ops, - }, - }, - }; - --static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = { -+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = { - .reg = 0x8d054, -- .shift = 0, -- .width = 2, -- .parent_map = gcc_parent_map_5, - .clkr = { - .hw.init = &(struct clk_init_data){ - .name = "gcc_pcie_1_pipe_clk_src", -- .parent_data = gcc_parent_data_5, -- .num_parents = ARRAY_SIZE(gcc_parent_data_5), -- .ops = &clk_regmap_mux_closest_ops, -+ .parent_data = &(const struct clk_parent_data){ -+ .fw_name = "pcie_1_pipe_clk", -+ }, -+ .num_parents = 1, -+ .ops = &clk_regmap_phy_mux_ops, - }, - }, - }; -diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c -index 4ece326ea233e..cf23cfd7e4674 100644 ---- a/drivers/clk/qcom/gdsc.c -+++ b/drivers/clk/qcom/gdsc.c -@@ -1,6 +1,6 @@ - // SPDX-License-Identifier: GPL-2.0-only - /* -- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. - */ - - #include -@@ -34,9 +34,14 @@ - #define CFG_GDSCR_OFFSET 0x4 - - /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). 
*/ --#define EN_REST_WAIT_VAL (0x2 << 20) --#define EN_FEW_WAIT_VAL (0x8 << 16) --#define CLK_DIS_WAIT_VAL (0x2 << 12) -+#define EN_REST_WAIT_VAL 0x2 -+#define EN_FEW_WAIT_VAL 0x8 -+#define CLK_DIS_WAIT_VAL 0x2 -+ -+/* Transition delay shifts */ -+#define EN_REST_WAIT_SHIFT 20 -+#define EN_FEW_WAIT_SHIFT 16 -+#define CLK_DIS_WAIT_SHIFT 12 - - #define RETAIN_MEM BIT(14) - #define RETAIN_PERIPH BIT(13) -@@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc) - */ - mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | - EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; -- val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; -+ -+ if (!sc->en_rest_wait_val) -+ sc->en_rest_wait_val = EN_REST_WAIT_VAL; -+ if (!sc->en_few_wait_val) -+ sc->en_few_wait_val = EN_FEW_WAIT_VAL; -+ if (!sc->clk_dis_wait_val) -+ sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; -+ -+ val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | -+ sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | -+ sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; -+ - ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); - if (ret) - return ret; -diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h -index 5bb396b344d16..762f1b5e1ec51 100644 ---- a/drivers/clk/qcom/gdsc.h -+++ b/drivers/clk/qcom/gdsc.h -@@ -1,6 +1,6 @@ - /* SPDX-License-Identifier: GPL-2.0-only */ - /* -- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. -+ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. - */ - - #ifndef __QCOM_GDSC_H__ -@@ -22,6 +22,9 @@ struct reset_controller_dev; - * @cxcs: offsets of branch registers to toggle mem/periph bits in - * @cxc_count: number of @cxcs - * @pwrsts: Possible powerdomain power states -+ * @en_rest_wait_val: transition delay value for receiving enr ack signal -+ * @en_few_wait_val: transition delay value for receiving enf ack signal -+ * @clk_dis_wait_val: transition delay value for halting clock - * @resets: ids of resets associated with this gdsc - * @reset_count: number of @resets - * @rcdev: reset controller -@@ -35,6 +38,9 @@ struct gdsc { - unsigned int clamp_io_ctrl; - unsigned int *cxcs; - unsigned int cxc_count; -+ unsigned int en_rest_wait_val; -+ unsigned int en_few_wait_val; -+ unsigned int clk_dis_wait_val; - const u8 pwrsts; - /* Powerdomain allowable state bitfields */ - #define PWRSTS_OFF BIT(0) -diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c -index 9a832f2bcf491..1490cd45a654a 100644 ---- a/drivers/clk/qcom/gpucc-sc7280.c -+++ b/drivers/clk/qcom/gpucc-sc7280.c -@@ -463,6 +463,7 @@ static int gpu_cc_sc7280_probe(struct platform_device *pdev) - */ - regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0)); - regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0)); -+ regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13)); - - return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap); - } -diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c -index ac09b7b840aba..a5731994cbed1 100644 ---- a/drivers/clk/qcom/lpasscorecc-sc7180.c -+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c -@@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = { - .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs), - }; - --static int lpass_create_pm_clks(struct platform_device *pdev) -+static int lpass_setup_runtime_pm(struct platform_device *pdev) - { - int ret; - -@@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev) - if (ret < 0) - dev_err(&pdev->dev, "failed to acquire 
iface clock\n"); - -- return ret; -+ return pm_runtime_resume_and_get(&pdev->dev); - } - - static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) -@@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) - struct regmap *regmap; - int ret; - -- ret = lpass_create_pm_clks(pdev); -+ ret = lpass_setup_runtime_pm(pdev); - if (ret) - return ret; - -@@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) - desc = &lpass_audio_hm_sc7180_desc; - ret = qcom_cc_probe_by_index(pdev, 1, desc); - if (ret) -- return ret; -+ goto exit; - - lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc"; - regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc); -- if (IS_ERR(regmap)) -- return PTR_ERR(regmap); -+ if (IS_ERR(regmap)) { -+ ret = PTR_ERR(regmap); -+ goto exit; -+ } - - /* - * Keep the CLK always-ON -@@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) - ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap); - - pm_runtime_mark_last_busy(&pdev->dev); -+exit: - pm_runtime_put_autosuspend(&pdev->dev); - - return ret; -@@ -425,14 +428,19 @@ static int lpass_hm_core_probe(struct platform_device *pdev) - const struct qcom_cc_desc *desc; - int ret; - -- ret = lpass_create_pm_clks(pdev); -+ ret = lpass_setup_runtime_pm(pdev); - if (ret) - return ret; - - lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core"; - desc = &lpass_core_hm_sc7180_desc; - -- return qcom_cc_probe_by_index(pdev, 0, desc); -+ ret = qcom_cc_probe_by_index(pdev, 0, desc); -+ -+ pm_runtime_mark_last_busy(&pdev->dev); -+ pm_runtime_put_autosuspend(&pdev->dev); -+ -+ return ret; - } - - static const struct of_device_id lpass_hm_sc7180_match_table[] = { -diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c -index 5a14074406623..d106bc65470e1 100644 ---- a/drivers/clk/qcom/mss-sc7180.c -+++ b/drivers/clk/qcom/mss-sc7180.c -@@ -87,11 +87,22 @@ static int mss_sc7180_probe(struct platform_device *pdev) - return ret; - } - -+ ret = pm_runtime_resume_and_get(&pdev->dev); -+ if (ret) -+ return ret; -+ - ret = qcom_cc_probe(pdev, &mss_sc7180_desc); - if (ret < 0) -- return ret; -+ goto err_put_rpm; -+ -+ pm_runtime_put(&pdev->dev); - - return 0; -+ -+err_put_rpm: -+ pm_runtime_put_sync(&pdev->dev); -+ -+ return ret; - } - - static const struct dev_pm_ops mss_sc7180_pm_ops = { -diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c -index 507386bee07dc..eb86fec29927e 100644 ---- a/drivers/clk/qcom/q6sstop-qcs404.c -+++ b/drivers/clk/qcom/q6sstop-qcs404.c -@@ -173,21 +173,32 @@ static int q6sstopcc_qcs404_probe(struct platform_device *pdev) - return ret; - } - -+ ret = pm_runtime_resume_and_get(&pdev->dev); -+ if (ret) -+ return ret; -+ - q6sstop_regmap_config.name = "q6sstop_tcsr"; - desc = &tcsr_qcs404_desc; - - ret = qcom_cc_probe_by_index(pdev, 1, desc); - if (ret) -- return ret; -+ goto err_put_rpm; - - q6sstop_regmap_config.name = "q6sstop_cc"; - desc = &q6sstop_qcs404_desc; - - ret = qcom_cc_probe_by_index(pdev, 0, desc); - if (ret) -- return ret; -+ goto err_put_rpm; -+ -+ pm_runtime_put(&pdev->dev); - - return 0; -+ -+err_put_rpm: -+ pm_runtime_put_sync(&pdev->dev); -+ -+ return ret; - } - - static const struct dev_pm_ops q6sstopcc_pm_ops = { -diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c -index 819d194be8f7b..e45e32804d2c7 100644 ---- a/drivers/clk/qcom/reset.c -+++ b/drivers/clk/qcom/reset.c -@@ -13,8 +13,11 @@ - - static int 
qcom_reset(struct reset_controller_dev *rcdev, unsigned long id) - { -+ struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev); -+ - rcdev->ops->assert(rcdev, id); -- udelay(1); -+ fsleep(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */ -+ - rcdev->ops->deassert(rcdev, id); - return 0; - } -@@ -28,7 +31,7 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) - - rst = to_qcom_reset_controller(rcdev); - map = &rst->reset_map[id]; -- mask = BIT(map->bit); -+ mask = map->bitmask ? map->bitmask : BIT(map->bit); - - return regmap_update_bits(rst->regmap, map->reg, mask, mask); - } -@@ -42,7 +45,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) - - rst = to_qcom_reset_controller(rcdev); - map = &rst->reset_map[id]; -- mask = BIT(map->bit); -+ mask = map->bitmask ? map->bitmask : BIT(map->bit); - - return regmap_update_bits(rst->regmap, map->reg, mask, 0); - } -diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h -index 2a08b5e282c77..9a47c838d9b1b 100644 ---- a/drivers/clk/qcom/reset.h -+++ b/drivers/clk/qcom/reset.h -@@ -11,6 +11,8 @@ - struct qcom_reset_map { - unsigned int reg; - u8 bit; -+ u8 udelay; -+ u32 bitmask; - }; - - struct regmap; -diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c -index 4543bda793f4f..c76d36a1fcfda 100644 ---- a/drivers/clk/qcom/turingcc-qcs404.c -+++ b/drivers/clk/qcom/turingcc-qcs404.c -@@ -124,11 +124,22 @@ static int turingcc_probe(struct platform_device *pdev) - return ret; - } - -+ ret = pm_runtime_resume_and_get(&pdev->dev); -+ if (ret) -+ return ret; -+ - ret = qcom_cc_probe(pdev, &turingcc_desc); - if (ret < 0) -- return ret; -+ goto err_put_rpm; -+ -+ pm_runtime_put(&pdev->dev); - - return 0; -+ -+err_put_rpm: -+ pm_runtime_put_sync(&pdev->dev); -+ -+ return ret; - } - - static const struct dev_pm_ops turingcc_pm_ops = { -diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c -index c99942f0e4d4c..3e43ae8480ddf 100644 ---- a/drivers/clk/renesas/r9a06g032-clocks.c -+++ b/drivers/clk/renesas/r9a06g032-clocks.c -@@ -286,8 +286,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { - .name = "uart_group_012", - .type = K_BITSEL, - .source = 1 + R9A06G032_DIV_UART, -- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */ -- .dual.sel = ((0xec / 4) << 5) | 24, -+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */ -+ .dual.sel = ((0x34 / 4) << 5) | 30, - .dual.group = 0, - }, - { -@@ -295,8 +295,8 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { - .name = "uart_group_34567", - .type = K_BITSEL, - .source = 1 + R9A06G032_DIV_P2_PG, -- /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG0_0 */ -- .dual.sel = ((0x34 / 4) << 5) | 30, -+ /* R9A06G032_SYSCTRL_REG_PWRCTRL_PG1_PR2 */ -+ .dual.sel = ((0xec / 4) << 5) | 24, - .dual.group = 1, - }, - D_UGATE(CLK_UART0, "clk_uart0", UART_GROUP_012, 0, 0, 0x1b2, 0x1b3, 0x1b4, 0x1b5), -@@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, - int error; - int index; - -- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, -+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++, - &clkspec)) { - if (clkspec.np != pd->dev.of_node) - continue; -@@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, - if (error) - return error; - } -- i++; - } - - return 0; -diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c -index 1490446985e2e..61609eddf7d04 100644 ---- 
a/drivers/clk/renesas/r9a07g044-cpg.c -+++ b/drivers/clk/renesas/r9a07g044-cpg.c -@@ -61,8 +61,8 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { - DEF_FIXED(".osc", R9A07G044_OSCCLK, CLK_EXTAL, 1, 1), - DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), - DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)), -- DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2), -- DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2), -+ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), -+ DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), - - DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2), - DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16), -diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c -index 761922ea5db76..1c92e73cd2b8c 100644 ---- a/drivers/clk/renesas/rzg2l-cpg.c -+++ b/drivers/clk/renesas/rzg2l-cpg.c -@@ -638,10 +638,16 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device - pm_clk_destroy(dev); - } - -+static void rzg2l_cpg_genpd_remove(void *data) -+{ -+ pm_genpd_remove(data); -+} -+ - static int __init rzg2l_cpg_add_clk_domain(struct device *dev) - { - struct device_node *np = dev->of_node; - struct generic_pm_domain *genpd; -+ int ret; - - genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL); - if (!genpd) -@@ -652,10 +658,15 @@ static int __init rzg2l_cpg_add_clk_domain(struct device *dev) - GENPD_FLAG_ACTIVE_WAKEUP; - genpd->attach_dev = rzg2l_cpg_attach_dev; - genpd->detach_dev = rzg2l_cpg_detach_dev; -- pm_genpd_init(genpd, &pm_domain_always_on_gov, false); -+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false); -+ if (ret) -+ return ret; - -- of_genpd_add_provider_simple(np, genpd); -- return 0; -+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd); -+ if (ret) -+ return ret; -+ -+ return of_genpd_add_provider_simple(np, genpd); - } - - static int __init rzg2l_cpg_probe(struct platform_device *pdev) -diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c -index f7827b3b7fc1c..6e5e502be44a6 100644 ---- a/drivers/clk/rockchip/clk-pll.c -+++ b/drivers/clk/rockchip/clk-pll.c -@@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, - return mux_clk; - - err_pll: -+ kfree(pll->rate_table); - clk_unregister(mux_clk); - mux_clk = pll_clk; - err_mux: -diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c -index 62a4f25439607..6bcf631b4e4c2 100644 ---- a/drivers/clk/rockchip/clk-rk3399.c -+++ b/drivers/clk/rockchip/clk-rk3399.c -@@ -1263,7 +1263,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { - RK3399_CLKSEL_CON(56), 6, 2, MFLAGS, - RK3399_CLKGATE_CON(10), 7, GFLAGS), - -- COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0, -+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT, - RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS), - - /* gic */ -diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c -index 75ca855e720df..6e5440841d1ee 100644 ---- a/drivers/clk/rockchip/clk-rk3568.c -+++ b/drivers/clk/rockchip/clk-rk3568.c -@@ -1038,13 +1038,13 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = { - RK3568_CLKGATE_CON(20), 8, GFLAGS), - GATE(HCLK_VOP, "hclk_vop", "hclk_vo", 0, - RK3568_CLKGATE_CON(20), 9, GFLAGS), -- COMPOSITE(DCLK_VOP0, "dclk_vop0", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, -+ COMPOSITE(DCLK_VOP0, "dclk_vop0", 
hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, - RK3568_CLKSEL_CON(39), 10, 2, MFLAGS, 0, 8, DFLAGS, - RK3568_CLKGATE_CON(20), 10, GFLAGS), -- COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, -+ COMPOSITE(DCLK_VOP1, "dclk_vop1", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, - RK3568_CLKSEL_CON(40), 10, 2, MFLAGS, 0, 8, DFLAGS, - RK3568_CLKGATE_CON(20), 11, GFLAGS), -- COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, 0, -+ COMPOSITE(DCLK_VOP2, "dclk_vop2", hpll_vpll_gpll_cpll_p, CLK_SET_RATE_NO_REPARENT, - RK3568_CLKSEL_CON(41), 10, 2, MFLAGS, 0, 8, DFLAGS, - RK3568_CLKGATE_CON(20), 12, GFLAGS), - GATE(CLK_VOP_PWM, "clk_vop_pwm", "xin24m", 0, -diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c -index b7be7e11b0dfe..bb8a844309bf5 100644 ---- a/drivers/clk/rockchip/clk.c -+++ b/drivers/clk/rockchip/clk.c -@@ -180,6 +180,7 @@ static void rockchip_fractional_approximation(struct clk_hw *hw, - unsigned long rate, unsigned long *parent_rate, - unsigned long *m, unsigned long *n) - { -+ struct clk_fractional_divider *fd = to_clk_fd(hw); - unsigned long p_rate, p_parent_rate; - struct clk_hw *p_parent; - -@@ -190,6 +191,8 @@ static void rockchip_fractional_approximation(struct clk_hw *hw, - *parent_rate = p_parent_rate; - } - -+ fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS; -+ - clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n); - } - -diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c -index 5873a9354b507..4909e940f0ab6 100644 ---- a/drivers/clk/samsung/clk-pll.c -+++ b/drivers/clk/samsung/clk-pll.c -@@ -1385,6 +1385,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, - if (ret) { - pr_err("%s: failed to register pll clock %s : %d\n", - __func__, pll_clk->name, ret); -+ kfree(pll->rate_table); - kfree(pll); - return; - } -diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c -index 1ec9678d8cd32..ee2a2d284113c 100644 ---- a/drivers/clk/socfpga/clk-gate.c -+++ b/drivers/clk/socfpga/clk-gate.c -@@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node) - return; - - ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); -- if (WARN_ON(!ops)) -+ if (WARN_ON(!ops)) { -+ kfree(socfpga_clk); - return; -+ } - - rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); - if (rc) -@@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node) - - err = clk_hw_register(NULL, hw_clk); - if (err) { -+ kfree(ops); - kfree(socfpga_clk); - return; - } -diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c -index d620bbbcdfc88..2bfbab8db94bf 100644 ---- a/drivers/clk/sprd/common.c -+++ b/drivers/clk/sprd/common.c -@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = { - .reg_bits = 32, - .reg_stride = 4, - .val_bits = 32, -- .max_register = 0xffff, - .fast_io = true, - }; - -@@ -41,8 +40,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev, - { - void __iomem *base; - struct device *dev = &pdev->dev; -- struct device_node *node = dev->of_node; -+ struct device_node *node = dev->of_node, *np; - struct regmap *regmap; -+ struct resource *res; -+ struct regmap_config reg_config = sprdclk_regmap_config; - - if (of_find_property(node, "sprd,syscon", NULL)) { - regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon"); -@@ -50,20 +51,23 @@ int sprd_clk_regmap_init(struct platform_device *pdev, - pr_err("%s: failed to get syscon regmap\n", 
__func__); - return PTR_ERR(regmap); - } -- } else if (of_device_is_compatible(of_get_parent(dev->of_node), -- "syscon")) { -- regmap = device_node_to_regmap(of_get_parent(dev->of_node)); -+ } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") || -+ (of_node_put(np), 0)) { -+ regmap = device_node_to_regmap(np); -+ of_node_put(np); - if (IS_ERR(regmap)) { - dev_err(dev, "failed to get regmap from its parent.\n"); - return PTR_ERR(regmap); - } - } else { -- base = devm_platform_ioremap_resource(pdev, 0); -+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(base)) - return PTR_ERR(base); - -+ reg_config.max_register = resource_size(res) - reg_config.reg_stride; -+ - regmap = devm_regmap_init_mmio(&pdev->dev, base, -- &sprdclk_regmap_config); -+ ®_config); - if (IS_ERR(regmap)) { - pr_err("failed to init regmap\n"); - return PTR_ERR(regmap); -diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c -index 164285d6be978..ba18e58f0aae3 100644 ---- a/drivers/clk/st/clkgen-fsyn.c -+++ b/drivers/clk/st/clkgen-fsyn.c -@@ -1008,9 +1008,10 @@ static void __init st_of_quadfs_setup(struct device_node *np, - - clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data, - reg, lock); -- if (IS_ERR(clk)) -+ if (IS_ERR(clk)) { -+ kfree(lock); - goto err_exit; -- else -+ } else - pr_debug("%s: parent %s rate %u\n", - __clk_get_name(clk), - __clk_get_name(clk_get_parent(clk)), -diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c -index f32366d9336e7..bd9a8782fec3d 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c -+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c -@@ -1464,7 +1464,7 @@ static void __init sun4i_ccu_init(struct device_node *node, - val &= ~GENMASK(7, 6); - writel(val | (2 << 6), reg + SUN4I_AHB_REG); - -- sunxi_ccu_probe(node, reg, desc); -+ of_sunxi_ccu_probe(node, reg, desc); - } - - static void __init sun4i_a10_ccu_setup(struct device_node *node) -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c -index a56142b909938..6f2a589705561 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c -@@ -196,7 +196,7 @@ static int sun50i_a100_r_ccu_probe(struct platform_device *pdev) - if (IS_ERR(reg)) - return PTR_ERR(reg); - -- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_r_ccu_desc); -+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_r_ccu_desc); - } - - static const struct of_device_id sun50i_a100_r_ccu_ids[] = { -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c -index 81b48c73d389f..913bb08e6dee8 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c -@@ -1247,7 +1247,7 @@ static int sun50i_a100_ccu_probe(struct platform_device *pdev) - writel(val, reg + sun50i_a100_usb2_clk_regs[i]); - } - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_ccu_desc); -+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_ccu_desc); - if (ret) - return ret; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -index 149cfde817cba..54f25c624f020 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -@@ -955,7 +955,7 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev) - - writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG); - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc); -+ ret = 
devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc); - if (ret) - return ret; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c -index f8909a7ed5539..f30d7eb5424d8 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c -@@ -232,7 +232,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node, - return; - } - -- sunxi_ccu_probe(node, reg, desc); -+ of_sunxi_ccu_probe(node, reg, desc); - } - - static void __init sun50i_h6_r_ccu_setup(struct device_node *node) -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c -index bff446b782907..c0800da2fa3d7 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c -@@ -1240,7 +1240,7 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev) - val |= BIT(24); - writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG); - -- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc); -+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc); - } - - static const struct of_device_id sun50i_h6_ccu_ids[] = { -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c -index 225307305880e..22eb18079a154 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c -@@ -1141,9 +1141,7 @@ static void __init sun50i_h616_ccu_setup(struct device_node *node) - val |= BIT(24); - writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG); - -- i = sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc); -- if (i) -- pr_err("%pOF: probing clocks fails: %d\n", node, i); -+ of_sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc); - } - - CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu", -diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c -index b78e9b507c1c6..1f4bc0e773a7e 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun5i.c -+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c -@@ -1012,7 +1012,7 @@ static void __init sun5i_ccu_init(struct device_node *node, - val &= ~GENMASK(7, 6); - writel(val | (2 << 6), reg + SUN5I_AHB_REG); - -- sunxi_ccu_probe(node, reg, desc); -+ of_sunxi_ccu_probe(node, reg, desc); - } - - static void __init sun5i_a10s_ccu_setup(struct device_node *node) -diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c -index 9b40d53266a3f..3df5c0b415804 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c -+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c -@@ -1257,7 +1257,7 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node) - val |= 0x3 << 12; - writel(val, reg + SUN6I_A31_AHB1_REG); - -- sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); -+ of_sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); - - ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, - &sun6i_a31_cpu_nb); -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c -index 103aa504f6c8a..577bb235d6584 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c -@@ -745,7 +745,7 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node) - val &= ~BIT(16); - writel(val, reg + SUN8I_A23_PLL_MIPI_REG); - -- sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc); -+ of_sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc); - } - CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu", - sun8i_a23_ccu_setup); -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c -index 91838cd110377..8f65cd03f5acc 
100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c -@@ -805,7 +805,7 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node) - val &= ~BIT(16); - writel(val, reg + SUN8I_A33_PLL_MIPI_REG); - -- sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc); -+ of_sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc); - - /* Gate then ungate PLL CPU after any rate changes */ - ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb); -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c -index 2b434521c5ccf..c2ddcd2ddab4e 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c -@@ -906,7 +906,7 @@ static int sun8i_a83t_ccu_probe(struct platform_device *pdev) - sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C0CPUX_REG); - sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C1CPUX_REG); - -- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_a83t_ccu_desc); -+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a83t_ccu_desc); - } - - static const struct of_device_id sun8i_a83t_ccu_ids[] = { -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c -index 524f33275bc73..4b94b6041b271 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c -@@ -342,7 +342,7 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev) - goto err_disable_mod_clk; - } - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, ccu_desc); -+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc); - if (ret) - goto err_assert_reset; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c -index 7e629a4493afd..d2fc2903787d8 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c -@@ -1154,7 +1154,7 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node, - val &= ~GENMASK(19, 16); - writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG); - -- sunxi_ccu_probe(node, reg, desc); -+ of_sunxi_ccu_probe(node, reg, desc); - - /* Gate then ungate PLL CPU after any rate changes */ - ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb); -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c -index 4c8c491b87c27..9e754d1f754a1 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c -@@ -265,7 +265,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node, - return; - } - -- sunxi_ccu_probe(node, reg, desc); -+ of_sunxi_ccu_probe(node, reg, desc); - } - - static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c -index 84153418453f4..002e0c3a04dbe 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c -@@ -1346,7 +1346,7 @@ static int sun8i_r40_ccu_probe(struct platform_device *pdev) - if (IS_ERR(regmap)) - return PTR_ERR(regmap); - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_r40_ccu_desc); -+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_r40_ccu_desc); - if (ret) - return ret; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c -index f49724a22540e..ce150f83ab54e 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c -+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c -@@ -822,7 +822,7 @@ static void __init sun8i_v3_v3s_ccu_init(struct device_node *node, - val &= ~GENMASK(19, 16); - writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG); - -- 
sunxi_ccu_probe(node, reg, ccu_desc); -+ of_sunxi_ccu_probe(node, reg, ccu_desc); - } - - static void __init sun8i_v3s_ccu_setup(struct device_node *node) -diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c -index 6616e8114f623..261e64416f26a 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c -+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c -@@ -246,8 +246,7 @@ static int sun9i_a80_de_clk_probe(struct platform_device *pdev) - goto err_disable_clk; - } - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, -- &sun9i_a80_de_clk_desc); -+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_de_clk_desc); - if (ret) - goto err_assert_reset; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c -index 4b4a507d04edf..596243b3e0fa3 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c -+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c -@@ -117,8 +117,7 @@ static int sun9i_a80_usb_clk_probe(struct platform_device *pdev) - return ret; - } - -- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, -- &sun9i_a80_usb_clk_desc); -+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_usb_clk_desc); - if (ret) - goto err_disable_clk; - -diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c -index ef29582676f6e..97aaed0e68500 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c -+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c -@@ -1231,7 +1231,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev) - sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C0CPUX_REG); - sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C1CPUX_REG); - -- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun9i_a80_ccu_desc); -+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_ccu_desc); - } - - static const struct of_device_id sun9i_a80_ccu_ids[] = { -diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c -index 7ecc3a5a5b5e1..61ad7ee91c114 100644 ---- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c -+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c -@@ -538,7 +538,7 @@ static void __init suniv_f1c100s_ccu_setup(struct device_node *node) - val &= ~GENMASK(19, 16); - writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG); - -- sunxi_ccu_probe(node, reg, &suniv_ccu_desc); -+ of_sunxi_ccu_probe(node, reg, &suniv_ccu_desc); - - /* Gate then ungate PLL CPU after any rate changes */ - ccu_pll_notifier_register(&suniv_pll_cpu_nb); -diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c -index 2e20e650b6c01..88cb569e58358 100644 ---- a/drivers/clk/sunxi-ng/ccu_common.c -+++ b/drivers/clk/sunxi-ng/ccu_common.c -@@ -7,6 +7,7 @@ - - #include - #include -+#include - #include - #include - -@@ -14,6 +15,11 @@ - #include "ccu_gate.h" - #include "ccu_reset.h" - -+struct sunxi_ccu { -+ const struct sunxi_ccu_desc *desc; -+ struct ccu_reset reset; -+}; -+ - static DEFINE_SPINLOCK(ccu_lock); - - void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock) -@@ -79,12 +85,15 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb) - &pll_nb->clk_nb); - } - --int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, -- const struct sunxi_ccu_desc *desc) -+static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev, -+ struct device_node *node, void __iomem *reg, -+ const struct sunxi_ccu_desc *desc) - { - struct ccu_reset *reset; - int i, ret; - -+ ccu->desc = desc; -+ - for (i = 0; i < desc->num_ccu_clks; i++) { - struct ccu_common *cclk = desc->ccu_clks[i]; 
- -@@ -103,7 +112,10 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, - continue; - - name = hw->init->name; -- ret = of_clk_hw_register(node, hw); -+ if (dev) -+ ret = clk_hw_register(dev, hw); -+ else -+ ret = of_clk_hw_register(node, hw); - if (ret) { - pr_err("Couldn't register clock %d - %s\n", i, name); - goto err_clk_unreg; -@@ -115,15 +127,10 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, - if (ret) - goto err_clk_unreg; - -- reset = kzalloc(sizeof(*reset), GFP_KERNEL); -- if (!reset) { -- ret = -ENOMEM; -- goto err_alloc_reset; -- } -- -+ reset = &ccu->reset; - reset->rcdev.of_node = node; - reset->rcdev.ops = &ccu_reset_ops; -- reset->rcdev.owner = THIS_MODULE; -+ reset->rcdev.owner = dev ? dev->driver->owner : THIS_MODULE; - reset->rcdev.nr_resets = desc->num_resets; - reset->base = reg; - reset->lock = &ccu_lock; -@@ -131,13 +138,11 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, - - ret = reset_controller_register(&reset->rcdev); - if (ret) -- goto err_of_clk_unreg; -+ goto err_del_provider; - - return 0; - --err_of_clk_unreg: -- kfree(reset); --err_alloc_reset: -+err_del_provider: - of_clk_del_provider(node); - err_clk_unreg: - while (--i >= 0) { -@@ -149,3 +154,59 @@ err_clk_unreg: - } - return ret; - } -+ -+static void devm_sunxi_ccu_release(struct device *dev, void *res) -+{ -+ struct sunxi_ccu *ccu = res; -+ const struct sunxi_ccu_desc *desc = ccu->desc; -+ int i; -+ -+ reset_controller_unregister(&ccu->reset.rcdev); -+ of_clk_del_provider(dev->of_node); -+ -+ for (i = 0; i < desc->hw_clks->num; i++) { -+ struct clk_hw *hw = desc->hw_clks->hws[i]; -+ -+ if (!hw) -+ continue; -+ clk_hw_unregister(hw); -+ } -+} -+ -+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg, -+ const struct sunxi_ccu_desc *desc) -+{ -+ struct sunxi_ccu *ccu; -+ int ret; -+ -+ ccu = devres_alloc(devm_sunxi_ccu_release, sizeof(*ccu), GFP_KERNEL); -+ if (!ccu) -+ return -ENOMEM; -+ -+ ret = sunxi_ccu_probe(ccu, dev, dev->of_node, reg, desc); -+ if (ret) { -+ devres_free(ccu); -+ return ret; -+ } -+ -+ devres_add(dev, ccu); -+ -+ return 0; -+} -+ -+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg, -+ const struct sunxi_ccu_desc *desc) -+{ -+ struct sunxi_ccu *ccu; -+ int ret; -+ -+ ccu = kzalloc(sizeof(*ccu), GFP_KERNEL); -+ if (!ccu) -+ return; -+ -+ ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc); -+ if (ret) { -+ pr_err("%pOF: probing clocks failed: %d\n", node, ret); -+ kfree(ccu); -+ } -+} -diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h -index 04e7a12200a21..98a1834b58bb4 100644 ---- a/drivers/clk/sunxi-ng/ccu_common.h -+++ b/drivers/clk/sunxi-ng/ccu_common.h -@@ -63,7 +63,9 @@ struct ccu_pll_nb { - - int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb); - --int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, -- const struct sunxi_ccu_desc *desc); -+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg, -+ const struct sunxi_ccu_desc *desc); -+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg, -+ const struct sunxi_ccu_desc *desc); - - #endif /* _COMMON_H_ */ -diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c -index de33414fc5c28..c6a6ce98ca03a 100644 ---- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c -+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c -@@ -43,7 +43,7 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode) - EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode); - - /** -- * 
sunxi_ccu_set_mmc_timing_mode: Get the current MMC clock timing mode -+ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode - * @clk: clock to query - * - * Returns 0 if the clock is in old timing mode, > 0 if it is in -diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c -index 542b31d6e96dd..636bcf2439ef2 100644 ---- a/drivers/clk/sunxi/clk-sun9i-mmc.c -+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c -@@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) - spin_lock_init(&data->lock); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!r) -+ return -EINVAL; - /* one clock/reset pair per word */ - count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH); - data->membase = devm_ioremap_resource(&pdev->dev, r); -diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c -index 6144447f86c63..62238dca9a534 100644 ---- a/drivers/clk/tegra/clk-dfll.c -+++ b/drivers/clk/tegra/clk-dfll.c -@@ -271,6 +271,7 @@ struct tegra_dfll { - struct clk *ref_clk; - struct clk *i2c_clk; - struct clk *dfll_clk; -+ struct reset_control *dfll_rst; - struct reset_control *dvco_rst; - unsigned long ref_rate; - unsigned long i2c_clk_rate; -@@ -1464,6 +1465,7 @@ static int dfll_init(struct tegra_dfll *td) - return -EINVAL; - } - -+ reset_control_deassert(td->dfll_rst); - reset_control_deassert(td->dvco_rst); - - ret = clk_prepare(td->ref_clk); -@@ -1509,6 +1511,7 @@ di_err1: - clk_unprepare(td->ref_clk); - - reset_control_assert(td->dvco_rst); -+ reset_control_assert(td->dfll_rst); - - return ret; - } -@@ -1530,6 +1533,7 @@ int tegra_dfll_suspend(struct device *dev) - } - - reset_control_assert(td->dvco_rst); -+ reset_control_assert(td->dfll_rst); - - return 0; - } -@@ -1548,6 +1552,7 @@ int tegra_dfll_resume(struct device *dev) - { - struct tegra_dfll *td = dev_get_drvdata(dev); - -+ reset_control_deassert(td->dfll_rst); - reset_control_deassert(td->dvco_rst); - - pm_runtime_get_sync(td->dev); -@@ -1951,6 +1956,12 @@ int tegra_dfll_register(struct platform_device *pdev, - - td->soc = soc; - -+ td->dfll_rst = devm_reset_control_get_optional(td->dev, "dfll"); -+ if (IS_ERR(td->dfll_rst)) { -+ dev_err(td->dev, "couldn't get dfll reset\n"); -+ return PTR_ERR(td->dfll_rst); -+ } -+ - td->dvco_rst = devm_reset_control_get(td->dev, "dvco"); - if (IS_ERR(td->dvco_rst)) { - dev_err(td->dev, "couldn't get dvco reset\n"); -@@ -2087,6 +2098,7 @@ struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev) - clk_unprepare(td->i2c_clk); - - reset_control_assert(td->dvco_rst); -+ reset_control_assert(td->dfll_rst); - - return td->soc; - } -diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c -index bc9e47a4cb60a..4e2b26e3e5738 100644 ---- a/drivers/clk/tegra/clk-tegra114.c -+++ b/drivers/clk/tegra/clk-tegra114.c -@@ -1317,6 +1317,7 @@ static void __init tegra114_clock_init(struct device_node *np) - } - - pmc_base = of_iomap(node, 0); -+ of_node_put(node); - if (!pmc_base) { - pr_err("Can't map pmc registers\n"); - WARN_ON(1); -diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c -index 74c1d894cca86..2a6db04342815 100644 ---- a/drivers/clk/tegra/clk-tegra124-emc.c -+++ b/drivers/clk/tegra/clk-tegra124-emc.c -@@ -198,6 +198,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra) - - tegra->emc = platform_get_drvdata(pdev); - if (!tegra->emc) { -+ put_device(&pdev->dev); - pr_err("%s: cannot find EMC driver\n", __func__); 
- return NULL; - } -@@ -463,6 +464,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra, - err = load_one_timing_from_dt(tegra, timing, child); - if (err) { - of_node_put(child); -+ kfree(tegra->timings); - return err; - } - -@@ -514,6 +516,7 @@ struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np - err = load_timings_from_dt(tegra, node, node_ram_code); - if (err) { - of_node_put(node); -+ kfree(tegra); - return ERR_PTR(err); - } - } -diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c -index 3664593a5ba4e..cc57ababc882d 100644 ---- a/drivers/clk/tegra/clk-tegra20.c -+++ b/drivers/clk/tegra/clk-tegra20.c -@@ -18,24 +18,24 @@ - #define MISC_CLK_ENB 0x48 - - #define OSC_CTRL 0x50 --#define OSC_CTRL_OSC_FREQ_MASK (3<<30) --#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30) --#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30) --#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30) --#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30) --#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK) -- --#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28) --#define OSC_CTRL_PLL_REF_DIV_1 (0<<28) --#define OSC_CTRL_PLL_REF_DIV_2 (1<<28) --#define OSC_CTRL_PLL_REF_DIV_4 (2<<28) -+#define OSC_CTRL_OSC_FREQ_MASK (3u<<30) -+#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30) -+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30) -+#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30) -+#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30) -+#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK) -+ -+#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28) -+#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28) -+#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28) -+#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28) - - #define OSC_FREQ_DET 0x58 --#define OSC_FREQ_DET_TRIG (1<<31) -+#define OSC_FREQ_DET_TRIG (1u<<31) - - #define OSC_FREQ_DET_STATUS 0x5c --#define OSC_FREQ_DET_BUSY (1<<31) --#define OSC_FREQ_DET_CNT_MASK 0xFFFF -+#define OSC_FREQ_DET_BUSYu (1<<31) -+#define OSC_FREQ_DET_CNT_MASK 0xFFFFu - - #define TEGRA20_CLK_PERIPH_BANKS 3 - -@@ -1128,6 +1128,7 @@ static void __init tegra20_clock_init(struct device_node *np) - } - - pmc_base = of_iomap(node, 0); -+ of_node_put(node); - if (!pmc_base) { - pr_err("Can't map pmc registers\n"); - BUG(); -diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c -index b9099012dc7b1..499f999e91e13 100644 ---- a/drivers/clk/tegra/clk-tegra210.c -+++ b/drivers/clk/tegra/clk-tegra210.c -@@ -3748,6 +3748,7 @@ static void __init tegra210_clock_init(struct device_node *np) - } - - pmc_base = of_iomap(node, 0); -+ of_node_put(node); - if (!pmc_base) { - pr_err("Can't map pmc registers\n"); - WARN_ON(1); -diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c -index 8d4c08b034bdd..e2e59d78c173f 100644 ---- a/drivers/clk/ti/clk-dra7-atl.c -+++ b/drivers/clk/ti/clk-dra7-atl.c -@@ -251,14 +251,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) - if (rc) { - pr_err("%s: failed to lookup atl clock %d\n", __func__, - i); -- return -EINVAL; -+ ret = -EINVAL; -+ goto pm_put; - } - - clk = of_clk_get_from_provider(&clkspec); - if (IS_ERR(clk)) { - pr_err("%s: failed to get atl clock %d from provider\n", - __func__, i); -- return PTR_ERR(clk); -+ ret = PTR_ERR(clk); -+ goto pm_put; - } - - cdesc = to_atl_desc(__clk_get_hw(clk)); -@@ -291,8 +293,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) - if (cdesc->enabled) - atl_clk_enable(__clk_get_hw(clk)); - } -- pm_runtime_put_sync(cinfo->dev); - -+pm_put: -+ pm_runtime_put_sync(cinfo->dev); - return ret; - } - -diff --git 
a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c -index 3da33c786d77c..29eafab4353ef 100644 ---- a/drivers/clk/ti/clk.c -+++ b/drivers/clk/ti/clk.c -@@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) - void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) - { - struct ti_dt_clk *c; -- struct device_node *node, *parent; -+ struct device_node *node, *parent, *child; - struct clk *clk; - struct of_phandle_args clkspec; - char buf[64]; -@@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) - node = of_find_node_by_name(NULL, buf); - if (num_args && compat_mode) { - parent = node; -- node = of_get_child_by_name(parent, "clock"); -- if (!node) -- node = of_get_child_by_name(parent, "clk"); -- of_node_put(parent); -+ child = of_get_child_by_name(parent, "clock"); -+ if (!child) -+ child = of_get_child_by_name(parent, "clk"); -+ if (child) { -+ of_node_put(parent); -+ node = child; -+ } - } - - clkspec.np = node; -diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c -index 864c484bde1b4..157abc46dcf44 100644 ---- a/drivers/clk/ti/clkctrl.c -+++ b/drivers/clk/ti/clkctrl.c -@@ -267,6 +267,9 @@ static const char * __init clkctrl_get_clock_name(struct device_node *np, - if (clkctrl_name && !legacy_naming) { - clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d", - clkctrl_name, offset, index); -+ if (!clock_name) -+ return NULL; -+ - strreplace(clock_name, '_', '-'); - - return clock_name; -@@ -598,6 +601,10 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) - if (clkctrl_name) { - provider->clkdm_name = kasprintf(GFP_KERNEL, - "%s_clkdm", clkctrl_name); -+ if (!provider->clkdm_name) { -+ kfree(provider); -+ return; -+ } - goto clkdm_found; - } - -diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c -index 5319cd3804801..3bc55ab75314b 100644 ---- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c -+++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c -@@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev, - - init.name = name; - init.ops = &clk_fixed_rate_ops; -+ init.flags = 0; - init.parent_names = NULL; - init.num_parents = 0; - -diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig -index 69642e15fcc1f..ced99e082e3dd 100644 ---- a/drivers/clk/x86/Kconfig -+++ b/drivers/clk/x86/Kconfig -@@ -1,8 +1,9 @@ - # SPDX-License-Identifier: GPL-2.0-only - config CLK_LGM_CGU - depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST) -+ select MFD_SYSCON - select OF_EARLY_FLATTREE - bool "Clock driver for Lightning Mountain(LGM) platform" - help -- Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM) -- network processor SoC. -+ Clock Generation Unit(CGU) driver for MaxLinear's x86 based -+ Lightning Mountain(LGM) network processor SoC. -diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c -index 3179557b5f784..409dbf55f4cae 100644 ---- a/drivers/clk/x86/clk-cgu-pll.c -+++ b/drivers/clk/x86/clk-cgu-pll.c -@@ -1,8 +1,9 @@ - // SPDX-License-Identifier: GPL-2.0 - /* -+ * Copyright (C) 2020-2022 MaxLinear, Inc. - * Copyright (C) 2020 Intel Corporation. 
-- * Zhu YiXin -- * Rahul Tanwar -+ * Zhu Yixin -+ * Rahul Tanwar - */ - - #include -@@ -40,13 +41,10 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) - { - struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); - unsigned int div, mult, frac; -- unsigned long flags; - -- spin_lock_irqsave(&pll->lock, flags); - mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12); - div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6); - frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24); -- spin_unlock_irqrestore(&pll->lock, flags); - - if (pll->type == TYPE_LJPLL) - div *= 4; -@@ -57,12 +55,9 @@ static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate) - static int lgm_pll_is_enabled(struct clk_hw *hw) - { - struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); -- unsigned long flags; - unsigned int ret; - -- spin_lock_irqsave(&pll->lock, flags); - ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1); -- spin_unlock_irqrestore(&pll->lock, flags); - - return ret; - } -@@ -70,15 +65,13 @@ static int lgm_pll_is_enabled(struct clk_hw *hw) - static int lgm_pll_enable(struct clk_hw *hw) - { - struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); -- unsigned long flags; - u32 val; - int ret; - -- spin_lock_irqsave(&pll->lock, flags); - lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1); -- ret = readl_poll_timeout_atomic(pll->membase + pll->reg, -- val, (val & 0x1), 1, 100); -- spin_unlock_irqrestore(&pll->lock, flags); -+ ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg, -+ val, (val & 0x1), 1, 100); -+ - - return ret; - } -@@ -86,11 +79,8 @@ static int lgm_pll_enable(struct clk_hw *hw) - static void lgm_pll_disable(struct clk_hw *hw) - { - struct lgm_clk_pll *pll = to_lgm_clk_pll(hw); -- unsigned long flags; - -- spin_lock_irqsave(&pll->lock, flags); - lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0); -- spin_unlock_irqrestore(&pll->lock, flags); - } - - static const struct clk_ops lgm_pll_ops = { -@@ -121,7 +111,6 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx, - return ERR_PTR(-ENOMEM); - - pll->membase = ctx->membase; -- pll->lock = ctx->lock; - pll->reg = list->reg; - pll->flags = list->flags; - pll->type = list->type; -diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c -index 33de600e0c38e..89b53f280aee0 100644 ---- a/drivers/clk/x86/clk-cgu.c -+++ b/drivers/clk/x86/clk-cgu.c -@@ -1,8 +1,9 @@ - // SPDX-License-Identifier: GPL-2.0 - /* -+ * Copyright (C) 2020-2022 MaxLinear, Inc. - * Copyright (C) 2020 Intel Corporation. 
-- * Zhu YiXin -- * Rahul Tanwar -+ * Zhu Yixin -+ * Rahul Tanwar - */ - #include - #include -@@ -24,14 +25,10 @@ - static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, - const struct lgm_clk_branch *list) - { -- unsigned long flags; - -- if (list->div_flags & CLOCK_FLAG_VAL_INIT) { -- spin_lock_irqsave(&ctx->lock, flags); -+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) - lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, - list->div_width, list->div_val); -- spin_unlock_irqrestore(&ctx->lock, flags); -- } - - return clk_hw_register_fixed_rate(NULL, list->name, - list->parent_data[0].name, -@@ -41,33 +38,27 @@ static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx, - static u8 lgm_clk_mux_get_parent(struct clk_hw *hw) - { - struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); -- unsigned long flags; - u32 val; - -- spin_lock_irqsave(&mux->lock, flags); - if (mux->flags & MUX_CLK_SW) - val = mux->reg; - else - val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift, - mux->width); -- spin_unlock_irqrestore(&mux->lock, flags); - return clk_mux_val_to_index(hw, NULL, mux->flags, val); - } - - static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index) - { - struct lgm_clk_mux *mux = to_lgm_clk_mux(hw); -- unsigned long flags; - u32 val; - - val = clk_mux_index_to_val(NULL, mux->flags, index); -- spin_lock_irqsave(&mux->lock, flags); - if (mux->flags & MUX_CLK_SW) - mux->reg = val; - else - lgm_set_clk_val(mux->membase, mux->reg, mux->shift, - mux->width, val); -- spin_unlock_irqrestore(&mux->lock, flags); - - return 0; - } -@@ -90,7 +81,7 @@ static struct clk_hw * - lgm_clk_register_mux(struct lgm_clk_provider *ctx, - const struct lgm_clk_branch *list) - { -- unsigned long flags, cflags = list->mux_flags; -+ unsigned long cflags = list->mux_flags; - struct device *dev = ctx->dev; - u8 shift = list->mux_shift; - u8 width = list->mux_width; -@@ -111,7 +102,6 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, - init.num_parents = list->num_parents; - - mux->membase = ctx->membase; -- mux->lock = ctx->lock; - mux->reg = reg; - mux->shift = shift; - mux->width = width; -@@ -123,11 +113,8 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, - if (ret) - return ERR_PTR(ret); - -- if (cflags & CLOCK_FLAG_VAL_INIT) { -- spin_lock_irqsave(&mux->lock, flags); -+ if (cflags & CLOCK_FLAG_VAL_INIT) - lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val); -- spin_unlock_irqrestore(&mux->lock, flags); -- } - - return hw; - } -@@ -136,13 +123,10 @@ static unsigned long - lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) - { - struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); -- unsigned long flags; - unsigned int val; - -- spin_lock_irqsave(÷r->lock, flags); - val = lgm_get_clk_val(divider->membase, divider->reg, - divider->shift, divider->width); -- spin_unlock_irqrestore(÷r->lock, flags); - - return divider_recalc_rate(hw, parent_rate, val, divider->table, - divider->flags, divider->width); -@@ -163,7 +147,6 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long prate) - { - struct lgm_clk_divider *divider = to_lgm_clk_divider(hw); -- unsigned long flags; - int value; - - value = divider_get_val(rate, prate, divider->table, -@@ -171,10 +154,8 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, - if (value < 0) - return value; - -- spin_lock_irqsave(÷r->lock, flags); - lgm_set_clk_val(divider->membase, divider->reg, - divider->shift, divider->width, value); -- 
spin_unlock_irqrestore(÷r->lock, flags); - - return 0; - } -@@ -182,12 +163,10 @@ lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, - static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable) - { - struct lgm_clk_divider *div = to_lgm_clk_divider(hw); -- unsigned long flags; - -- spin_lock_irqsave(&div->lock, flags); -- lgm_set_clk_val(div->membase, div->reg, div->shift_gate, -- div->width_gate, enable); -- spin_unlock_irqrestore(&div->lock, flags); -+ if (div->flags != DIV_CLK_NO_MASK) -+ lgm_set_clk_val(div->membase, div->reg, div->shift_gate, -+ div->width_gate, enable); - return 0; - } - -@@ -213,7 +192,7 @@ static struct clk_hw * - lgm_clk_register_divider(struct lgm_clk_provider *ctx, - const struct lgm_clk_branch *list) - { -- unsigned long flags, cflags = list->div_flags; -+ unsigned long cflags = list->div_flags; - struct device *dev = ctx->dev; - struct lgm_clk_divider *div; - struct clk_init_data init = {}; -@@ -236,7 +215,6 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, - init.num_parents = 1; - - div->membase = ctx->membase; -- div->lock = ctx->lock; - div->reg = reg; - div->shift = shift; - div->width = width; -@@ -251,11 +229,8 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, - if (ret) - return ERR_PTR(ret); - -- if (cflags & CLOCK_FLAG_VAL_INIT) { -- spin_lock_irqsave(&div->lock, flags); -+ if (cflags & CLOCK_FLAG_VAL_INIT) - lgm_set_clk_val(div->membase, reg, shift, width, list->div_val); -- spin_unlock_irqrestore(&div->lock, flags); -- } - - return hw; - } -@@ -264,7 +239,6 @@ static struct clk_hw * - lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, - const struct lgm_clk_branch *list) - { -- unsigned long flags; - struct clk_hw *hw; - - hw = clk_hw_register_fixed_factor(ctx->dev, list->name, -@@ -273,12 +247,9 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, - if (IS_ERR(hw)) - return ERR_CAST(hw); - -- if (list->div_flags & CLOCK_FLAG_VAL_INIT) { -- spin_lock_irqsave(&ctx->lock, flags); -+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) - lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift, - list->div_width, list->div_val); -- spin_unlock_irqrestore(&ctx->lock, flags); -- } - - return hw; - } -@@ -286,13 +257,10 @@ lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx, - static int lgm_clk_gate_enable(struct clk_hw *hw) - { - struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); -- unsigned long flags; - unsigned int reg; - -- spin_lock_irqsave(&gate->lock, flags); - reg = GATE_HW_REG_EN(gate->reg); - lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); -- spin_unlock_irqrestore(&gate->lock, flags); - - return 0; - } -@@ -300,25 +268,19 @@ static int lgm_clk_gate_enable(struct clk_hw *hw) - static void lgm_clk_gate_disable(struct clk_hw *hw) - { - struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); -- unsigned long flags; - unsigned int reg; - -- spin_lock_irqsave(&gate->lock, flags); - reg = GATE_HW_REG_DIS(gate->reg); - lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1); -- spin_unlock_irqrestore(&gate->lock, flags); - } - - static int lgm_clk_gate_is_enabled(struct clk_hw *hw) - { - struct lgm_clk_gate *gate = to_lgm_clk_gate(hw); - unsigned int reg, ret; -- unsigned long flags; - -- spin_lock_irqsave(&gate->lock, flags); - reg = GATE_HW_REG_STAT(gate->reg); - ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1); -- spin_unlock_irqrestore(&gate->lock, flags); - - return ret; - } -@@ -333,7 +295,7 @@ static struct clk_hw * - lgm_clk_register_gate(struct lgm_clk_provider *ctx, - 
const struct lgm_clk_branch *list) - { -- unsigned long flags, cflags = list->gate_flags; -+ unsigned long cflags = list->gate_flags; - const char *pname = list->parent_data[0].name; - struct device *dev = ctx->dev; - u8 shift = list->gate_shift; -@@ -354,7 +316,6 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, - init.num_parents = pname ? 1 : 0; - - gate->membase = ctx->membase; -- gate->lock = ctx->lock; - gate->reg = reg; - gate->shift = shift; - gate->flags = cflags; -@@ -366,9 +327,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, - return ERR_PTR(ret); - - if (cflags & CLOCK_FLAG_VAL_INIT) { -- spin_lock_irqsave(&gate->lock, flags); - lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val); -- spin_unlock_irqrestore(&gate->lock, flags); - } - - return hw; -@@ -396,8 +355,22 @@ int lgm_clk_register_branches(struct lgm_clk_provider *ctx, - hw = lgm_clk_register_fixed_factor(ctx, list); - break; - case CLK_TYPE_GATE: -- hw = lgm_clk_register_gate(ctx, list); -+ if (list->gate_flags & GATE_CLK_HW) { -+ hw = lgm_clk_register_gate(ctx, list); -+ } else { -+ /* -+ * GATE_CLKs can be controlled either from -+ * CGU clk driver i.e. this driver or directly -+ * from power management driver/daemon. It is -+ * dependent on the power policy/profile requirements -+ * of the end product. To override control of gate -+ * clks from this driver, provide NULL for this index -+ * of gate clk provider. -+ */ -+ hw = NULL; -+ } - break; -+ - default: - dev_err(ctx->dev, "invalid clk type\n"); - return -EINVAL; -@@ -443,24 +416,18 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) - static int lgm_clk_ddiv_enable(struct clk_hw *hw) - { - struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); -- unsigned long flags; - -- spin_lock_irqsave(&ddiv->lock, flags); - lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, - ddiv->width_gate, 1); -- spin_unlock_irqrestore(&ddiv->lock, flags); - return 0; - } - - static void lgm_clk_ddiv_disable(struct clk_hw *hw) - { - struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); -- unsigned long flags; - -- spin_lock_irqsave(&ddiv->lock, flags); - lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate, - ddiv->width_gate, 0); -- spin_unlock_irqrestore(&ddiv->lock, flags); - } - - static int -@@ -497,32 +464,25 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, - { - struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - u32 div, ddiv1, ddiv2; -- unsigned long flags; - - div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate); - -- spin_lock_irqsave(&ddiv->lock, flags); - if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { - div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); - div = div * 2; - } - -- if (div <= 0) { -- spin_unlock_irqrestore(&ddiv->lock, flags); -+ if (div <= 0) - return -EINVAL; -- } - -- if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) { -- spin_unlock_irqrestore(&ddiv->lock, flags); -+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) - return -EINVAL; -- } - - lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0, - ddiv1 - 1); - - lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1, - ddiv2 - 1); -- spin_unlock_irqrestore(&ddiv->lock, flags); - - return 0; - } -@@ -533,18 +493,15 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, - { - struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); - u32 div, ddiv1, ddiv2; -- unsigned long flags; - u64 rate64; - - div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate); - - /* if predivide bit is enabled, modify div by factor of 2.5 */ -- 
spin_lock_irqsave(&ddiv->lock, flags); - if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { - div = div * 2; - div = DIV_ROUND_CLOSEST_ULL((u64)div, 5); - } -- spin_unlock_irqrestore(&ddiv->lock, flags); - - if (div <= 0) - return *prate; -@@ -558,12 +515,10 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, - do_div(rate64, ddiv2); - - /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */ -- spin_lock_irqsave(&ddiv->lock, flags); - if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { - rate64 = rate64 * 2; - rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); - } -- spin_unlock_irqrestore(&ddiv->lock, flags); - - return rate64; - } -@@ -600,7 +555,6 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, - init.num_parents = 1; - - ddiv->membase = ctx->membase; -- ddiv->lock = ctx->lock; - ddiv->reg = list->reg; - ddiv->shift0 = list->shift0; - ddiv->width0 = list->width0; -diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h -index 4e22bfb223128..bcaf8aec94e5d 100644 ---- a/drivers/clk/x86/clk-cgu.h -+++ b/drivers/clk/x86/clk-cgu.h -@@ -1,28 +1,28 @@ - /* SPDX-License-Identifier: GPL-2.0 */ - /* -- * Copyright(c) 2020 Intel Corporation. -- * Zhu YiXin -- * Rahul Tanwar -+ * Copyright (C) 2020-2022 MaxLinear, Inc. -+ * Copyright (C) 2020 Intel Corporation. -+ * Zhu Yixin -+ * Rahul Tanwar - */ - - #ifndef __CLK_CGU_H - #define __CLK_CGU_H - --#include -+#include - - struct lgm_clk_mux { - struct clk_hw hw; -- void __iomem *membase; -+ struct regmap *membase; - unsigned int reg; - u8 shift; - u8 width; - unsigned long flags; -- spinlock_t lock; - }; - - struct lgm_clk_divider { - struct clk_hw hw; -- void __iomem *membase; -+ struct regmap *membase; - unsigned int reg; - u8 shift; - u8 width; -@@ -30,12 +30,11 @@ struct lgm_clk_divider { - u8 width_gate; - unsigned long flags; - const struct clk_div_table *table; -- spinlock_t lock; - }; - - struct lgm_clk_ddiv { - struct clk_hw hw; -- void __iomem *membase; -+ struct regmap *membase; - unsigned int reg; - u8 shift0; - u8 width0; -@@ -48,16 +47,14 @@ struct lgm_clk_ddiv { - unsigned int mult; - unsigned int div; - unsigned long flags; -- spinlock_t lock; - }; - - struct lgm_clk_gate { - struct clk_hw hw; -- void __iomem *membase; -+ struct regmap *membase; - unsigned int reg; - u8 shift; - unsigned long flags; -- spinlock_t lock; - }; - - enum lgm_clk_type { -@@ -77,11 +74,10 @@ enum lgm_clk_type { - * @clk_data: array of hw clocks and clk number. 
- */ - struct lgm_clk_provider { -- void __iomem *membase; -+ struct regmap *membase; - struct device_node *np; - struct device *dev; - struct clk_hw_onecell_data clk_data; -- spinlock_t lock; - }; - - enum pll_type { -@@ -92,11 +88,10 @@ enum pll_type { - - struct lgm_clk_pll { - struct clk_hw hw; -- void __iomem *membase; -+ struct regmap *membase; - unsigned int reg; - unsigned long flags; - enum pll_type type; -- spinlock_t lock; - }; - - /** -@@ -202,6 +197,8 @@ struct lgm_clk_branch { - /* clock flags definition */ - #define CLOCK_FLAG_VAL_INIT BIT(16) - #define MUX_CLK_SW BIT(17) -+#define GATE_CLK_HW BIT(18) -+#define DIV_CLK_NO_MASK BIT(19) - - #define LGM_MUX(_id, _name, _pdata, _f, _reg, \ - _shift, _width, _cf, _v) \ -@@ -300,29 +297,32 @@ struct lgm_clk_branch { - .div = _d, \ - } - --static inline void lgm_set_clk_val(void __iomem *membase, u32 reg, -+static inline void lgm_set_clk_val(struct regmap *membase, u32 reg, - u8 shift, u8 width, u32 set_val) - { - u32 mask = (GENMASK(width - 1, 0) << shift); -- u32 regval; - -- regval = readl(membase + reg); -- regval = (regval & ~mask) | ((set_val << shift) & mask); -- writel(regval, membase + reg); -+ regmap_update_bits(membase, reg, mask, set_val << shift); - } - --static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg, -+static inline u32 lgm_get_clk_val(struct regmap *membase, u32 reg, - u8 shift, u8 width) - { - u32 mask = (GENMASK(width - 1, 0) << shift); - u32 val; - -- val = readl(membase + reg); -+ if (regmap_read(membase, reg, &val)) { -+ WARN_ONCE(1, "Failed to read clk reg: 0x%x\n", reg); -+ return 0; -+ } -+ - val = (val & mask) >> shift; - - return val; - } - -+ -+ - int lgm_clk_register_branches(struct lgm_clk_provider *ctx, - const struct lgm_clk_branch *list, - unsigned int nr_clk); -diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c -index 020f4e83a5ccb..f69455dd1c980 100644 ---- a/drivers/clk/x86/clk-lgm.c -+++ b/drivers/clk/x86/clk-lgm.c -@@ -1,10 +1,12 @@ - // SPDX-License-Identifier: GPL-2.0 - /* -+ * Copyright (C) 2020-2022 MaxLinear, Inc. - * Copyright (C) 2020 Intel Corporation. 
-- * Zhu YiXin -- * Rahul Tanwar -+ * Zhu Yixin -+ * Rahul Tanwar - */ - #include -+#include - #include - #include - #include -@@ -253,8 +255,8 @@ static const struct lgm_clk_branch lgm_branch_clks[] = { - LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1, - 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2), - LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0), -- LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR, -- 25, 3, 0, 0, 0, 0, dcl_div), -+ LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", CLK_SET_RATE_PARENT, CGU_PCMCR, -+ 25, 3, 0, 0, DIV_CLK_NO_MASK, 0, dcl_div), - LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR, - 0, 1, CLK_MUX_ROUND_CLOSEST, 0), - LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr", -@@ -433,13 +435,15 @@ static int lgm_cgu_probe(struct platform_device *pdev) - - ctx->clk_data.num = CLK_NR_CLKS; - -- ctx->membase = devm_platform_ioremap_resource(pdev, 0); -- if (IS_ERR(ctx->membase)) -+ ctx->membase = syscon_node_to_regmap(np); -+ if (IS_ERR(ctx->membase)) { -+ dev_err(dev, "Failed to get clk CGU iomem\n"); - return PTR_ERR(ctx->membase); -+ } -+ - - ctx->np = np; - ctx->dev = dev; -- spin_lock_init(&ctx->lock); - - ret = lgm_clk_register_plls(ctx, lgm_pll_clks, - ARRAY_SIZE(lgm_pll_clks)); -diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c -index eb25303eefed4..2c9da6623b84e 100644 ---- a/drivers/clk/zynqmp/clkc.c -+++ b/drivers/clk/zynqmp/clkc.c -@@ -710,6 +710,13 @@ static void zynqmp_get_clock_info(void) - FIELD_PREP(CLK_ATTR_NODE_INDEX, i); - - zynqmp_pm_clock_get_name(clock[i].clk_id, &name); -+ -+ /* -+ * Terminate with NULL character in case name provided by firmware -+ * is longer and truncated due to size limit. -+ */ -+ name.name[sizeof(name.name) - 1] = '\0'; -+ - if (!strcmp(name.name, RESERVED_CLK_NAME)) - continue; - strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN); -diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c -index 036e4ff64a2f7..bc066f300345d 100644 ---- a/drivers/clk/zynqmp/pll.c -+++ b/drivers/clk/zynqmp/pll.c -@@ -102,26 +102,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) - { - u32 fbdiv; -- long rate_div, f; -+ u32 mult, div; - -- /* Enable the fractional mode if needed */ -- rate_div = (rate * FRAC_DIV) / *prate; -- f = rate_div % FRAC_DIV; -- if (f) { -- if (rate > PS_PLL_VCO_MAX) { -- fbdiv = rate / PS_PLL_VCO_MAX; -- rate = rate / (fbdiv + 1); -- } -- if (rate < PS_PLL_VCO_MIN) { -- fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); -- rate = rate * fbdiv; -- } -- return rate; -+ /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */ -+ if (rate > PS_PLL_VCO_MAX) { -+ div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX); -+ rate = rate / div; -+ } -+ if (rate < PS_PLL_VCO_MIN) { -+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate); -+ rate = rate * mult; - } - - fbdiv = DIV_ROUND_CLOSEST(rate, *prate); -- fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); -- return *prate * fbdiv; -+ if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) { -+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX); -+ rate = *prate * fbdiv; -+ } -+ -+ return rate; - } - - /** -diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index 0f5e3983951a8..08f8cb944a2ac 100644 ---- a/drivers/clocksource/Kconfig -+++ b/drivers/clocksource/Kconfig -@@ -24,6 +24,7 @@ config I8253_LOCK - - config OMAP_DM_TIMER - bool -+ select TIMER_OF - - config CLKBLD_I8253 - def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK -diff --git 
a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c -index eb596ff9e7bb3..279ddff81ab49 100644 ---- a/drivers/clocksource/acpi_pm.c -+++ b/drivers/clocksource/acpi_pm.c -@@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg) - int ret; - - ret = kstrtouint(arg, 16, &base); -- if (ret) -- return ret; -+ if (ret) { -+ pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg); -+ return 1; -+ } - - pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, - base); -diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c -index 3819ef5b70989..3245eb0c602d2 100644 ---- a/drivers/clocksource/dw_apb_timer_of.c -+++ b/drivers/clocksource/dw_apb_timer_of.c -@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np, - pr_warn("pclk for %pOFn is present, but could not be activated\n", - np); - -- if (!of_property_read_u32(np, "clock-freq", rate) && -+ if (!of_property_read_u32(np, "clock-freq", rate) || - !of_property_read_u32(np, "clock-frequency", rate)) - return 0; - -diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c -index 5e3e96d3d1b98..cc2a961ddd3be 100644 ---- a/drivers/clocksource/exynos_mct.c -+++ b/drivers/clocksource/exynos_mct.c -@@ -504,11 +504,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) - return 0; - } - --static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) -+static int __init exynos4_timer_resources(struct device_node *np) - { -- int err, cpu; - struct clk *mct_clk, *tick_clk; - -+ reg_base = of_iomap(np, 0); -+ if (!reg_base) -+ panic("%s: unable to ioremap mct address space\n", __func__); -+ - tick_clk = of_clk_get_by_name(np, "fin_pll"); - if (IS_ERR(tick_clk)) - panic("%s: unable to determine tick clock rate\n", __func__); -@@ -519,9 +522,32 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * - panic("%s: unable to retrieve mct clock instance\n", __func__); - clk_prepare_enable(mct_clk); - -- reg_base = base; -- if (!reg_base) -- panic("%s: unable to ioremap mct address space\n", __func__); -+ return 0; -+} -+ -+static int __init exynos4_timer_interrupts(struct device_node *np, -+ unsigned int int_type) -+{ -+ int nr_irqs, i, err, cpu; -+ -+ mct_int_type = int_type; -+ -+ /* This driver uses only one global timer interrupt */ -+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); -+ -+ /* -+ * Find out the number of local irqs specified. The local -+ * timer irqs are specified after the four global timer -+ * irqs are specified. 
-+ */ -+ nr_irqs = of_irq_count(np); -+ if (nr_irqs > ARRAY_SIZE(mct_irqs)) { -+ pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", -+ nr_irqs); -+ nr_irqs = ARRAY_SIZE(mct_irqs); -+ } -+ for (i = MCT_L0_IRQ; i < nr_irqs; i++) -+ mct_irqs[i] = irq_of_parse_and_map(np, i); - - if (mct_int_type == MCT_INT_PPI) { - -@@ -532,11 +558,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * - mct_irqs[MCT_L0_IRQ], err); - } else { - for_each_possible_cpu(cpu) { -- int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; -+ int mct_irq; - struct mct_clock_event_device *pcpu_mevt = - per_cpu_ptr(&percpu_mct_tick, cpu); - - pcpu_mevt->evt.irq = -1; -+ if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs)) -+ break; -+ mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; - - irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); - if (request_irq(mct_irq, -@@ -581,24 +610,13 @@ out_irq: - - static int __init mct_init_dt(struct device_node *np, unsigned int int_type) - { -- u32 nr_irqs, i; - int ret; - -- mct_int_type = int_type; -- -- /* This driver uses only one global timer interrupt */ -- mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); -- -- /* -- * Find out the number of local irqs specified. The local -- * timer irqs are specified after the four global timer -- * irqs are specified. -- */ -- nr_irqs = of_irq_count(np); -- for (i = MCT_L0_IRQ; i < nr_irqs; i++) -- mct_irqs[i] = irq_of_parse_and_map(np, i); -+ ret = exynos4_timer_resources(np); -+ if (ret) -+ return ret; - -- ret = exynos4_timer_resources(np, of_iomap(np, 0)); -+ ret = exynos4_timer_interrupts(np, int_type); - if (ret) - return ret; - -diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c -index ff188ab68496e..bb47610bbd1c4 100644 ---- a/drivers/clocksource/hyperv_timer.c -+++ b/drivers/clocksource/hyperv_timer.c -@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void) - hv_sched_clock_offset = hv_read_reference_counter(); - hv_setup_sched_clock(read_hv_sched_clock_msr); - } --EXPORT_SYMBOL_GPL(hv_init_clocksource); -diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c -index dd0956ad969c1..d35548aa026fb 100644 ---- a/drivers/clocksource/sh_cmt.c -+++ b/drivers/clocksource/sh_cmt.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -116,6 +117,7 @@ struct sh_cmt_device { - void __iomem *mapbase; - struct clk *clk; - unsigned long rate; -+ unsigned int reg_delay; - - raw_spinlock_t lock; /* Protect the shared start/stop register */ - -@@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) - - static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) - { -- if (ch->iostart) -- ch->cmt->info->write_control(ch->iostart, 0, value); -- else -- ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); -+ u32 old_value = sh_cmt_read_cmstr(ch); -+ -+ if (value != old_value) { -+ if (ch->iostart) { -+ ch->cmt->info->write_control(ch->iostart, 0, value); -+ udelay(ch->cmt->reg_delay); -+ } else { -+ ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); -+ udelay(ch->cmt->reg_delay); -+ } -+ } - } - - static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) -@@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) - - static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) - { -- ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); -+ u32 old_value = sh_cmt_read_cmcsr(ch); -+ -+ if (value != old_value) { -+ 
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); -+ udelay(ch->cmt->reg_delay); -+ } - } - - static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) -@@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) - return ch->cmt->info->read_count(ch->ioctrl, CMCNT); - } - --static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) -+static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) - { -+ /* Tests showed that we need to wait 3 clocks here */ -+ unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2); -+ u32 reg; -+ -+ if (ch->cmt->info->model > SH_CMT_16BIT) { -+ int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg, -+ !(reg & SH_CMT32_CMCSR_WRFLG), -+ 1, cmcnt_delay, false, ch); -+ if (ret < 0) -+ return ret; -+ } -+ - ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); -+ udelay(cmcnt_delay); -+ return 0; - } - - static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) - { -- ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); -+ u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR); -+ -+ if (value != old_value) { -+ ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); -+ udelay(ch->cmt->reg_delay); -+ } - } - - static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) -@@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) - - static int sh_cmt_enable(struct sh_cmt_channel *ch) - { -- int k, ret; -+ int ret; - - dev_pm_syscore_device(&ch->cmt->pdev->dev, true); - -@@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) - } - - sh_cmt_write_cmcor(ch, 0xffffffff); -- sh_cmt_write_cmcnt(ch, 0); -- -- /* -- * According to the sh73a0 user's manual, as CMCNT can be operated -- * only by the RCLK (Pseudo 32 kHz), there's one restriction on -- * modifying CMCNT register; two RCLK cycles are necessary before -- * this register is either read or any modification of the value -- * it holds is reflected in the LSI's actual operation. -- * -- * While at it, we're supposed to clear out the CMCNT as of this -- * moment, so make sure it's processed properly here. This will -- * take RCLKx2 at maximum. -- */ -- for (k = 0; k < 100; k++) { -- if (!sh_cmt_read_cmcnt(ch)) -- break; -- udelay(1); -- } -+ ret = sh_cmt_write_cmcnt(ch, 0); - -- if (sh_cmt_read_cmcnt(ch)) { -+ if (ret || sh_cmt_read_cmcnt(ch)) { - dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", - ch->index); - ret = -ETIMEDOUT; -@@ -987,8 +1003,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table); - - static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) - { -- unsigned int mask; -- unsigned int i; -+ unsigned int mask, i; -+ unsigned long rate; - int ret; - - cmt->pdev = pdev; -@@ -1024,10 +1040,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) - if (ret < 0) - goto err_clk_unprepare; - -- if (cmt->info->width == 16) -- cmt->rate = clk_get_rate(cmt->clk) / 512; -- else -- cmt->rate = clk_get_rate(cmt->clk) / 8; -+ rate = clk_get_rate(cmt->clk); -+ if (!rate) { -+ ret = -EINVAL; -+ goto err_clk_disable; -+ } -+ -+ /* We shall wait 2 input clks after register writes */ -+ if (cmt->info->model >= SH_CMT_48BIT) -+ cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate); -+ cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8); - - /* Map the memory resource(s). 
*/ - ret = sh_cmt_map_memory(cmt); -diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c -index 4efd0cf3b602d..0d52e28fea4de 100644 ---- a/drivers/clocksource/timer-cadence-ttc.c -+++ b/drivers/clocksource/timer-cadence-ttc.c -@@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev) - * and use it. Note that the event timer uses the interrupt and it's the - * 2nd TTC hence the irq_of_parse_and_map(,1) - */ -- timer_baseaddr = of_iomap(timer, 0); -- if (!timer_baseaddr) { -+ timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL); -+ if (IS_ERR(timer_baseaddr)) { - pr_err("ERROR: invalid timer base address\n"); -- return -ENXIO; -+ return PTR_ERR(timer_baseaddr); - } - - irq = irq_of_parse_and_map(timer, 1); -@@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev) - clk_ce = of_clk_get(timer, clksel); - if (IS_ERR(clk_ce)) { - pr_err("ERROR: timer input clock not found\n"); -- return PTR_ERR(clk_ce); -+ ret = PTR_ERR(clk_ce); -+ goto put_clk_cs; - } - - ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width); - if (ret) -- return ret; -+ goto put_clk_ce; - - ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq); - if (ret) -- return ret; -+ goto put_clk_ce; - - pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq); - - return 0; -+ -+put_clk_ce: -+ clk_put(clk_ce); -+put_clk_cs: -+ clk_put(clk_cs); -+ return ret; - } - - static const struct of_device_id ttc_timer_of_match[] = { -diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c -index 9996c05425200..b1c248498be46 100644 ---- a/drivers/clocksource/timer-davinci.c -+++ b/drivers/clocksource/timer-davinci.c -@@ -257,21 +257,25 @@ int __init davinci_timer_register(struct clk *clk, - resource_size(&timer_cfg->reg), - "davinci-timer")) { - pr_err("Unable to request memory region\n"); -- return -EBUSY; -+ rv = -EBUSY; -+ goto exit_clk_disable; - } - - base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg)); - if (!base) { - pr_err("Unable to map the register range\n"); -- return -ENOMEM; -+ rv = -ENOMEM; -+ goto exit_mem_region; - } - - davinci_timer_init(base); - tick_rate = clk_get_rate(clk); - - clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL); -- if (!clockevent) -- return -ENOMEM; -+ if (!clockevent) { -+ rv = -ENOMEM; -+ goto exit_iounmap_base; -+ } - - clockevent->dev.name = "tim12"; - clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT; -@@ -296,7 +300,7 @@ int __init davinci_timer_register(struct clk *clk, - "clockevent/tim12", clockevent); - if (rv) { - pr_err("Unable to request the clockevent interrupt\n"); -- return rv; -+ goto exit_free_clockevent; - } - - davinci_clocksource.dev.rating = 300; -@@ -323,13 +327,27 @@ int __init davinci_timer_register(struct clk *clk, - rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate); - if (rv) { - pr_err("Unable to register clocksource\n"); -- return rv; -+ goto exit_free_irq; - } - - sched_clock_register(davinci_timer_read_sched_clock, - DAVINCI_TIMER_CLKSRC_BITS, tick_rate); - - return 0; -+ -+exit_free_irq: -+ free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start, -+ clockevent); -+exit_free_clockevent: -+ kfree(clockevent); -+exit_iounmap_base: -+ iounmap(base); -+exit_mem_region: -+ release_mem_region(timer_cfg->reg.start, -+ resource_size(&timer_cfg->reg)); -+exit_clk_disable: -+ clk_disable_unprepare(clk); -+ return rv; - } - - static int __init of_davinci_timer_register(struct device_node 
*np) -diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c -index cbb184953510b..b8e92991c4719 100644 ---- a/drivers/clocksource/timer-ixp4xx.c -+++ b/drivers/clocksource/timer-ixp4xx.c -@@ -282,7 +282,6 @@ void __init ixp4xx_timer_setup(resource_size_t timerbase, - } - ixp4xx_timer_register(base, timer_irq, timer_freq); - } --EXPORT_SYMBOL_GPL(ixp4xx_timer_setup); - - #ifdef CONFIG_OF - static __init int ixp4xx_of_timer_init(struct device_node *np) -diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c -index cfa4ec7ef3968..790d2c9b42a70 100644 ---- a/drivers/clocksource/timer-microchip-pit64b.c -+++ b/drivers/clocksource/timer-microchip-pit64b.c -@@ -165,7 +165,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) - return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); - } - --static u64 mchp_pit64b_sched_read_clk(void) -+static u64 notrace mchp_pit64b_sched_read_clk(void) - { - return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); - } -diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c -index 529cc6a51cdb3..c3f54d9912be7 100644 ---- a/drivers/clocksource/timer-of.c -+++ b/drivers/clocksource/timer-of.c -@@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np, - of_base->base = of_base->name ? - of_io_request_and_map(np, of_base->index, of_base->name) : - of_iomap(np, of_base->index); -- if (IS_ERR(of_base->base)) { -- pr_err("Failed to iomap (%s)\n", of_base->name); -- return PTR_ERR(of_base->base); -+ if (IS_ERR_OR_NULL(of_base->base)) { -+ pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name); -+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM; - } - - return 0; -diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c -index 56c0cc32d0ac6..d514b44e67dd1 100644 ---- a/drivers/clocksource/timer-oxnas-rps.c -+++ b/drivers/clocksource/timer-oxnas-rps.c -@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np) - } - - rps->irq = irq_of_parse_and_map(np, 0); -- if (rps->irq < 0) { -+ if (!rps->irq) { - ret = -EINVAL; - goto err_iomap; - } -diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c -index 401d592e85f5a..e6a87f4af2b50 100644 ---- a/drivers/clocksource/timer-sp804.c -+++ b/drivers/clocksource/timer-sp804.c -@@ -259,6 +259,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time - struct clk *clk1, *clk2; - const char *name = of_get_property(np, "compatible", NULL); - -+ if (initialized) { -+ pr_debug("%pOF: skipping further SP804 timer device\n", np); -+ return 0; -+ } -+ - base = of_iomap(np, 0); - if (!base) - return -ENXIO; -@@ -270,11 +275,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time - writel(0, timer1_base + timer->ctrl); - writel(0, timer2_base + timer->ctrl); - -- if (initialized || !of_device_is_available(np)) { -- ret = -EINVAL; -- goto err; -- } -- - clk1 = of_clk_get(np, 0); - if (IS_ERR(clk1)) - clk1 = NULL; -diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c -index b6f97960d8ee0..632523c1232f6 100644 ---- a/drivers/clocksource/timer-ti-dm-systimer.c -+++ b/drivers/clocksource/timer-ti-dm-systimer.c -@@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) - bool quirk_unreliable_oscillator = false; - - /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ -- if 
(of_machine_is_compatible("ti,omap3-beagle") || -- of_machine_is_compatible("timll,omap3-devkit8000")) { -+ if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { - quirk_unreliable_oscillator = true; - counter_32k = -ENODEV; - } -@@ -346,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, - return error; - - r = clk_get_rate(clock); -- if (!r) -+ if (!r) { -+ clk_disable_unprepare(clock); - return -ENODEV; -+ } - - if (is_ick) - t->ick = clock; -@@ -695,9 +696,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) - return 0; - } - -- if (pa == 0x48034000) /* dra7 dmtimer3 */ -+ if (pa == 0x4882c000) /* dra7 dmtimer15 */ - return dmtimer_percpu_timer_init(np, 0); -- else if (pa == 0x48036000) /* dra7 dmtimer4 */ -+ else if (pa == 0x4882e000) /* dra7 dmtimer16 */ - return dmtimer_percpu_timer_init(np, 1); - - return 0; -diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c -index 6de8ab97d346c..d6934b6c436d1 100644 ---- a/drivers/comedi/drivers/adv_pci1760.c -+++ b/drivers/comedi/drivers/adv_pci1760.c -@@ -59,7 +59,7 @@ - #define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */ - #define PCI1760_CMD_SET_DO 0x01 /* Set output state */ - #define PCI1760_CMD_GET_DO 0x02 /* Read output status */ --#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */ -+#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */ - #define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */ - #define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */ - #define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */ -diff --git a/drivers/comedi/drivers/dt9812.c b/drivers/comedi/drivers/dt9812.c -index 634f57730c1e0..704b04d2980d3 100644 ---- a/drivers/comedi/drivers/dt9812.c -+++ b/drivers/comedi/drivers/dt9812.c -@@ -32,6 +32,7 @@ - #include - #include - #include -+#include - #include - - #include "../comedi_usb.h" -@@ -237,22 +238,42 @@ static int dt9812_read_info(struct comedi_device *dev, - { - struct usb_device *usb = comedi_to_usb_dev(dev); - struct dt9812_private *devpriv = dev->private; -- struct dt9812_usb_cmd cmd; -+ struct dt9812_usb_cmd *cmd; -+ size_t tbuf_size; - int count, ret; -+ void *tbuf; - -- cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA); -- cmd.u.flash_data_info.address = -+ tbuf_size = max(sizeof(*cmd), buf_size); -+ -+ tbuf = kzalloc(tbuf_size, GFP_KERNEL); -+ if (!tbuf) -+ return -ENOMEM; -+ -+ cmd = tbuf; -+ -+ cmd->cmd = cpu_to_le32(DT9812_R_FLASH_DATA); -+ cmd->u.flash_data_info.address = - cpu_to_le16(DT9812_DIAGS_BOARD_INFO_ADDR + offset); -- cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size); -+ cmd->u.flash_data_info.numbytes = cpu_to_le16(buf_size); - - /* DT9812 only responds to 32 byte writes!! 
*/ - ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -- &cmd, 32, &count, DT9812_USB_TIMEOUT); -+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); - if (ret) -- return ret; -+ goto out; -+ -+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), -+ tbuf, buf_size, &count, DT9812_USB_TIMEOUT); -+ if (!ret) { -+ if (count == buf_size) -+ memcpy(buf, tbuf, buf_size); -+ else -+ ret = -EREMOTEIO; -+ } -+out: -+ kfree(tbuf); - -- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), -- buf, buf_size, &count, DT9812_USB_TIMEOUT); -+ return ret; - } - - static int dt9812_read_multiple_registers(struct comedi_device *dev, -@@ -261,22 +282,42 @@ static int dt9812_read_multiple_registers(struct comedi_device *dev, - { - struct usb_device *usb = comedi_to_usb_dev(dev); - struct dt9812_private *devpriv = dev->private; -- struct dt9812_usb_cmd cmd; -+ struct dt9812_usb_cmd *cmd; - int i, count, ret; -+ size_t buf_size; -+ void *buf; - -- cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); -- cmd.u.read_multi_info.count = reg_count; -+ buf_size = max_t(size_t, sizeof(*cmd), reg_count); -+ -+ buf = kzalloc(buf_size, GFP_KERNEL); -+ if (!buf) -+ return -ENOMEM; -+ -+ cmd = buf; -+ -+ cmd->cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); -+ cmd->u.read_multi_info.count = reg_count; - for (i = 0; i < reg_count; i++) -- cmd.u.read_multi_info.address[i] = address[i]; -+ cmd->u.read_multi_info.address[i] = address[i]; - - /* DT9812 only responds to 32 byte writes!! */ - ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -- &cmd, 32, &count, DT9812_USB_TIMEOUT); -+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); - if (ret) -- return ret; -+ goto out; -+ -+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), -+ buf, reg_count, &count, DT9812_USB_TIMEOUT); -+ if (!ret) { -+ if (count == reg_count) -+ memcpy(value, buf, reg_count); -+ else -+ ret = -EREMOTEIO; -+ } -+out: -+ kfree(buf); - -- return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), -- value, reg_count, &count, DT9812_USB_TIMEOUT); -+ return ret; - } - - static int dt9812_write_multiple_registers(struct comedi_device *dev, -@@ -285,19 +326,27 @@ static int dt9812_write_multiple_registers(struct comedi_device *dev, - { - struct usb_device *usb = comedi_to_usb_dev(dev); - struct dt9812_private *devpriv = dev->private; -- struct dt9812_usb_cmd cmd; -+ struct dt9812_usb_cmd *cmd; - int i, count; -+ int ret; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; - -- cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); -- cmd.u.read_multi_info.count = reg_count; -+ cmd->cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); -+ cmd->u.read_multi_info.count = reg_count; - for (i = 0; i < reg_count; i++) { -- cmd.u.write_multi_info.write[i].address = address[i]; -- cmd.u.write_multi_info.write[i].value = value[i]; -+ cmd->u.write_multi_info.write[i].address = address[i]; -+ cmd->u.write_multi_info.write[i].value = value[i]; - } - - /* DT9812 only responds to 32 byte writes!! 
*/ -- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -- &cmd, 32, &count, DT9812_USB_TIMEOUT); -+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); -+ kfree(cmd); -+ -+ return ret; - } - - static int dt9812_rmw_multiple_registers(struct comedi_device *dev, -@@ -306,17 +355,25 @@ static int dt9812_rmw_multiple_registers(struct comedi_device *dev, - { - struct usb_device *usb = comedi_to_usb_dev(dev); - struct dt9812_private *devpriv = dev->private; -- struct dt9812_usb_cmd cmd; -+ struct dt9812_usb_cmd *cmd; - int i, count; -+ int ret; -+ -+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); -+ if (!cmd) -+ return -ENOMEM; - -- cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); -- cmd.u.rmw_multi_info.count = reg_count; -+ cmd->cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); -+ cmd->u.rmw_multi_info.count = reg_count; - for (i = 0; i < reg_count; i++) -- cmd.u.rmw_multi_info.rmw[i] = rmw[i]; -+ cmd->u.rmw_multi_info.rmw[i] = rmw[i]; - - /* DT9812 only responds to 32 byte writes!! */ -- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -- &cmd, 32, &count, DT9812_USB_TIMEOUT); -+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), -+ cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); -+ kfree(cmd); -+ -+ return ret; - } - - static int dt9812_digital_in(struct comedi_device *dev, u8 *bits) -diff --git a/drivers/comedi/drivers/ni_usb6501.c b/drivers/comedi/drivers/ni_usb6501.c -index 5b6d9d783b2f7..c42987b74b1dc 100644 ---- a/drivers/comedi/drivers/ni_usb6501.c -+++ b/drivers/comedi/drivers/ni_usb6501.c -@@ -144,6 +144,10 @@ static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00}; - -+/* Largest supported packets */ -+static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); -+static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); -+ - enum commands { - READ_PORT, - WRITE_PORT, -@@ -501,6 +505,12 @@ static int ni6501_find_endpoints(struct comedi_device *dev) - if (!devpriv->ep_rx || !devpriv->ep_tx) - return -ENODEV; - -+ if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) -+ return -ENODEV; -+ -+ if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) -+ return -ENODEV; -+ - return 0; - } - -diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c -index 9f920819cd742..9a1d146b7ebb2 100644 ---- a/drivers/comedi/drivers/vmk80xx.c -+++ b/drivers/comedi/drivers/vmk80xx.c -@@ -90,6 +90,9 @@ enum { - #define IC3_VERSION BIT(0) - #define IC6_VERSION BIT(1) - -+#define MIN_BUF_SIZE 64 -+#define PACKET_TIMEOUT 10000 /* ms */ -+ - enum vmk80xx_model { - VMK8055_MODEL, - VMK8061_MODEL -@@ -157,22 +160,21 @@ static void vmk80xx_do_bulk_msg(struct comedi_device *dev) - __u8 rx_addr; - unsigned int tx_pipe; - unsigned int rx_pipe; -- size_t size; -+ size_t tx_size; -+ size_t rx_size; - - tx_addr = devpriv->ep_tx->bEndpointAddress; - rx_addr = devpriv->ep_rx->bEndpointAddress; - tx_pipe = usb_sndbulkpipe(usb, tx_addr); - rx_pipe = usb_rcvbulkpipe(usb, rx_addr); -+ tx_size = usb_endpoint_maxp(devpriv->ep_tx); -+ rx_size = usb_endpoint_maxp(devpriv->ep_rx); - -- /* -- * The max packet size attributes of the K8061 -- * input/output endpoints are identical -- */ -- size = usb_endpoint_maxp(devpriv->ep_tx); -+ usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, tx_size, NULL, -+ PACKET_TIMEOUT); - -- usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, -- size, NULL, devpriv->ep_tx->bInterval); -- usb_bulk_msg(usb, 
rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10); -+ usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL, -+ PACKET_TIMEOUT); - } - - static int vmk80xx_read_packet(struct comedi_device *dev) -@@ -191,7 +193,7 @@ static int vmk80xx_read_packet(struct comedi_device *dev) - pipe = usb_rcvintpipe(usb, ep->bEndpointAddress); - return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf, - usb_endpoint_maxp(ep), NULL, -- HZ * 10); -+ PACKET_TIMEOUT); - } - - static int vmk80xx_write_packet(struct comedi_device *dev, int cmd) -@@ -212,7 +214,7 @@ static int vmk80xx_write_packet(struct comedi_device *dev, int cmd) - pipe = usb_sndintpipe(usb, ep->bEndpointAddress); - return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf, - usb_endpoint_maxp(ep), NULL, -- HZ * 10); -+ PACKET_TIMEOUT); - } - - static int vmk80xx_reset_device(struct comedi_device *dev) -@@ -678,12 +680,12 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) - struct vmk80xx_private *devpriv = dev->private; - size_t size; - -- size = usb_endpoint_maxp(devpriv->ep_rx); -+ size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); - devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_rx_buf) - return -ENOMEM; - -- size = usb_endpoint_maxp(devpriv->ep_tx); -+ size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); - devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); - if (!devpriv->usb_tx_buf) - return -ENOMEM; -diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c -index 0caa60537b142..ab2c49579b289 100644 ---- a/drivers/counter/104-quad-8.c -+++ b/drivers/counter/104-quad-8.c -@@ -61,10 +61,6 @@ struct quad8 { - #define QUAD8_REG_CHAN_OP 0x11 - #define QUAD8_REG_INDEX_INPUT_LEVELS 0x16 - #define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17 --/* Borrow Toggle flip-flop */ --#define QUAD8_FLAG_BT BIT(0) --/* Carry Toggle flip-flop */ --#define QUAD8_FLAG_CT BIT(1) - /* Error flag */ - #define QUAD8_FLAG_E BIT(4) - /* Up/Down flag */ -@@ -97,6 +93,9 @@ struct quad8 { - #define QUAD8_CMR_QUADRATURE_X2 0x10 - #define QUAD8_CMR_QUADRATURE_X4 0x18 - -+/* Each Counter is 24 bits wide */ -+#define LS7267_CNTR_MAX GENMASK(23, 0) -+ - static int quad8_signal_read(struct counter_device *counter, - struct counter_signal *signal, - enum counter_signal_level *level) -@@ -117,21 +116,13 @@ static int quad8_signal_read(struct counter_device *counter, - } - - static int quad8_count_read(struct counter_device *counter, -- struct counter_count *count, unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct quad8 *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; -- unsigned int flags; -- unsigned int borrow; -- unsigned int carry; - int i; - -- flags = inb(base_offset + 1); -- borrow = flags & QUAD8_FLAG_BT; -- carry = !!(flags & QUAD8_FLAG_CT); -- -- /* Borrow XOR Carry effectively doubles count range */ -- *val = (unsigned long)(borrow ^ carry) << 24; -+ *val = 0; - - mutex_lock(&priv->lock); - -@@ -148,14 +139,13 @@ static int quad8_count_read(struct counter_device *counter, - } - - static int quad8_count_write(struct counter_device *counter, -- struct counter_count *count, unsigned long val) -+ struct counter_count *count, u64 val) - { - struct quad8 *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; - int i; - -- /* Only 24-bit values are supported */ -- if (val > 0xFFFFFF) -+ if (val > LS7267_CNTR_MAX) - return -ERANGE; - - mutex_lock(&priv->lock); -@@ -188,22 +178,16 @@ static int quad8_count_write(struct 
counter_device *counter, - return 0; - } - --enum quad8_count_function { -- QUAD8_COUNT_FUNCTION_PULSE_DIRECTION = 0, -- QUAD8_COUNT_FUNCTION_QUADRATURE_X1, -- QUAD8_COUNT_FUNCTION_QUADRATURE_X2, -- QUAD8_COUNT_FUNCTION_QUADRATURE_X4 --}; -- - static const enum counter_function quad8_count_functions_list[] = { -- [QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_FUNCTION_PULSE_DIRECTION, -- [QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_FUNCTION_QUADRATURE_X1_A, -- [QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_FUNCTION_QUADRATURE_X2_A, -- [QUAD8_COUNT_FUNCTION_QUADRATURE_X4] = COUNTER_FUNCTION_QUADRATURE_X4 -+ COUNTER_FUNCTION_PULSE_DIRECTION, -+ COUNTER_FUNCTION_QUADRATURE_X1_A, -+ COUNTER_FUNCTION_QUADRATURE_X2_A, -+ COUNTER_FUNCTION_QUADRATURE_X4, - }; - --static int quad8_function_get(struct counter_device *counter, -- struct counter_count *count, size_t *function) -+static int quad8_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { - struct quad8 *const priv = counter->priv; - const int id = count->id; -@@ -213,25 +197,26 @@ static int quad8_function_get(struct counter_device *counter, - if (priv->quadrature_mode[id]) - switch (priv->quadrature_scale[id]) { - case 0: -- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; -+ *function = COUNTER_FUNCTION_QUADRATURE_X1_A; - break; - case 1: -- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X2; -+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A; - break; - case 2: -- *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X4; -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - break; - } - else -- *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; -+ *function = COUNTER_FUNCTION_PULSE_DIRECTION; - - mutex_unlock(&priv->lock); - - return 0; - } - --static int quad8_function_set(struct counter_device *counter, -- struct counter_count *count, size_t function) -+static int quad8_function_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function function) - { - struct quad8 *const priv = counter->priv; - const int id = count->id; -@@ -247,7 +232,7 @@ static int quad8_function_set(struct counter_device *counter, - mode_cfg = priv->count_mode[id] << 1; - idr_cfg = priv->index_polarity[id] << 1; - -- if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { -+ if (function == COUNTER_FUNCTION_PULSE_DIRECTION) { - *quadrature_mode = 0; - - /* Quadrature scaling only available in quadrature mode */ -@@ -263,15 +248,15 @@ static int quad8_function_set(struct counter_device *counter, - *quadrature_mode = 1; - - switch (function) { -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1: -+ case COUNTER_FUNCTION_QUADRATURE_X1_A: - *scale = 0; - mode_cfg |= QUAD8_CMR_QUADRATURE_X1; - break; -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2: -+ case COUNTER_FUNCTION_QUADRATURE_X2_A: - *scale = 1; - mode_cfg |= QUAD8_CMR_QUADRATURE_X2; - break; -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4: -+ case COUNTER_FUNCTION_QUADRATURE_X4: - *scale = 2; - mode_cfg |= QUAD8_CMR_QUADRATURE_X4; - break; -@@ -290,8 +275,9 @@ static int quad8_function_set(struct counter_device *counter, - return 0; - } - --static void quad8_direction_get(struct counter_device *counter, -- struct counter_count *count, enum counter_count_direction *direction) -+static int quad8_direction_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_count_direction *direction) - { - const struct quad8 *const priv = counter->priv; - unsigned int ud_flag; -@@ -302,76 +288,74 @@ static void 
quad8_direction_get(struct counter_device *counter, - - *direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD : - COUNTER_COUNT_DIRECTION_BACKWARD; --} - --enum quad8_synapse_action { -- QUAD8_SYNAPSE_ACTION_NONE = 0, -- QUAD8_SYNAPSE_ACTION_RISING_EDGE, -- QUAD8_SYNAPSE_ACTION_FALLING_EDGE, -- QUAD8_SYNAPSE_ACTION_BOTH_EDGES --}; -+ return 0; -+} - - static const enum counter_synapse_action quad8_index_actions_list[] = { -- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE -+ COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_RISING_EDGE, - }; - - static const enum counter_synapse_action quad8_synapse_actions_list[] = { -- [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -- [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE, -- [QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -- [QUAD8_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES -+ COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_RISING_EDGE, -+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES, - }; - --static int quad8_action_get(struct counter_device *counter, -- struct counter_count *count, struct counter_synapse *synapse, -- size_t *action) -+static int quad8_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { - struct quad8 *const priv = counter->priv; - int err; -- size_t function = 0; -+ enum counter_function function; - const size_t signal_a_id = count->synapses[0].signal->id; - enum counter_count_direction direction; - - /* Handle Index signals */ - if (synapse->signal->id >= 16) { -- if (priv->preset_enable[count->id]) -- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE; -+ if (!priv->preset_enable[count->id]) -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - else -- *action = QUAD8_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - - return 0; - } - -- err = quad8_function_get(counter, count, &function); -+ err = quad8_function_read(counter, count, &function); - if (err) - return err; - - /* Default action mode */ -- *action = QUAD8_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - - /* Determine action mode based on current count function mode */ - switch (function) { -- case QUAD8_COUNT_FUNCTION_PULSE_DIRECTION: -+ case COUNTER_FUNCTION_PULSE_DIRECTION: - if (synapse->signal->id == signal_a_id) -- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - return 0; -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X1: -+ case COUNTER_FUNCTION_QUADRATURE_X1_A: - if (synapse->signal->id == signal_a_id) { -- quad8_direction_get(counter, count, &direction); -+ err = quad8_direction_read(counter, count, &direction); -+ if (err) -+ return err; - - if (direction == COUNTER_COUNT_DIRECTION_FORWARD) -- *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - else -- *action = QUAD8_SYNAPSE_ACTION_FALLING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE; - } - return 0; -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X2: -+ case COUNTER_FUNCTION_QUADRATURE_X2_A: - if (synapse->signal->id == signal_a_id) -- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; -- case QUAD8_COUNT_FUNCTION_QUADRATURE_X4: -- *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES; -+ case COUNTER_FUNCTION_QUADRATURE_X4: -+ *action = 
COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; - default: - /* should never reach this path */ -@@ -383,9 +367,9 @@ static const struct counter_ops quad8_ops = { - .signal_read = quad8_signal_read, - .count_read = quad8_count_read, - .count_write = quad8_count_write, -- .function_get = quad8_function_get, -- .function_set = quad8_function_set, -- .action_get = quad8_action_get -+ .function_read = quad8_function_read, -+ .function_write = quad8_function_write, -+ .action_read = quad8_action_read - }; - - static const char *const quad8_index_polarity_modes[] = { -@@ -394,7 +378,8 @@ static const char *const quad8_index_polarity_modes[] = { - }; - - static int quad8_index_polarity_get(struct counter_device *counter, -- struct counter_signal *signal, size_t *index_polarity) -+ struct counter_signal *signal, -+ u32 *index_polarity) - { - const struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id - 16; -@@ -405,7 +390,8 @@ static int quad8_index_polarity_get(struct counter_device *counter, - } - - static int quad8_index_polarity_set(struct counter_device *counter, -- struct counter_signal *signal, size_t index_polarity) -+ struct counter_signal *signal, -+ u32 index_polarity) - { - struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id - 16; -@@ -426,20 +412,14 @@ static int quad8_index_polarity_set(struct counter_device *counter, - return 0; - } - --static struct counter_signal_enum_ext quad8_index_pol_enum = { -- .items = quad8_index_polarity_modes, -- .num_items = ARRAY_SIZE(quad8_index_polarity_modes), -- .get = quad8_index_polarity_get, -- .set = quad8_index_polarity_set --}; -- - static const char *const quad8_synchronous_modes[] = { - "non-synchronous", - "synchronous" - }; - - static int quad8_synchronous_mode_get(struct counter_device *counter, -- struct counter_signal *signal, size_t *synchronous_mode) -+ struct counter_signal *signal, -+ u32 *synchronous_mode) - { - const struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id - 16; -@@ -450,7 +430,8 @@ static int quad8_synchronous_mode_get(struct counter_device *counter, - } - - static int quad8_synchronous_mode_set(struct counter_device *counter, -- struct counter_signal *signal, size_t synchronous_mode) -+ struct counter_signal *signal, -+ u32 synchronous_mode) - { - struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id - 16; -@@ -477,22 +458,18 @@ static int quad8_synchronous_mode_set(struct counter_device *counter, - return 0; - } - --static struct counter_signal_enum_ext quad8_syn_mode_enum = { -- .items = quad8_synchronous_modes, -- .num_items = ARRAY_SIZE(quad8_synchronous_modes), -- .get = quad8_synchronous_mode_get, -- .set = quad8_synchronous_mode_set --}; -- --static ssize_t quad8_count_floor_read(struct counter_device *counter, -- struct counter_count *count, void *private, char *buf) -+static int quad8_count_floor_read(struct counter_device *counter, -+ struct counter_count *count, u64 *floor) - { - /* Only a floor of 0 is supported */ -- return sprintf(buf, "0\n"); -+ *floor = 0; -+ -+ return 0; - } - --static int quad8_count_mode_get(struct counter_device *counter, -- struct counter_count *count, size_t *cnt_mode) -+static int quad8_count_mode_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_count_mode *cnt_mode) - { - const struct quad8 *const priv = counter->priv; - -@@ -515,26 +492,28 @@ static int quad8_count_mode_get(struct counter_device *counter, - return 0; - } - --static 
int quad8_count_mode_set(struct counter_device *counter, -- struct counter_count *count, size_t cnt_mode) -+static int quad8_count_mode_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_count_mode cnt_mode) - { - struct quad8 *const priv = counter->priv; -+ unsigned int count_mode; - unsigned int mode_cfg; - const int base_offset = priv->base + 2 * count->id + 1; - - /* Map Generic Counter count mode to 104-QUAD-8 count mode */ - switch (cnt_mode) { - case COUNTER_COUNT_MODE_NORMAL: -- cnt_mode = 0; -+ count_mode = 0; - break; - case COUNTER_COUNT_MODE_RANGE_LIMIT: -- cnt_mode = 1; -+ count_mode = 1; - break; - case COUNTER_COUNT_MODE_NON_RECYCLE: -- cnt_mode = 2; -+ count_mode = 2; - break; - case COUNTER_COUNT_MODE_MODULO_N: -- cnt_mode = 3; -+ count_mode = 3; - break; - default: - /* should never reach this path */ -@@ -543,10 +522,10 @@ static int quad8_count_mode_set(struct counter_device *counter, - - mutex_lock(&priv->lock); - -- priv->count_mode[count->id] = cnt_mode; -+ priv->count_mode[count->id] = count_mode; - - /* Set count mode configuration value */ -- mode_cfg = cnt_mode << 1; -+ mode_cfg = count_mode << 1; - - /* Add quadrature mode configuration */ - if (priv->quadrature_mode[count->id]) -@@ -560,56 +539,35 @@ static int quad8_count_mode_set(struct counter_device *counter, - return 0; - } - --static struct counter_count_enum_ext quad8_cnt_mode_enum = { -- .items = counter_count_mode_str, -- .num_items = ARRAY_SIZE(counter_count_mode_str), -- .get = quad8_count_mode_get, -- .set = quad8_count_mode_set --}; -- --static ssize_t quad8_count_direction_read(struct counter_device *counter, -- struct counter_count *count, void *priv, char *buf) --{ -- enum counter_count_direction dir; -- -- quad8_direction_get(counter, count, &dir); -- -- return sprintf(buf, "%s\n", counter_count_direction_str[dir]); --} -- --static ssize_t quad8_count_enable_read(struct counter_device *counter, -- struct counter_count *count, void *private, char *buf) -+static int quad8_count_enable_read(struct counter_device *counter, -+ struct counter_count *count, u8 *enable) - { - const struct quad8 *const priv = counter->priv; - -- return sprintf(buf, "%u\n", priv->ab_enable[count->id]); -+ *enable = priv->ab_enable[count->id]; -+ -+ return 0; - } - --static ssize_t quad8_count_enable_write(struct counter_device *counter, -- struct counter_count *count, void *private, const char *buf, size_t len) -+static int quad8_count_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 enable) - { - struct quad8 *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id; -- int err; -- bool ab_enable; - unsigned int ior_cfg; - -- err = kstrtobool(buf, &ab_enable); -- if (err) -- return err; -- - mutex_lock(&priv->lock); - -- priv->ab_enable[count->id] = ab_enable; -+ priv->ab_enable[count->id] = enable; - -- ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; -+ ior_cfg = enable | priv->preset_enable[count->id] << 1; - - /* Load I/O control configuration */ - outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); - - mutex_unlock(&priv->lock); - -- return len; -+ return 0; - } - - static const char *const quad8_noise_error_states[] = { -@@ -618,7 +576,7 @@ static const char *const quad8_noise_error_states[] = { - }; - - static int quad8_error_noise_get(struct counter_device *counter, -- struct counter_count *count, size_t *noise_error) -+ struct counter_count *count, u32 *noise_error) - { - const struct quad8 *const priv = 
counter->priv; - const int base_offset = priv->base + 2 * count->id + 1; -@@ -628,18 +586,14 @@ static int quad8_error_noise_get(struct counter_device *counter, - return 0; - } - --static struct counter_count_enum_ext quad8_error_noise_enum = { -- .items = quad8_noise_error_states, -- .num_items = ARRAY_SIZE(quad8_noise_error_states), -- .get = quad8_error_noise_get --}; -- --static ssize_t quad8_count_preset_read(struct counter_device *counter, -- struct counter_count *count, void *private, char *buf) -+static int quad8_count_preset_read(struct counter_device *counter, -+ struct counter_count *count, u64 *preset) - { - const struct quad8 *const priv = counter->priv; - -- return sprintf(buf, "%u\n", priv->preset[count->id]); -+ *preset = priv->preset[count->id]; -+ -+ return 0; - } - - static void quad8_preset_register_set(struct quad8 *const priv, const int id, -@@ -658,19 +612,12 @@ static void quad8_preset_register_set(struct quad8 *const priv, const int id, - outb(preset >> (8 * i), base_offset); - } - --static ssize_t quad8_count_preset_write(struct counter_device *counter, -- struct counter_count *count, void *private, const char *buf, size_t len) -+static int quad8_count_preset_write(struct counter_device *counter, -+ struct counter_count *count, u64 preset) - { - struct quad8 *const priv = counter->priv; -- unsigned int preset; -- int ret; - -- ret = kstrtouint(buf, 0, &preset); -- if (ret) -- return ret; -- -- /* Only 24-bit values are supported */ -- if (preset > 0xFFFFFF) -+ if (preset > LS7267_CNTR_MAX) - return -ERANGE; - - mutex_lock(&priv->lock); -@@ -679,11 +626,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter, - - mutex_unlock(&priv->lock); - -- return len; -+ return 0; - } - --static ssize_t quad8_count_ceiling_read(struct counter_device *counter, -- struct counter_count *count, void *private, char *buf) -+static int quad8_count_ceiling_read(struct counter_device *counter, -+ struct counter_count *count, u64 *ceiling) - { - struct quad8 *const priv = counter->priv; - -@@ -693,29 +640,24 @@ static ssize_t quad8_count_ceiling_read(struct counter_device *counter, - switch (priv->count_mode[count->id]) { - case 1: - case 3: -- mutex_unlock(&priv->lock); -- return sprintf(buf, "%u\n", priv->preset[count->id]); -+ *ceiling = priv->preset[count->id]; -+ break; -+ default: -+ *ceiling = LS7267_CNTR_MAX; -+ break; - } - - mutex_unlock(&priv->lock); - -- /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ -- return sprintf(buf, "33554431\n"); -+ return 0; - } - --static ssize_t quad8_count_ceiling_write(struct counter_device *counter, -- struct counter_count *count, void *private, const char *buf, size_t len) -+static int quad8_count_ceiling_write(struct counter_device *counter, -+ struct counter_count *count, u64 ceiling) - { - struct quad8 *const priv = counter->priv; -- unsigned int ceiling; -- int ret; -- -- ret = kstrtouint(buf, 0, &ceiling); -- if (ret) -- return ret; - -- /* Only 24-bit values are supported */ -- if (ceiling > 0xFFFFFF) -+ if (ceiling > LS7267_CNTR_MAX) - return -ERANGE; - - mutex_lock(&priv->lock); -@@ -726,7 +668,7 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter, - case 3: - quad8_preset_register_set(priv, count->id, ceiling); - mutex_unlock(&priv->lock); -- return len; -+ return 0; - } - - mutex_unlock(&priv->lock); -@@ -734,27 +676,25 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter, - return -EINVAL; - } - --static ssize_t quad8_count_preset_enable_read(struct 
counter_device *counter, -- struct counter_count *count, void *private, char *buf) -+static int quad8_count_preset_enable_read(struct counter_device *counter, -+ struct counter_count *count, -+ u8 *preset_enable) - { - const struct quad8 *const priv = counter->priv; - -- return sprintf(buf, "%u\n", !priv->preset_enable[count->id]); -+ *preset_enable = !priv->preset_enable[count->id]; -+ -+ return 0; - } - --static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, -- struct counter_count *count, void *private, const char *buf, size_t len) -+static int quad8_count_preset_enable_write(struct counter_device *counter, -+ struct counter_count *count, -+ u8 preset_enable) - { - struct quad8 *const priv = counter->priv; - const int base_offset = priv->base + 2 * count->id + 1; -- bool preset_enable; -- int ret; - unsigned int ior_cfg; - -- ret = kstrtobool(buf, &preset_enable); -- if (ret) -- return ret; -- - /* Preset enable is active low in Input/Output Control register */ - preset_enable = !preset_enable; - -@@ -762,25 +702,24 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter, - - priv->preset_enable[count->id] = preset_enable; - -- ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; -+ ior_cfg = priv->ab_enable[count->id] | preset_enable << 1; - - /* Load I/O control configuration to Input / Output Control Register */ - outb(QUAD8_CTR_IOR | ior_cfg, base_offset); - - mutex_unlock(&priv->lock); - -- return len; -+ return 0; - } - --static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter, -- struct counter_signal *signal, -- void *private, char *buf) -+static int quad8_signal_cable_fault_read(struct counter_device *counter, -+ struct counter_signal *signal, -+ u8 *cable_fault) - { - struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id / 2; - bool disabled; - unsigned int status; -- unsigned int fault; - - mutex_lock(&priv->lock); - -@@ -797,36 +736,31 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter, - mutex_unlock(&priv->lock); - - /* Mask respective channel and invert logic */ -- fault = !(status & BIT(channel_id)); -+ *cable_fault = !(status & BIT(channel_id)); - -- return sprintf(buf, "%u\n", fault); -+ return 0; - } - --static ssize_t quad8_signal_cable_fault_enable_read( -- struct counter_device *counter, struct counter_signal *signal, -- void *private, char *buf) -+static int quad8_signal_cable_fault_enable_read(struct counter_device *counter, -+ struct counter_signal *signal, -+ u8 *enable) - { - const struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id / 2; -- const unsigned int enb = !!(priv->cable_fault_enable & BIT(channel_id)); - -- return sprintf(buf, "%u\n", enb); -+ *enable = !!(priv->cable_fault_enable & BIT(channel_id)); -+ -+ return 0; - } - --static ssize_t quad8_signal_cable_fault_enable_write( -- struct counter_device *counter, struct counter_signal *signal, -- void *private, const char *buf, size_t len) -+static int quad8_signal_cable_fault_enable_write(struct counter_device *counter, -+ struct counter_signal *signal, -+ u8 enable) - { - struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id / 2; -- bool enable; -- int ret; - unsigned int cable_fault_enable; - -- ret = kstrtobool(buf, &enable); -- if (ret) -- return ret; -- - mutex_lock(&priv->lock); - - if (enable) -@@ -841,31 +775,27 @@ static ssize_t quad8_signal_cable_fault_enable_write( - - 
mutex_unlock(&priv->lock); - -- return len; -+ return 0; - } - --static ssize_t quad8_signal_fck_prescaler_read(struct counter_device *counter, -- struct counter_signal *signal, void *private, char *buf) -+static int quad8_signal_fck_prescaler_read(struct counter_device *counter, -+ struct counter_signal *signal, -+ u8 *prescaler) - { - const struct quad8 *const priv = counter->priv; -- const size_t channel_id = signal->id / 2; - -- return sprintf(buf, "%u\n", priv->fck_prescaler[channel_id]); -+ *prescaler = priv->fck_prescaler[signal->id / 2]; -+ -+ return 0; - } - --static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, -- struct counter_signal *signal, void *private, const char *buf, -- size_t len) -+static int quad8_signal_fck_prescaler_write(struct counter_device *counter, -+ struct counter_signal *signal, -+ u8 prescaler) - { - struct quad8 *const priv = counter->priv; - const size_t channel_id = signal->id / 2; - const int base_offset = priv->base + 2 * channel_id; -- u8 prescaler; -- int ret; -- -- ret = kstrtou8(buf, 0, &prescaler); -- if (ret) -- return ret; - - mutex_lock(&priv->lock); - -@@ -881,31 +811,30 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, - - mutex_unlock(&priv->lock); - -- return len; -+ return 0; - } - --static const struct counter_signal_ext quad8_signal_ext[] = { -- { -- .name = "cable_fault", -- .read = quad8_signal_cable_fault_read -- }, -- { -- .name = "cable_fault_enable", -- .read = quad8_signal_cable_fault_enable_read, -- .write = quad8_signal_cable_fault_enable_write -- }, -- { -- .name = "filter_clock_prescaler", -- .read = quad8_signal_fck_prescaler_read, -- .write = quad8_signal_fck_prescaler_write -- } -+static struct counter_comp quad8_signal_ext[] = { -+ COUNTER_COMP_SIGNAL_BOOL("cable_fault", quad8_signal_cable_fault_read, -+ NULL), -+ COUNTER_COMP_SIGNAL_BOOL("cable_fault_enable", -+ quad8_signal_cable_fault_enable_read, -+ quad8_signal_cable_fault_enable_write), -+ COUNTER_COMP_SIGNAL_U8("filter_clock_prescaler", -+ quad8_signal_fck_prescaler_read, -+ quad8_signal_fck_prescaler_write) - }; - --static const struct counter_signal_ext quad8_index_ext[] = { -- COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum), -- COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum), -- COUNTER_SIGNAL_ENUM("synchronous_mode", &quad8_syn_mode_enum), -- COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum) -+static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes); -+static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes); -+ -+static struct counter_comp quad8_index_ext[] = { -+ COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get, -+ quad8_index_polarity_set, -+ quad8_index_pol_enum), -+ COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get, -+ quad8_synchronous_mode_set, -+ quad8_synch_mode_enum), - }; - - #define QUAD8_QUAD_SIGNAL(_id, _name) { \ -@@ -974,39 +903,30 @@ static struct counter_synapse quad8_count_synapses[][3] = { - QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7) - }; - --static const struct counter_count_ext quad8_count_ext[] = { -- { -- .name = "ceiling", -- .read = quad8_count_ceiling_read, -- .write = quad8_count_ceiling_write -- }, -- { -- .name = "floor", -- .read = quad8_count_floor_read -- }, -- COUNTER_COUNT_ENUM("count_mode", &quad8_cnt_mode_enum), -- COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &quad8_cnt_mode_enum), -- { -- .name = "direction", -- .read = 
quad8_count_direction_read -- }, -- { -- .name = "enable", -- .read = quad8_count_enable_read, -- .write = quad8_count_enable_write -- }, -- COUNTER_COUNT_ENUM("error_noise", &quad8_error_noise_enum), -- COUNTER_COUNT_ENUM_AVAILABLE("error_noise", &quad8_error_noise_enum), -- { -- .name = "preset", -- .read = quad8_count_preset_read, -- .write = quad8_count_preset_write -- }, -- { -- .name = "preset_enable", -- .read = quad8_count_preset_enable_read, -- .write = quad8_count_preset_enable_write -- } -+static const enum counter_count_mode quad8_cnt_modes[] = { -+ COUNTER_COUNT_MODE_NORMAL, -+ COUNTER_COUNT_MODE_RANGE_LIMIT, -+ COUNTER_COUNT_MODE_NON_RECYCLE, -+ COUNTER_COUNT_MODE_MODULO_N, -+}; -+ -+static DEFINE_COUNTER_AVAILABLE(quad8_count_mode_available, quad8_cnt_modes); -+ -+static DEFINE_COUNTER_ENUM(quad8_error_noise_enum, quad8_noise_error_states); -+ -+static struct counter_comp quad8_count_ext[] = { -+ COUNTER_COMP_CEILING(quad8_count_ceiling_read, -+ quad8_count_ceiling_write), -+ COUNTER_COMP_FLOOR(quad8_count_floor_read, NULL), -+ COUNTER_COMP_COUNT_MODE(quad8_count_mode_read, quad8_count_mode_write, -+ quad8_count_mode_available), -+ COUNTER_COMP_DIRECTION(quad8_direction_read), -+ COUNTER_COMP_ENABLE(quad8_count_enable_read, quad8_count_enable_write), -+ COUNTER_COMP_COUNT_ENUM("error_noise", quad8_error_noise_get, NULL, -+ quad8_error_noise_enum), -+ COUNTER_COMP_PRESET(quad8_count_preset_read, quad8_count_preset_write), -+ COUNTER_COMP_PRESET_ENABLE(quad8_count_preset_enable_read, -+ quad8_count_preset_enable_write), - }; - - #define QUAD8_COUNT(_id, _cntname) { \ -diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile -index 19742e6f5e3eb..1ab7e087fdc26 100644 ---- a/drivers/counter/Makefile -+++ b/drivers/counter/Makefile -@@ -4,6 +4,7 @@ - # - - obj-$(CONFIG_COUNTER) += counter.o -+counter-y := counter-core.o counter-sysfs.o - - obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o - obj-$(CONFIG_INTERRUPT_CNT) += interrupt-cnt.o -diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c -new file mode 100644 -index 0000000000000..c533a6ff12cf7 ---- /dev/null -+++ b/drivers/counter/counter-core.c -@@ -0,0 +1,142 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Generic Counter interface -+ * Copyright (C) 2020 William Breathitt Gray -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "counter-sysfs.h" -+ -+/* Provides a unique ID for each counter device */ -+static DEFINE_IDA(counter_ida); -+ -+static void counter_device_release(struct device *dev) -+{ -+ ida_free(&counter_ida, dev->id); -+} -+ -+static struct device_type counter_device_type = { -+ .name = "counter_device", -+ .release = counter_device_release, -+}; -+ -+static struct bus_type counter_bus_type = { -+ .name = "counter", -+ .dev_name = "counter", -+}; -+ -+/** -+ * counter_register - register Counter to the system -+ * @counter: pointer to Counter to register -+ * -+ * This function registers a Counter to the system. A sysfs "counter" directory -+ * will be created and populated with sysfs attributes correlating with the -+ * Counter Signals, Synapses, and Counts respectively. 
-+ */ -+int counter_register(struct counter_device *const counter) -+{ -+ struct device *const dev = &counter->dev; -+ int id; -+ int err; -+ -+ /* Acquire unique ID */ -+ id = ida_alloc(&counter_ida, GFP_KERNEL); -+ if (id < 0) -+ return id; -+ -+ /* Configure device structure for Counter */ -+ dev->id = id; -+ dev->type = &counter_device_type; -+ dev->bus = &counter_bus_type; -+ if (counter->parent) { -+ dev->parent = counter->parent; -+ dev->of_node = counter->parent->of_node; -+ } -+ device_initialize(dev); -+ dev_set_drvdata(dev, counter); -+ -+ /* Add Counter sysfs attributes */ -+ err = counter_sysfs_add(counter); -+ if (err < 0) -+ goto err_free_id; -+ -+ /* Add device to system */ -+ err = device_add(dev); -+ if (err < 0) -+ goto err_free_id; -+ -+ return 0; -+ -+err_free_id: -+ put_device(dev); -+ return err; -+} -+EXPORT_SYMBOL_GPL(counter_register); -+ -+/** -+ * counter_unregister - unregister Counter from the system -+ * @counter: pointer to Counter to unregister -+ * -+ * The Counter is unregistered from the system. -+ */ -+void counter_unregister(struct counter_device *const counter) -+{ -+ if (!counter) -+ return; -+ -+ device_unregister(&counter->dev); -+} -+EXPORT_SYMBOL_GPL(counter_unregister); -+ -+static void devm_counter_release(void *counter) -+{ -+ counter_unregister(counter); -+} -+ -+/** -+ * devm_counter_register - Resource-managed counter_register -+ * @dev: device to allocate counter_device for -+ * @counter: pointer to Counter to register -+ * -+ * Managed counter_register. The Counter registered with this function is -+ * automatically unregistered on driver detach. This function calls -+ * counter_register internally. Refer to that function for more information. -+ * -+ * RETURNS: -+ * 0 on success, negative error number on failure. 
-+ */ -+int devm_counter_register(struct device *dev, -+ struct counter_device *const counter) -+{ -+ int err; -+ -+ err = counter_register(counter); -+ if (err < 0) -+ return err; -+ -+ return devm_add_action_or_reset(dev, devm_counter_release, counter); -+} -+EXPORT_SYMBOL_GPL(devm_counter_register); -+ -+static int __init counter_init(void) -+{ -+ return bus_register(&counter_bus_type); -+} -+ -+static void __exit counter_exit(void) -+{ -+ bus_unregister(&counter_bus_type); -+} -+ -+subsys_initcall(counter_init); -+module_exit(counter_exit); -+ -+MODULE_AUTHOR("William Breathitt Gray "); -+MODULE_DESCRIPTION("Generic Counter interface"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c -new file mode 100644 -index 0000000000000..108cbd838eb92 ---- /dev/null -+++ b/drivers/counter/counter-sysfs.c -@@ -0,0 +1,849 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Generic Counter sysfs interface -+ * Copyright (C) 2020 William Breathitt Gray -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "counter-sysfs.h" -+ -+/** -+ * struct counter_attribute - Counter sysfs attribute -+ * @dev_attr: device attribute for sysfs -+ * @l: node to add Counter attribute to attribute group list -+ * @comp: Counter component callbacks and data -+ * @scope: Counter scope of the attribute -+ * @parent: pointer to the parent component -+ */ -+struct counter_attribute { -+ struct device_attribute dev_attr; -+ struct list_head l; -+ -+ struct counter_comp comp; -+ enum counter_scope scope; -+ void *parent; -+}; -+ -+#define to_counter_attribute(_dev_attr) \ -+ container_of(_dev_attr, struct counter_attribute, dev_attr) -+ -+/** -+ * struct counter_attribute_group - container for attribute group -+ * @name: name of the attribute group -+ * @attr_list: list to keep track of created attributes -+ * @num_attr: number of attributes -+ */ -+struct counter_attribute_group { -+ const char *name; -+ struct list_head attr_list; -+ size_t num_attr; -+}; -+ -+static const char *const counter_function_str[] = { -+ [COUNTER_FUNCTION_INCREASE] = "increase", -+ [COUNTER_FUNCTION_DECREASE] = "decrease", -+ [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction", -+ [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a", -+ [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b", -+ [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a", -+ [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b", -+ [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4" -+}; -+ -+static const char *const counter_signal_value_str[] = { -+ [COUNTER_SIGNAL_LEVEL_LOW] = "low", -+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high" -+}; -+ -+static const char *const counter_synapse_action_str[] = { -+ [COUNTER_SYNAPSE_ACTION_NONE] = "none", -+ [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge", -+ [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge", -+ [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges" -+}; -+ -+static const char *const counter_count_direction_str[] = { -+ [COUNTER_COUNT_DIRECTION_FORWARD] = "forward", -+ [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward" -+}; -+ -+static const char *const counter_count_mode_str[] = { -+ [COUNTER_COUNT_MODE_NORMAL] = "normal", -+ [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit", -+ [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle", -+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n" -+}; -+ -+static ssize_t counter_comp_u8_show(struct device *dev, -+ struct device_attribute *attr, char *buf) 
-+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ int err; -+ u8 data = 0; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u8_read(counter, &data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u8_read(counter, a->parent, &data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ err = a->comp.count_u8_read(counter, a->parent, &data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ if (a->comp.type == COUNTER_COMP_BOOL) -+ /* data should already be boolean but ensure just to be safe */ -+ data = !!data; -+ -+ return sprintf(buf, "%u\n", (unsigned int)data); -+} -+ -+static ssize_t counter_comp_u8_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ int err; -+ bool bool_data = 0; -+ u8 data = 0; -+ -+ if (a->comp.type == COUNTER_COMP_BOOL) { -+ err = kstrtobool(buf, &bool_data); -+ data = bool_data; -+ } else -+ err = kstrtou8(buf, 0, &data); -+ if (err < 0) -+ return err; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u8_write(counter, data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u8_write(counter, a->parent, data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ err = a->comp.count_u8_write(counter, a->parent, data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ return len; -+} -+ -+static ssize_t counter_comp_u32_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ const struct counter_available *const avail = a->comp.priv; -+ int err; -+ u32 data = 0; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u32_read(counter, &data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u32_read(counter, a->parent, &data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION) -+ err = a->comp.action_read(counter, a->parent, -+ a->comp.priv, &data); -+ else -+ err = a->comp.count_u32_read(counter, a->parent, &data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ switch (a->comp.type) { -+ case COUNTER_COMP_FUNCTION: -+ return sysfs_emit(buf, "%s\n", counter_function_str[data]); -+ case COUNTER_COMP_SIGNAL_LEVEL: -+ return sysfs_emit(buf, "%s\n", counter_signal_value_str[data]); -+ case COUNTER_COMP_SYNAPSE_ACTION: -+ return sysfs_emit(buf, "%s\n", counter_synapse_action_str[data]); -+ case COUNTER_COMP_ENUM: -+ return sysfs_emit(buf, "%s\n", avail->strs[data]); -+ case COUNTER_COMP_COUNT_DIRECTION: -+ return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]); -+ case COUNTER_COMP_COUNT_MODE: -+ return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]); -+ default: -+ return sprintf(buf, "%u\n", (unsigned int)data); -+ } -+} -+ -+static int counter_find_enum(u32 *const enum_item, const u32 *const enums, -+ const size_t num_enums, const char *const buf, -+ const char *const string_array[]) -+{ -+ size_t index; -+ -+ for (index = 0; index < num_enums; index++) { -+ *enum_item = enums[index]; -+ if (sysfs_streq(buf, string_array[*enum_item])) -+ return 0; -+ } -+ -+ return -EINVAL; -+} 
-+ -+static ssize_t counter_comp_u32_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ struct counter_count *const count = a->parent; -+ struct counter_synapse *const synapse = a->comp.priv; -+ const struct counter_available *const avail = a->comp.priv; -+ int err; -+ u32 data = 0; -+ -+ switch (a->comp.type) { -+ case COUNTER_COMP_FUNCTION: -+ err = counter_find_enum(&data, count->functions_list, -+ count->num_functions, buf, -+ counter_function_str); -+ break; -+ case COUNTER_COMP_SYNAPSE_ACTION: -+ err = counter_find_enum(&data, synapse->actions_list, -+ synapse->num_actions, buf, -+ counter_synapse_action_str); -+ break; -+ case COUNTER_COMP_ENUM: -+ err = __sysfs_match_string(avail->strs, avail->num_items, buf); -+ data = err; -+ break; -+ case COUNTER_COMP_COUNT_MODE: -+ err = counter_find_enum(&data, avail->enums, avail->num_items, -+ buf, counter_count_mode_str); -+ break; -+ default: -+ err = kstrtou32(buf, 0, &data); -+ break; -+ } -+ if (err < 0) -+ return err; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u32_write(counter, data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u32_write(counter, a->parent, data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ if (a->comp.type == COUNTER_COMP_SYNAPSE_ACTION) -+ err = a->comp.action_write(counter, count, synapse, -+ data); -+ else -+ err = a->comp.count_u32_write(counter, count, data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ return len; -+} -+ -+static ssize_t counter_comp_u64_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ int err; -+ u64 data = 0; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u64_read(counter, &data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u64_read(counter, a->parent, &data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ err = a->comp.count_u64_read(counter, a->parent, &data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ return sprintf(buf, "%llu\n", (unsigned long long)data); -+} -+ -+static ssize_t counter_comp_u64_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ struct counter_device *const counter = dev_get_drvdata(dev); -+ int err; -+ u64 data = 0; -+ -+ err = kstrtou64(buf, 0, &data); -+ if (err < 0) -+ return err; -+ -+ switch (a->scope) { -+ case COUNTER_SCOPE_DEVICE: -+ err = a->comp.device_u64_write(counter, data); -+ break; -+ case COUNTER_SCOPE_SIGNAL: -+ err = a->comp.signal_u64_write(counter, a->parent, data); -+ break; -+ case COUNTER_SCOPE_COUNT: -+ err = a->comp.count_u64_write(counter, a->parent, data); -+ break; -+ default: -+ return -EINVAL; -+ } -+ if (err < 0) -+ return err; -+ -+ return len; -+} -+ -+static ssize_t enums_available_show(const u32 *const enums, -+ const size_t num_enums, -+ const char *const strs[], char *buf) -+{ -+ size_t len = 0; -+ size_t index; -+ -+ for (index = 0; index < num_enums; index++) -+ len += sysfs_emit_at(buf, len, "%s\n", strs[enums[index]]); -+ -+ return len; -+} -+ -+static ssize_t strs_available_show(const 
struct counter_available *const avail, -+ char *buf) -+{ -+ size_t len = 0; -+ size_t index; -+ -+ for (index = 0; index < avail->num_items; index++) -+ len += sysfs_emit_at(buf, len, "%s\n", avail->strs[index]); -+ -+ return len; -+} -+ -+static ssize_t counter_comp_available_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ const struct counter_attribute *const a = to_counter_attribute(attr); -+ const struct counter_count *const count = a->parent; -+ const struct counter_synapse *const synapse = a->comp.priv; -+ const struct counter_available *const avail = a->comp.priv; -+ -+ switch (a->comp.type) { -+ case COUNTER_COMP_FUNCTION: -+ return enums_available_show(count->functions_list, -+ count->num_functions, -+ counter_function_str, buf); -+ case COUNTER_COMP_SYNAPSE_ACTION: -+ return enums_available_show(synapse->actions_list, -+ synapse->num_actions, -+ counter_synapse_action_str, buf); -+ case COUNTER_COMP_ENUM: -+ return strs_available_show(avail, buf); -+ case COUNTER_COMP_COUNT_MODE: -+ return enums_available_show(avail->enums, avail->num_items, -+ counter_count_mode_str, buf); -+ default: -+ return -EINVAL; -+ } -+} -+ -+static int counter_avail_attr_create(struct device *const dev, -+ struct counter_attribute_group *const group, -+ const struct counter_comp *const comp, void *const parent) -+{ -+ struct counter_attribute *counter_attr; -+ struct device_attribute *dev_attr; -+ -+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL); -+ if (!counter_attr) -+ return -ENOMEM; -+ -+ /* Configure Counter attribute */ -+ counter_attr->comp.type = comp->type; -+ counter_attr->comp.priv = comp->priv; -+ counter_attr->parent = parent; -+ -+ /* Initialize sysfs attribute */ -+ dev_attr = &counter_attr->dev_attr; -+ sysfs_attr_init(&dev_attr->attr); -+ -+ /* Configure device attribute */ -+ dev_attr->attr.name = devm_kasprintf(dev, GFP_KERNEL, "%s_available", -+ comp->name); -+ if (!dev_attr->attr.name) -+ return -ENOMEM; -+ dev_attr->attr.mode = 0444; -+ dev_attr->show = counter_comp_available_show; -+ -+ /* Store list node */ -+ list_add(&counter_attr->l, &group->attr_list); -+ group->num_attr++; -+ -+ return 0; -+} -+ -+static int counter_attr_create(struct device *const dev, -+ struct counter_attribute_group *const group, -+ const struct counter_comp *const comp, -+ const enum counter_scope scope, -+ void *const parent) -+{ -+ struct counter_attribute *counter_attr; -+ struct device_attribute *dev_attr; -+ -+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL); -+ if (!counter_attr) -+ return -ENOMEM; -+ -+ /* Configure Counter attribute */ -+ counter_attr->comp = *comp; -+ counter_attr->scope = scope; -+ counter_attr->parent = parent; -+ -+ /* Configure device attribute */ -+ dev_attr = &counter_attr->dev_attr; -+ sysfs_attr_init(&dev_attr->attr); -+ dev_attr->attr.name = comp->name; -+ switch (comp->type) { -+ case COUNTER_COMP_U8: -+ case COUNTER_COMP_BOOL: -+ if (comp->device_u8_read) { -+ dev_attr->attr.mode |= 0444; -+ dev_attr->show = counter_comp_u8_show; -+ } -+ if (comp->device_u8_write) { -+ dev_attr->attr.mode |= 0200; -+ dev_attr->store = counter_comp_u8_store; -+ } -+ break; -+ case COUNTER_COMP_SIGNAL_LEVEL: -+ case COUNTER_COMP_FUNCTION: -+ case COUNTER_COMP_SYNAPSE_ACTION: -+ case COUNTER_COMP_ENUM: -+ case COUNTER_COMP_COUNT_DIRECTION: -+ case COUNTER_COMP_COUNT_MODE: -+ if (comp->device_u32_read) { -+ dev_attr->attr.mode |= 0444; -+ dev_attr->show = counter_comp_u32_show; -+ } -+ if (comp->device_u32_write) { 
-+ dev_attr->attr.mode |= 0200; -+ dev_attr->store = counter_comp_u32_store; -+ } -+ break; -+ case COUNTER_COMP_U64: -+ if (comp->device_u64_read) { -+ dev_attr->attr.mode |= 0444; -+ dev_attr->show = counter_comp_u64_show; -+ } -+ if (comp->device_u64_write) { -+ dev_attr->attr.mode |= 0200; -+ dev_attr->store = counter_comp_u64_store; -+ } -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ /* Store list node */ -+ list_add(&counter_attr->l, &group->attr_list); -+ group->num_attr++; -+ -+ /* Create "*_available" attribute if needed */ -+ switch (comp->type) { -+ case COUNTER_COMP_FUNCTION: -+ case COUNTER_COMP_SYNAPSE_ACTION: -+ case COUNTER_COMP_ENUM: -+ case COUNTER_COMP_COUNT_MODE: -+ return counter_avail_attr_create(dev, group, comp, parent); -+ default: -+ return 0; -+ } -+} -+ -+static ssize_t counter_comp_name_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ return sysfs_emit(buf, "%s\n", to_counter_attribute(attr)->comp.name); -+} -+ -+static int counter_name_attr_create(struct device *const dev, -+ struct counter_attribute_group *const group, -+ const char *const name) -+{ -+ struct counter_attribute *counter_attr; -+ -+ counter_attr = devm_kzalloc(dev, sizeof(*counter_attr), GFP_KERNEL); -+ if (!counter_attr) -+ return -ENOMEM; -+ -+ /* Configure Counter attribute */ -+ counter_attr->comp.name = name; -+ -+ /* Configure device attribute */ -+ sysfs_attr_init(&counter_attr->dev_attr.attr); -+ counter_attr->dev_attr.attr.name = "name"; -+ counter_attr->dev_attr.attr.mode = 0444; -+ counter_attr->dev_attr.show = counter_comp_name_show; -+ -+ /* Store list node */ -+ list_add(&counter_attr->l, &group->attr_list); -+ group->num_attr++; -+ -+ return 0; -+} -+ -+static struct counter_comp counter_signal_comp = { -+ .type = COUNTER_COMP_SIGNAL_LEVEL, -+ .name = "signal", -+}; -+ -+static int counter_signal_attrs_create(struct counter_device *const counter, -+ struct counter_attribute_group *const cattr_group, -+ struct counter_signal *const signal) -+{ -+ const enum counter_scope scope = COUNTER_SCOPE_SIGNAL; -+ struct device *const dev = &counter->dev; -+ int err; -+ struct counter_comp comp; -+ size_t i; -+ -+ /* Create main Signal attribute */ -+ comp = counter_signal_comp; -+ comp.signal_u32_read = counter->ops->signal_read; -+ err = counter_attr_create(dev, cattr_group, &comp, scope, signal); -+ if (err < 0) -+ return err; -+ -+ /* Create Signal name attribute */ -+ err = counter_name_attr_create(dev, cattr_group, signal->name); -+ if (err < 0) -+ return err; -+ -+ /* Create an attribute for each extension */ -+ for (i = 0; i < signal->num_ext; i++) { -+ err = counter_attr_create(dev, cattr_group, signal->ext + i, -+ scope, signal); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int counter_sysfs_signals_add(struct counter_device *const counter, -+ struct counter_attribute_group *const groups) -+{ -+ size_t i; -+ int err; -+ -+ /* Add each Signal */ -+ for (i = 0; i < counter->num_signals; i++) { -+ /* Generate Signal attribute directory name */ -+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL, -+ "signal%zu", i); -+ if (!groups[i].name) -+ return -ENOMEM; -+ -+ /* Create all attributes associated with Signal */ -+ err = counter_signal_attrs_create(counter, groups + i, -+ counter->signals + i); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int counter_sysfs_synapses_add(struct counter_device *const counter, -+ struct counter_attribute_group *const group, -+ struct counter_count *const count) -+{ -+ 
size_t i; -+ -+ /* Add each Synapse */ -+ for (i = 0; i < count->num_synapses; i++) { -+ struct device *const dev = &counter->dev; -+ struct counter_synapse *synapse; -+ size_t id; -+ struct counter_comp comp; -+ int err; -+ -+ synapse = count->synapses + i; -+ -+ /* Generate Synapse action name */ -+ id = synapse->signal - counter->signals; -+ comp.name = devm_kasprintf(dev, GFP_KERNEL, "signal%zu_action", -+ id); -+ if (!comp.name) -+ return -ENOMEM; -+ -+ /* Create action attribute */ -+ comp.type = COUNTER_COMP_SYNAPSE_ACTION; -+ comp.action_read = counter->ops->action_read; -+ comp.action_write = counter->ops->action_write; -+ comp.priv = synapse; -+ err = counter_attr_create(dev, group, &comp, -+ COUNTER_SCOPE_COUNT, count); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static struct counter_comp counter_count_comp = -+ COUNTER_COMP_COUNT_U64("count", NULL, NULL); -+ -+static struct counter_comp counter_function_comp = { -+ .type = COUNTER_COMP_FUNCTION, -+ .name = "function", -+}; -+ -+static int counter_count_attrs_create(struct counter_device *const counter, -+ struct counter_attribute_group *const cattr_group, -+ struct counter_count *const count) -+{ -+ const enum counter_scope scope = COUNTER_SCOPE_COUNT; -+ struct device *const dev = &counter->dev; -+ int err; -+ struct counter_comp comp; -+ size_t i; -+ -+ /* Create main Count attribute */ -+ comp = counter_count_comp; -+ comp.count_u64_read = counter->ops->count_read; -+ comp.count_u64_write = counter->ops->count_write; -+ err = counter_attr_create(dev, cattr_group, &comp, scope, count); -+ if (err < 0) -+ return err; -+ -+ /* Create Count name attribute */ -+ err = counter_name_attr_create(dev, cattr_group, count->name); -+ if (err < 0) -+ return err; -+ -+ /* Create Count function attribute */ -+ comp = counter_function_comp; -+ comp.count_u32_read = counter->ops->function_read; -+ comp.count_u32_write = counter->ops->function_write; -+ err = counter_attr_create(dev, cattr_group, &comp, scope, count); -+ if (err < 0) -+ return err; -+ -+ /* Create an attribute for each extension */ -+ for (i = 0; i < count->num_ext; i++) { -+ err = counter_attr_create(dev, cattr_group, count->ext + i, -+ scope, count); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int counter_sysfs_counts_add(struct counter_device *const counter, -+ struct counter_attribute_group *const groups) -+{ -+ size_t i; -+ struct counter_count *count; -+ int err; -+ -+ /* Add each Count */ -+ for (i = 0; i < counter->num_counts; i++) { -+ count = counter->counts + i; -+ -+ /* Generate Count attribute directory name */ -+ groups[i].name = devm_kasprintf(&counter->dev, GFP_KERNEL, -+ "count%zu", i); -+ if (!groups[i].name) -+ return -ENOMEM; -+ -+ /* Add sysfs attributes of the Synapses */ -+ err = counter_sysfs_synapses_add(counter, groups + i, count); -+ if (err < 0) -+ return err; -+ -+ /* Create all attributes associated with Count */ -+ err = counter_count_attrs_create(counter, groups + i, count); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int counter_num_signals_read(struct counter_device *counter, u8 *val) -+{ -+ *val = counter->num_signals; -+ return 0; -+} -+ -+static int counter_num_counts_read(struct counter_device *counter, u8 *val) -+{ -+ *val = counter->num_counts; -+ return 0; -+} -+ -+static struct counter_comp counter_num_signals_comp = -+ COUNTER_COMP_DEVICE_U8("num_signals", counter_num_signals_read, NULL); -+ -+static struct counter_comp counter_num_counts_comp = -+ 
COUNTER_COMP_DEVICE_U8("num_counts", counter_num_counts_read, NULL); -+ -+static int counter_sysfs_attr_add(struct counter_device *const counter, -+ struct counter_attribute_group *cattr_group) -+{ -+ const enum counter_scope scope = COUNTER_SCOPE_DEVICE; -+ struct device *const dev = &counter->dev; -+ int err; -+ size_t i; -+ -+ /* Add Signals sysfs attributes */ -+ err = counter_sysfs_signals_add(counter, cattr_group); -+ if (err < 0) -+ return err; -+ cattr_group += counter->num_signals; -+ -+ /* Add Counts sysfs attributes */ -+ err = counter_sysfs_counts_add(counter, cattr_group); -+ if (err < 0) -+ return err; -+ cattr_group += counter->num_counts; -+ -+ /* Create name attribute */ -+ err = counter_name_attr_create(dev, cattr_group, counter->name); -+ if (err < 0) -+ return err; -+ -+ /* Create num_signals attribute */ -+ err = counter_attr_create(dev, cattr_group, &counter_num_signals_comp, -+ scope, NULL); -+ if (err < 0) -+ return err; -+ -+ /* Create num_counts attribute */ -+ err = counter_attr_create(dev, cattr_group, &counter_num_counts_comp, -+ scope, NULL); -+ if (err < 0) -+ return err; -+ -+ /* Create an attribute for each extension */ -+ for (i = 0; i < counter->num_ext; i++) { -+ err = counter_attr_create(dev, cattr_group, counter->ext + i, -+ scope, NULL); -+ if (err < 0) -+ return err; -+ } -+ -+ return 0; -+} -+ -+/** -+ * counter_sysfs_add - Adds Counter sysfs attributes to the device structure -+ * @counter: Pointer to the Counter device structure -+ * -+ * Counter sysfs attributes are created and added to the respective device -+ * structure for later registration to the system. Resource-managed memory -+ * allocation is performed by this function, and this memory should be freed -+ * when no longer needed (automatically by a device_unregister call, or -+ * manually by a devres_release_all call). 
-+ */ -+int counter_sysfs_add(struct counter_device *const counter) -+{ -+ struct device *const dev = &counter->dev; -+ const size_t num_groups = counter->num_signals + counter->num_counts + 1; -+ struct counter_attribute_group *cattr_groups; -+ size_t i, j; -+ int err; -+ struct attribute_group *groups; -+ struct counter_attribute *p; -+ -+ /* Allocate space for attribute groups (signals, counts, and ext) */ -+ cattr_groups = devm_kcalloc(dev, num_groups, sizeof(*cattr_groups), -+ GFP_KERNEL); -+ if (!cattr_groups) -+ return -ENOMEM; -+ -+ /* Initialize attribute lists */ -+ for (i = 0; i < num_groups; i++) -+ INIT_LIST_HEAD(&cattr_groups[i].attr_list); -+ -+ /* Add Counter device sysfs attributes */ -+ err = counter_sysfs_attr_add(counter, cattr_groups); -+ if (err < 0) -+ return err; -+ -+ /* Allocate attribute group pointers for association with device */ -+ dev->groups = devm_kcalloc(dev, num_groups + 1, sizeof(*dev->groups), -+ GFP_KERNEL); -+ if (!dev->groups) -+ return -ENOMEM; -+ -+ /* Allocate space for attribute groups */ -+ groups = devm_kcalloc(dev, num_groups, sizeof(*groups), GFP_KERNEL); -+ if (!groups) -+ return -ENOMEM; -+ -+ /* Prepare each group of attributes for association */ -+ for (i = 0; i < num_groups; i++) { -+ groups[i].name = cattr_groups[i].name; -+ -+ /* Allocate space for attribute pointers */ -+ groups[i].attrs = devm_kcalloc(dev, -+ cattr_groups[i].num_attr + 1, -+ sizeof(*groups[i].attrs), -+ GFP_KERNEL); -+ if (!groups[i].attrs) -+ return -ENOMEM; -+ -+ /* Add attribute pointers to attribute group */ -+ j = 0; -+ list_for_each_entry(p, &cattr_groups[i].attr_list, l) -+ groups[i].attrs[j++] = &p->dev_attr.attr; -+ -+ /* Associate attribute group */ -+ dev->groups[i] = &groups[i]; -+ } -+ -+ return 0; -+} -diff --git a/drivers/counter/counter-sysfs.h b/drivers/counter/counter-sysfs.h -new file mode 100644 -index 0000000000000..14fe566aca0e0 ---- /dev/null -+++ b/drivers/counter/counter-sysfs.h -@@ -0,0 +1,13 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Counter sysfs interface -+ * Copyright (C) 2020 William Breathitt Gray -+ */ -+#ifndef _COUNTER_SYSFS_H_ -+#define _COUNTER_SYSFS_H_ -+ -+#include -+ -+int counter_sysfs_add(struct counter_device *const counter); -+ -+#endif /* _COUNTER_SYSFS_H_ */ -diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c -deleted file mode 100644 -index de921e8a3f721..0000000000000 ---- a/drivers/counter/counter.c -+++ /dev/null -@@ -1,1496 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 --/* -- * Generic Counter interface -- * Copyright (C) 2018 William Breathitt Gray -- */ --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --const char *const counter_count_direction_str[2] = { -- [COUNTER_COUNT_DIRECTION_FORWARD] = "forward", -- [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward" --}; --EXPORT_SYMBOL_GPL(counter_count_direction_str); -- --const char *const counter_count_mode_str[4] = { -- [COUNTER_COUNT_MODE_NORMAL] = "normal", -- [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit", -- [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle", -- [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n" --}; --EXPORT_SYMBOL_GPL(counter_count_mode_str); -- --ssize_t counter_signal_enum_read(struct counter_device *counter, -- struct counter_signal *signal, void *priv, -- char *buf) --{ -- const struct counter_signal_enum_ext *const e = priv; -- int err; -- size_t index; -- -- if (!e->get) -- return -EINVAL; -- 
-- err = e->get(counter, signal, &index); -- if (err) -- return err; -- -- if (index >= e->num_items) -- return -EINVAL; -- -- return sprintf(buf, "%s\n", e->items[index]); --} --EXPORT_SYMBOL_GPL(counter_signal_enum_read); -- --ssize_t counter_signal_enum_write(struct counter_device *counter, -- struct counter_signal *signal, void *priv, -- const char *buf, size_t len) --{ -- const struct counter_signal_enum_ext *const e = priv; -- ssize_t index; -- int err; -- -- if (!e->set) -- return -EINVAL; -- -- index = __sysfs_match_string(e->items, e->num_items, buf); -- if (index < 0) -- return index; -- -- err = e->set(counter, signal, index); -- if (err) -- return err; -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_signal_enum_write); -- --ssize_t counter_signal_enum_available_read(struct counter_device *counter, -- struct counter_signal *signal, -- void *priv, char *buf) --{ -- const struct counter_signal_enum_ext *const e = priv; -- size_t i; -- size_t len = 0; -- -- if (!e->num_items) -- return 0; -- -- for (i = 0; i < e->num_items; i++) -- len += sprintf(buf + len, "%s\n", e->items[i]); -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_signal_enum_available_read); -- --ssize_t counter_count_enum_read(struct counter_device *counter, -- struct counter_count *count, void *priv, -- char *buf) --{ -- const struct counter_count_enum_ext *const e = priv; -- int err; -- size_t index; -- -- if (!e->get) -- return -EINVAL; -- -- err = e->get(counter, count, &index); -- if (err) -- return err; -- -- if (index >= e->num_items) -- return -EINVAL; -- -- return sprintf(buf, "%s\n", e->items[index]); --} --EXPORT_SYMBOL_GPL(counter_count_enum_read); -- --ssize_t counter_count_enum_write(struct counter_device *counter, -- struct counter_count *count, void *priv, -- const char *buf, size_t len) --{ -- const struct counter_count_enum_ext *const e = priv; -- ssize_t index; -- int err; -- -- if (!e->set) -- return -EINVAL; -- -- index = __sysfs_match_string(e->items, e->num_items, buf); -- if (index < 0) -- return index; -- -- err = e->set(counter, count, index); -- if (err) -- return err; -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_count_enum_write); -- --ssize_t counter_count_enum_available_read(struct counter_device *counter, -- struct counter_count *count, -- void *priv, char *buf) --{ -- const struct counter_count_enum_ext *const e = priv; -- size_t i; -- size_t len = 0; -- -- if (!e->num_items) -- return 0; -- -- for (i = 0; i < e->num_items; i++) -- len += sprintf(buf + len, "%s\n", e->items[i]); -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_count_enum_available_read); -- --ssize_t counter_device_enum_read(struct counter_device *counter, void *priv, -- char *buf) --{ -- const struct counter_device_enum_ext *const e = priv; -- int err; -- size_t index; -- -- if (!e->get) -- return -EINVAL; -- -- err = e->get(counter, &index); -- if (err) -- return err; -- -- if (index >= e->num_items) -- return -EINVAL; -- -- return sprintf(buf, "%s\n", e->items[index]); --} --EXPORT_SYMBOL_GPL(counter_device_enum_read); -- --ssize_t counter_device_enum_write(struct counter_device *counter, void *priv, -- const char *buf, size_t len) --{ -- const struct counter_device_enum_ext *const e = priv; -- ssize_t index; -- int err; -- -- if (!e->set) -- return -EINVAL; -- -- index = __sysfs_match_string(e->items, e->num_items, buf); -- if (index < 0) -- return index; -- -- err = e->set(counter, index); -- if (err) -- return err; -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_device_enum_write); -- --ssize_t 
counter_device_enum_available_read(struct counter_device *counter, -- void *priv, char *buf) --{ -- const struct counter_device_enum_ext *const e = priv; -- size_t i; -- size_t len = 0; -- -- if (!e->num_items) -- return 0; -- -- for (i = 0; i < e->num_items; i++) -- len += sprintf(buf + len, "%s\n", e->items[i]); -- -- return len; --} --EXPORT_SYMBOL_GPL(counter_device_enum_available_read); -- --struct counter_attr_parm { -- struct counter_device_attr_group *group; -- const char *prefix; -- const char *name; -- ssize_t (*show)(struct device *dev, struct device_attribute *attr, -- char *buf); -- ssize_t (*store)(struct device *dev, struct device_attribute *attr, -- const char *buf, size_t len); -- void *component; --}; -- --struct counter_device_attr { -- struct device_attribute dev_attr; -- struct list_head l; -- void *component; --}; -- --static int counter_attribute_create(const struct counter_attr_parm *const parm) --{ -- struct counter_device_attr *counter_attr; -- struct device_attribute *dev_attr; -- int err; -- struct list_head *const attr_list = &parm->group->attr_list; -- -- /* Allocate a Counter device attribute */ -- counter_attr = kzalloc(sizeof(*counter_attr), GFP_KERNEL); -- if (!counter_attr) -- return -ENOMEM; -- dev_attr = &counter_attr->dev_attr; -- -- sysfs_attr_init(&dev_attr->attr); -- -- /* Configure device attribute */ -- dev_attr->attr.name = kasprintf(GFP_KERNEL, "%s%s", parm->prefix, -- parm->name); -- if (!dev_attr->attr.name) { -- err = -ENOMEM; -- goto err_free_counter_attr; -- } -- if (parm->show) { -- dev_attr->attr.mode |= 0444; -- dev_attr->show = parm->show; -- } -- if (parm->store) { -- dev_attr->attr.mode |= 0200; -- dev_attr->store = parm->store; -- } -- -- /* Store associated Counter component with attribute */ -- counter_attr->component = parm->component; -- -- /* Keep track of the attribute for later cleanup */ -- list_add(&counter_attr->l, attr_list); -- parm->group->num_attr++; -- -- return 0; -- --err_free_counter_attr: -- kfree(counter_attr); -- return err; --} -- --#define to_counter_attr(_dev_attr) \ -- container_of(_dev_attr, struct counter_device_attr, dev_attr) -- --struct counter_signal_unit { -- struct counter_signal *signal; --}; -- --static const char *const counter_signal_level_str[] = { -- [COUNTER_SIGNAL_LEVEL_LOW] = "low", -- [COUNTER_SIGNAL_LEVEL_HIGH] = "high" --}; -- --static ssize_t counter_signal_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- struct counter_device *const counter = dev_get_drvdata(dev); -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_signal_unit *const component = devattr->component; -- struct counter_signal *const signal = component->signal; -- int err; -- enum counter_signal_level level; -- -- err = counter->ops->signal_read(counter, signal, &level); -- if (err) -- return err; -- -- return sprintf(buf, "%s\n", counter_signal_level_str[level]); --} -- --struct counter_name_unit { -- const char *name; --}; -- --static ssize_t counter_device_attr_name_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- const struct counter_name_unit *const comp = to_counter_attr(attr)->component; -- -- return sprintf(buf, "%s\n", comp->name); --} -- --static int counter_name_attribute_create( -- struct counter_device_attr_group *const group, -- const char *const name) --{ -- struct counter_name_unit *name_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Skip if no name */ -- if (!name) -- return 0; -- -- /* 
Allocate name attribute component */ -- name_comp = kmalloc(sizeof(*name_comp), GFP_KERNEL); -- if (!name_comp) -- return -ENOMEM; -- name_comp->name = name; -- -- /* Allocate Signal name attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = "name"; -- parm.show = counter_device_attr_name_show; -- parm.store = NULL; -- parm.component = name_comp; -- err = counter_attribute_create(&parm); -- if (err) -- goto err_free_name_comp; -- -- return 0; -- --err_free_name_comp: -- kfree(name_comp); -- return err; --} -- --struct counter_signal_ext_unit { -- struct counter_signal *signal; -- const struct counter_signal_ext *ext; --}; -- --static ssize_t counter_signal_ext_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_signal_ext_unit *const comp = devattr->component; -- const struct counter_signal_ext *const ext = comp->ext; -- -- return ext->read(dev_get_drvdata(dev), comp->signal, ext->priv, buf); --} -- --static ssize_t counter_signal_ext_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_signal_ext_unit *const comp = devattr->component; -- const struct counter_signal_ext *const ext = comp->ext; -- -- return ext->write(dev_get_drvdata(dev), comp->signal, ext->priv, buf, -- len); --} -- --static void counter_device_attr_list_free(struct list_head *attr_list) --{ -- struct counter_device_attr *p, *n; -- -- list_for_each_entry_safe(p, n, attr_list, l) { -- /* free attribute name and associated component memory */ -- kfree(p->dev_attr.attr.name); -- kfree(p->component); -- list_del(&p->l); -- kfree(p); -- } --} -- --static int counter_signal_ext_register( -- struct counter_device_attr_group *const group, -- struct counter_signal *const signal) --{ -- const size_t num_ext = signal->num_ext; -- size_t i; -- const struct counter_signal_ext *ext; -- struct counter_signal_ext_unit *signal_ext_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Create an attribute for each extension */ -- for (i = 0 ; i < num_ext; i++) { -- ext = signal->ext + i; -- -- /* Allocate signal_ext attribute component */ -- signal_ext_comp = kmalloc(sizeof(*signal_ext_comp), GFP_KERNEL); -- if (!signal_ext_comp) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- signal_ext_comp->signal = signal; -- signal_ext_comp->ext = ext; -- -- /* Allocate a Counter device attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = ext->name; -- parm.show = (ext->read) ? counter_signal_ext_show : NULL; -- parm.store = (ext->write) ? 
counter_signal_ext_store : NULL; -- parm.component = signal_ext_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(signal_ext_comp); -- goto err_free_attr_list; -- } -- } -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --static int counter_signal_attributes_create( -- struct counter_device_attr_group *const group, -- const struct counter_device *const counter, -- struct counter_signal *const signal) --{ -- struct counter_signal_unit *signal_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Allocate Signal attribute component */ -- signal_comp = kmalloc(sizeof(*signal_comp), GFP_KERNEL); -- if (!signal_comp) -- return -ENOMEM; -- signal_comp->signal = signal; -- -- /* Create main Signal attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = "signal"; -- parm.show = (counter->ops->signal_read) ? counter_signal_show : NULL; -- parm.store = NULL; -- parm.component = signal_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(signal_comp); -- return err; -- } -- -- /* Create Signal name attribute */ -- err = counter_name_attribute_create(group, signal->name); -- if (err) -- goto err_free_attr_list; -- -- /* Register Signal extension attributes */ -- err = counter_signal_ext_register(group, signal); -- if (err) -- goto err_free_attr_list; -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --static int counter_signals_register( -- struct counter_device_attr_group *const groups_list, -- const struct counter_device *const counter) --{ -- const size_t num_signals = counter->num_signals; -- size_t i; -- struct counter_signal *signal; -- const char *name; -- int err; -- -- /* Register each Signal */ -- for (i = 0; i < num_signals; i++) { -- signal = counter->signals + i; -- -- /* Generate Signal attribute directory name */ -- name = kasprintf(GFP_KERNEL, "signal%d", signal->id); -- if (!name) { -- err = -ENOMEM; -- goto err_free_attr_groups; -- } -- groups_list[i].attr_group.name = name; -- -- /* Create all attributes associated with Signal */ -- err = counter_signal_attributes_create(groups_list + i, counter, -- signal); -- if (err) -- goto err_free_attr_groups; -- } -- -- return 0; -- --err_free_attr_groups: -- do { -- kfree(groups_list[i].attr_group.name); -- counter_device_attr_list_free(&groups_list[i].attr_list); -- } while (i--); -- return err; --} -- --static const char *const counter_synapse_action_str[] = { -- [COUNTER_SYNAPSE_ACTION_NONE] = "none", -- [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge", -- [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge", -- [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges" --}; -- --struct counter_action_unit { -- struct counter_synapse *synapse; -- struct counter_count *count; --}; -- --static ssize_t counter_action_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- int err; -- struct counter_device *const counter = dev_get_drvdata(dev); -- const struct counter_action_unit *const component = devattr->component; -- struct counter_count *const count = component->count; -- struct counter_synapse *const synapse = component->synapse; -- size_t action_index; -- enum counter_synapse_action action; -- -- err = counter->ops->action_get(counter, count, synapse, &action_index); -- if (err) -- return err; -- -- synapse->action = action_index; -- -- action = 
synapse->actions_list[action_index]; -- return sprintf(buf, "%s\n", counter_synapse_action_str[action]); --} -- --static ssize_t counter_action_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_action_unit *const component = devattr->component; -- struct counter_synapse *const synapse = component->synapse; -- size_t action_index; -- const size_t num_actions = synapse->num_actions; -- enum counter_synapse_action action; -- int err; -- struct counter_device *const counter = dev_get_drvdata(dev); -- struct counter_count *const count = component->count; -- -- /* Find requested action mode */ -- for (action_index = 0; action_index < num_actions; action_index++) { -- action = synapse->actions_list[action_index]; -- if (sysfs_streq(buf, counter_synapse_action_str[action])) -- break; -- } -- /* If requested action mode not found */ -- if (action_index >= num_actions) -- return -EINVAL; -- -- err = counter->ops->action_set(counter, count, synapse, action_index); -- if (err) -- return err; -- -- synapse->action = action_index; -- -- return len; --} -- --struct counter_action_avail_unit { -- const enum counter_synapse_action *actions_list; -- size_t num_actions; --}; -- --static ssize_t counter_synapse_action_available_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_action_avail_unit *const component = devattr->component; -- size_t i; -- enum counter_synapse_action action; -- ssize_t len = 0; -- -- for (i = 0; i < component->num_actions; i++) { -- action = component->actions_list[i]; -- len += sprintf(buf + len, "%s\n", -- counter_synapse_action_str[action]); -- } -- -- return len; --} -- --static int counter_synapses_register( -- struct counter_device_attr_group *const group, -- const struct counter_device *const counter, -- struct counter_count *const count, const char *const count_attr_name) --{ -- size_t i; -- struct counter_synapse *synapse; -- const char *prefix; -- struct counter_action_unit *action_comp; -- struct counter_attr_parm parm; -- int err; -- struct counter_action_avail_unit *avail_comp; -- -- /* Register each Synapse */ -- for (i = 0; i < count->num_synapses; i++) { -- synapse = count->synapses + i; -- -- /* Generate attribute prefix */ -- prefix = kasprintf(GFP_KERNEL, "signal%d_", -- synapse->signal->id); -- if (!prefix) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- -- /* Allocate action attribute component */ -- action_comp = kmalloc(sizeof(*action_comp), GFP_KERNEL); -- if (!action_comp) { -- err = -ENOMEM; -- goto err_free_prefix; -- } -- action_comp->synapse = synapse; -- action_comp->count = count; -- -- /* Create action attribute */ -- parm.group = group; -- parm.prefix = prefix; -- parm.name = "action"; -- parm.show = (counter->ops->action_get) ? counter_action_show : NULL; -- parm.store = (counter->ops->action_set) ? 
counter_action_store : NULL; -- parm.component = action_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(action_comp); -- goto err_free_prefix; -- } -- -- /* Allocate action available attribute component */ -- avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL); -- if (!avail_comp) { -- err = -ENOMEM; -- goto err_free_prefix; -- } -- avail_comp->actions_list = synapse->actions_list; -- avail_comp->num_actions = synapse->num_actions; -- -- /* Create action_available attribute */ -- parm.group = group; -- parm.prefix = prefix; -- parm.name = "action_available"; -- parm.show = counter_synapse_action_available_show; -- parm.store = NULL; -- parm.component = avail_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(avail_comp); -- goto err_free_prefix; -- } -- -- kfree(prefix); -- } -- -- return 0; -- --err_free_prefix: -- kfree(prefix); --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --struct counter_count_unit { -- struct counter_count *count; --}; -- --static ssize_t counter_count_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- struct counter_device *const counter = dev_get_drvdata(dev); -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_unit *const component = devattr->component; -- struct counter_count *const count = component->count; -- int err; -- unsigned long val; -- -- err = counter->ops->count_read(counter, count, &val); -- if (err) -- return err; -- -- return sprintf(buf, "%lu\n", val); --} -- --static ssize_t counter_count_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- struct counter_device *const counter = dev_get_drvdata(dev); -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_unit *const component = devattr->component; -- struct counter_count *const count = component->count; -- int err; -- unsigned long val; -- -- err = kstrtoul(buf, 0, &val); -- if (err) -- return err; -- -- err = counter->ops->count_write(counter, count, val); -- if (err) -- return err; -- -- return len; --} -- --static const char *const counter_function_str[] = { -- [COUNTER_FUNCTION_INCREASE] = "increase", -- [COUNTER_FUNCTION_DECREASE] = "decrease", -- [COUNTER_FUNCTION_PULSE_DIRECTION] = "pulse-direction", -- [COUNTER_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a", -- [COUNTER_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b", -- [COUNTER_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a", -- [COUNTER_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b", -- [COUNTER_FUNCTION_QUADRATURE_X4] = "quadrature x4" --}; -- --static ssize_t counter_function_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- int err; -- struct counter_device *const counter = dev_get_drvdata(dev); -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_unit *const component = devattr->component; -- struct counter_count *const count = component->count; -- size_t func_index; -- enum counter_function function; -- -- err = counter->ops->function_get(counter, count, &func_index); -- if (err) -- return err; -- -- count->function = func_index; -- -- function = count->functions_list[func_index]; -- return sprintf(buf, "%s\n", counter_function_str[function]); --} -- --static ssize_t counter_function_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- const 
struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_unit *const component = devattr->component; -- struct counter_count *const count = component->count; -- const size_t num_functions = count->num_functions; -- size_t func_index; -- enum counter_function function; -- int err; -- struct counter_device *const counter = dev_get_drvdata(dev); -- -- /* Find requested Count function mode */ -- for (func_index = 0; func_index < num_functions; func_index++) { -- function = count->functions_list[func_index]; -- if (sysfs_streq(buf, counter_function_str[function])) -- break; -- } -- /* Return error if requested Count function mode not found */ -- if (func_index >= num_functions) -- return -EINVAL; -- -- err = counter->ops->function_set(counter, count, func_index); -- if (err) -- return err; -- -- count->function = func_index; -- -- return len; --} -- --struct counter_count_ext_unit { -- struct counter_count *count; -- const struct counter_count_ext *ext; --}; -- --static ssize_t counter_count_ext_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_ext_unit *const comp = devattr->component; -- const struct counter_count_ext *const ext = comp->ext; -- -- return ext->read(dev_get_drvdata(dev), comp->count, ext->priv, buf); --} -- --static ssize_t counter_count_ext_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_count_ext_unit *const comp = devattr->component; -- const struct counter_count_ext *const ext = comp->ext; -- -- return ext->write(dev_get_drvdata(dev), comp->count, ext->priv, buf, -- len); --} -- --static int counter_count_ext_register( -- struct counter_device_attr_group *const group, -- struct counter_count *const count) --{ -- size_t i; -- const struct counter_count_ext *ext; -- struct counter_count_ext_unit *count_ext_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Create an attribute for each extension */ -- for (i = 0 ; i < count->num_ext; i++) { -- ext = count->ext + i; -- -- /* Allocate count_ext attribute component */ -- count_ext_comp = kmalloc(sizeof(*count_ext_comp), GFP_KERNEL); -- if (!count_ext_comp) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- count_ext_comp->count = count; -- count_ext_comp->ext = ext; -- -- /* Allocate count_ext attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = ext->name; -- parm.show = (ext->read) ? counter_count_ext_show : NULL; -- parm.store = (ext->write) ? 
counter_count_ext_store : NULL; -- parm.component = count_ext_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(count_ext_comp); -- goto err_free_attr_list; -- } -- } -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --struct counter_func_avail_unit { -- const enum counter_function *functions_list; -- size_t num_functions; --}; -- --static ssize_t counter_function_available_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_func_avail_unit *const component = devattr->component; -- const enum counter_function *const func_list = component->functions_list; -- const size_t num_functions = component->num_functions; -- size_t i; -- enum counter_function function; -- ssize_t len = 0; -- -- for (i = 0; i < num_functions; i++) { -- function = func_list[i]; -- len += sprintf(buf + len, "%s\n", -- counter_function_str[function]); -- } -- -- return len; --} -- --static int counter_count_attributes_create( -- struct counter_device_attr_group *const group, -- const struct counter_device *const counter, -- struct counter_count *const count) --{ -- struct counter_count_unit *count_comp; -- struct counter_attr_parm parm; -- int err; -- struct counter_count_unit *func_comp; -- struct counter_func_avail_unit *avail_comp; -- -- /* Allocate count attribute component */ -- count_comp = kmalloc(sizeof(*count_comp), GFP_KERNEL); -- if (!count_comp) -- return -ENOMEM; -- count_comp->count = count; -- -- /* Create main Count attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = "count"; -- parm.show = (counter->ops->count_read) ? counter_count_show : NULL; -- parm.store = (counter->ops->count_write) ? counter_count_store : NULL; -- parm.component = count_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(count_comp); -- return err; -- } -- -- /* Allocate function attribute component */ -- func_comp = kmalloc(sizeof(*func_comp), GFP_KERNEL); -- if (!func_comp) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- func_comp->count = count; -- -- /* Create Count function attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = "function"; -- parm.show = (counter->ops->function_get) ? counter_function_show : NULL; -- parm.store = (counter->ops->function_set) ? 
counter_function_store : NULL; -- parm.component = func_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(func_comp); -- goto err_free_attr_list; -- } -- -- /* Allocate function available attribute component */ -- avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL); -- if (!avail_comp) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- avail_comp->functions_list = count->functions_list; -- avail_comp->num_functions = count->num_functions; -- -- /* Create Count function_available attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = "function_available"; -- parm.show = counter_function_available_show; -- parm.store = NULL; -- parm.component = avail_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(avail_comp); -- goto err_free_attr_list; -- } -- -- /* Create Count name attribute */ -- err = counter_name_attribute_create(group, count->name); -- if (err) -- goto err_free_attr_list; -- -- /* Register Count extension attributes */ -- err = counter_count_ext_register(group, count); -- if (err) -- goto err_free_attr_list; -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --static int counter_counts_register( -- struct counter_device_attr_group *const groups_list, -- const struct counter_device *const counter) --{ -- size_t i; -- struct counter_count *count; -- const char *name; -- int err; -- -- /* Register each Count */ -- for (i = 0; i < counter->num_counts; i++) { -- count = counter->counts + i; -- -- /* Generate Count attribute directory name */ -- name = kasprintf(GFP_KERNEL, "count%d", count->id); -- if (!name) { -- err = -ENOMEM; -- goto err_free_attr_groups; -- } -- groups_list[i].attr_group.name = name; -- -- /* Register the Synapses associated with each Count */ -- err = counter_synapses_register(groups_list + i, counter, count, -- name); -- if (err) -- goto err_free_attr_groups; -- -- /* Create all attributes associated with Count */ -- err = counter_count_attributes_create(groups_list + i, counter, -- count); -- if (err) -- goto err_free_attr_groups; -- } -- -- return 0; -- --err_free_attr_groups: -- do { -- kfree(groups_list[i].attr_group.name); -- counter_device_attr_list_free(&groups_list[i].attr_list); -- } while (i--); -- return err; --} -- --struct counter_size_unit { -- size_t size; --}; -- --static ssize_t counter_device_attr_size_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- const struct counter_size_unit *const comp = to_counter_attr(attr)->component; -- -- return sprintf(buf, "%zu\n", comp->size); --} -- --static int counter_size_attribute_create( -- struct counter_device_attr_group *const group, -- const size_t size, const char *const name) --{ -- struct counter_size_unit *size_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Allocate size attribute component */ -- size_comp = kmalloc(sizeof(*size_comp), GFP_KERNEL); -- if (!size_comp) -- return -ENOMEM; -- size_comp->size = size; -- -- parm.group = group; -- parm.prefix = ""; -- parm.name = name; -- parm.show = counter_device_attr_size_show; -- parm.store = NULL; -- parm.component = size_comp; -- err = counter_attribute_create(&parm); -- if (err) -- goto err_free_size_comp; -- -- return 0; -- --err_free_size_comp: -- kfree(size_comp); -- return err; --} -- --struct counter_ext_unit { -- const struct counter_device_ext *ext; --}; -- --static ssize_t counter_device_ext_show(struct device *dev, -- struct device_attribute *attr, char *buf) --{ -- 
const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_ext_unit *const component = devattr->component; -- const struct counter_device_ext *const ext = component->ext; -- -- return ext->read(dev_get_drvdata(dev), ext->priv, buf); --} -- --static ssize_t counter_device_ext_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- const struct counter_device_attr *const devattr = to_counter_attr(attr); -- const struct counter_ext_unit *const component = devattr->component; -- const struct counter_device_ext *const ext = component->ext; -- -- return ext->write(dev_get_drvdata(dev), ext->priv, buf, len); --} -- --static int counter_device_ext_register( -- struct counter_device_attr_group *const group, -- struct counter_device *const counter) --{ -- size_t i; -- struct counter_ext_unit *ext_comp; -- struct counter_attr_parm parm; -- int err; -- -- /* Create an attribute for each extension */ -- for (i = 0 ; i < counter->num_ext; i++) { -- /* Allocate extension attribute component */ -- ext_comp = kmalloc(sizeof(*ext_comp), GFP_KERNEL); -- if (!ext_comp) { -- err = -ENOMEM; -- goto err_free_attr_list; -- } -- -- ext_comp->ext = counter->ext + i; -- -- /* Allocate extension attribute */ -- parm.group = group; -- parm.prefix = ""; -- parm.name = counter->ext[i].name; -- parm.show = (counter->ext[i].read) ? counter_device_ext_show : NULL; -- parm.store = (counter->ext[i].write) ? counter_device_ext_store : NULL; -- parm.component = ext_comp; -- err = counter_attribute_create(&parm); -- if (err) { -- kfree(ext_comp); -- goto err_free_attr_list; -- } -- } -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --static int counter_global_attr_register( -- struct counter_device_attr_group *const group, -- struct counter_device *const counter) --{ -- int err; -- -- /* Create name attribute */ -- err = counter_name_attribute_create(group, counter->name); -- if (err) -- return err; -- -- /* Create num_counts attribute */ -- err = counter_size_attribute_create(group, counter->num_counts, -- "num_counts"); -- if (err) -- goto err_free_attr_list; -- -- /* Create num_signals attribute */ -- err = counter_size_attribute_create(group, counter->num_signals, -- "num_signals"); -- if (err) -- goto err_free_attr_list; -- -- /* Register Counter device extension attributes */ -- err = counter_device_ext_register(group, counter); -- if (err) -- goto err_free_attr_list; -- -- return 0; -- --err_free_attr_list: -- counter_device_attr_list_free(&group->attr_list); -- return err; --} -- --static void counter_device_groups_list_free( -- struct counter_device_attr_group *const groups_list, -- const size_t num_groups) --{ -- struct counter_device_attr_group *group; -- size_t i; -- -- /* loop through all attribute groups (signals, counts, global, etc.) 
*/ -- for (i = 0; i < num_groups; i++) { -- group = groups_list + i; -- -- /* free all attribute group and associated attributes memory */ -- kfree(group->attr_group.name); -- kfree(group->attr_group.attrs); -- counter_device_attr_list_free(&group->attr_list); -- } -- -- kfree(groups_list); --} -- --static int counter_device_groups_list_prepare( -- struct counter_device *const counter) --{ -- const size_t total_num_groups = -- counter->num_signals + counter->num_counts + 1; -- struct counter_device_attr_group *groups_list; -- size_t i; -- int err; -- size_t num_groups = 0; -- -- /* Allocate space for attribute groups (signals, counts, and ext) */ -- groups_list = kcalloc(total_num_groups, sizeof(*groups_list), -- GFP_KERNEL); -- if (!groups_list) -- return -ENOMEM; -- -- /* Initialize attribute lists */ -- for (i = 0; i < total_num_groups; i++) -- INIT_LIST_HEAD(&groups_list[i].attr_list); -- -- /* Register Signals */ -- err = counter_signals_register(groups_list, counter); -- if (err) -- goto err_free_groups_list; -- num_groups += counter->num_signals; -- -- /* Register Counts and respective Synapses */ -- err = counter_counts_register(groups_list + num_groups, counter); -- if (err) -- goto err_free_groups_list; -- num_groups += counter->num_counts; -- -- /* Register Counter global attributes */ -- err = counter_global_attr_register(groups_list + num_groups, counter); -- if (err) -- goto err_free_groups_list; -- num_groups++; -- -- /* Store groups_list in device_state */ -- counter->device_state->groups_list = groups_list; -- counter->device_state->num_groups = num_groups; -- -- return 0; -- --err_free_groups_list: -- counter_device_groups_list_free(groups_list, num_groups); -- return err; --} -- --static int counter_device_groups_prepare( -- struct counter_device_state *const device_state) --{ -- size_t i, j; -- struct counter_device_attr_group *group; -- int err; -- struct counter_device_attr *p; -- -- /* Allocate attribute groups for association with device */ -- device_state->groups = kcalloc(device_state->num_groups + 1, -- sizeof(*device_state->groups), -- GFP_KERNEL); -- if (!device_state->groups) -- return -ENOMEM; -- -- /* Prepare each group of attributes for association */ -- for (i = 0; i < device_state->num_groups; i++) { -- group = device_state->groups_list + i; -- -- /* Allocate space for attribute pointers in attribute group */ -- group->attr_group.attrs = kcalloc(group->num_attr + 1, -- sizeof(*group->attr_group.attrs), GFP_KERNEL); -- if (!group->attr_group.attrs) { -- err = -ENOMEM; -- goto err_free_groups; -- } -- -- /* Add attribute pointers to attribute group */ -- j = 0; -- list_for_each_entry(p, &group->attr_list, l) -- group->attr_group.attrs[j++] = &p->dev_attr.attr; -- -- /* Group attributes in attribute group */ -- device_state->groups[i] = &group->attr_group; -- } -- /* Associate attributes with device */ -- device_state->dev.groups = device_state->groups; -- -- return 0; -- --err_free_groups: -- do { -- group = device_state->groups_list + i; -- kfree(group->attr_group.attrs); -- group->attr_group.attrs = NULL; -- } while (i--); -- kfree(device_state->groups); -- return err; --} -- --/* Provides a unique ID for each counter device */ --static DEFINE_IDA(counter_ida); -- --static void counter_device_release(struct device *dev) --{ -- struct counter_device *const counter = dev_get_drvdata(dev); -- struct counter_device_state *const device_state = counter->device_state; -- -- kfree(device_state->groups); -- 
counter_device_groups_list_free(device_state->groups_list, -- device_state->num_groups); -- ida_simple_remove(&counter_ida, device_state->id); -- kfree(device_state); --} -- --static struct device_type counter_device_type = { -- .name = "counter_device", -- .release = counter_device_release --}; -- --static struct bus_type counter_bus_type = { -- .name = "counter" --}; -- --/** -- * counter_register - register Counter to the system -- * @counter: pointer to Counter to register -- * -- * This function registers a Counter to the system. A sysfs "counter" directory -- * will be created and populated with sysfs attributes correlating with the -- * Counter Signals, Synapses, and Counts respectively. -- */ --int counter_register(struct counter_device *const counter) --{ -- struct counter_device_state *device_state; -- int err; -- -- /* Allocate internal state container for Counter device */ -- device_state = kzalloc(sizeof(*device_state), GFP_KERNEL); -- if (!device_state) -- return -ENOMEM; -- counter->device_state = device_state; -- -- /* Acquire unique ID */ -- device_state->id = ida_simple_get(&counter_ida, 0, 0, GFP_KERNEL); -- if (device_state->id < 0) { -- err = device_state->id; -- goto err_free_device_state; -- } -- -- /* Configure device structure for Counter */ -- device_state->dev.type = &counter_device_type; -- device_state->dev.bus = &counter_bus_type; -- if (counter->parent) { -- device_state->dev.parent = counter->parent; -- device_state->dev.of_node = counter->parent->of_node; -- } -- dev_set_name(&device_state->dev, "counter%d", device_state->id); -- device_initialize(&device_state->dev); -- dev_set_drvdata(&device_state->dev, counter); -- -- /* Prepare device attributes */ -- err = counter_device_groups_list_prepare(counter); -- if (err) -- goto err_free_id; -- -- /* Organize device attributes to groups and match to device */ -- err = counter_device_groups_prepare(device_state); -- if (err) -- goto err_free_groups_list; -- -- /* Add device to system */ -- err = device_add(&device_state->dev); -- if (err) -- goto err_free_groups; -- -- return 0; -- --err_free_groups: -- kfree(device_state->groups); --err_free_groups_list: -- counter_device_groups_list_free(device_state->groups_list, -- device_state->num_groups); --err_free_id: -- ida_simple_remove(&counter_ida, device_state->id); --err_free_device_state: -- kfree(device_state); -- return err; --} --EXPORT_SYMBOL_GPL(counter_register); -- --/** -- * counter_unregister - unregister Counter from the system -- * @counter: pointer to Counter to unregister -- * -- * The Counter is unregistered from the system; all allocated memory is freed. -- */ --void counter_unregister(struct counter_device *const counter) --{ -- if (counter) -- device_del(&counter->device_state->dev); --} --EXPORT_SYMBOL_GPL(counter_unregister); -- --static void devm_counter_unreg(struct device *dev, void *res) --{ -- counter_unregister(*(struct counter_device **)res); --} -- --/** -- * devm_counter_register - Resource-managed counter_register -- * @dev: device to allocate counter_device for -- * @counter: pointer to Counter to register -- * -- * Managed counter_register. The Counter registered with this function is -- * automatically unregistered on driver detach. This function calls -- * counter_register internally. Refer to that function for more information. -- * -- * If an Counter registered with this function needs to be unregistered -- * separately, devm_counter_unregister must be used. 
-- * -- * RETURNS: -- * 0 on success, negative error number on failure. -- */ --int devm_counter_register(struct device *dev, -- struct counter_device *const counter) --{ -- struct counter_device **ptr; -- int ret; -- -- ptr = devres_alloc(devm_counter_unreg, sizeof(*ptr), GFP_KERNEL); -- if (!ptr) -- return -ENOMEM; -- -- ret = counter_register(counter); -- if (!ret) { -- *ptr = counter; -- devres_add(dev, ptr); -- } else { -- devres_free(ptr); -- } -- -- return ret; --} --EXPORT_SYMBOL_GPL(devm_counter_register); -- --static int devm_counter_match(struct device *dev, void *res, void *data) --{ -- struct counter_device **r = res; -- -- if (!r || !*r) { -- WARN_ON(!r || !*r); -- return 0; -- } -- -- return *r == data; --} -- --/** -- * devm_counter_unregister - Resource-managed counter_unregister -- * @dev: device this counter_device belongs to -- * @counter: pointer to Counter associated with the device -- * -- * Unregister Counter registered with devm_counter_register. -- */ --void devm_counter_unregister(struct device *dev, -- struct counter_device *const counter) --{ -- int rc; -- -- rc = devres_release(dev, devm_counter_unreg, devm_counter_match, -- counter); -- WARN_ON(rc); --} --EXPORT_SYMBOL_GPL(devm_counter_unregister); -- --static int __init counter_init(void) --{ -- return bus_register(&counter_bus_type); --} -- --static void __exit counter_exit(void) --{ -- bus_unregister(&counter_bus_type); --} -- --subsys_initcall(counter_init); --module_exit(counter_exit); -- --MODULE_AUTHOR("William Breathitt Gray "); --MODULE_DESCRIPTION("Generic Counter interface"); --MODULE_LICENSE("GPL v2"); -diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c -index 53c15f84909b9..5ef0478709cd8 100644 ---- a/drivers/counter/ftm-quaddec.c -+++ b/drivers/counter/ftm-quaddec.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - #define FTM_FIELD_UPDATE(ftm, offset, mask, val) \ - ({ \ -@@ -115,8 +116,7 @@ static void ftm_quaddec_disable(void *ftm) - } - - static int ftm_quaddec_get_prescaler(struct counter_device *counter, -- struct counter_count *count, -- size_t *cnt_mode) -+ struct counter_count *count, u32 *cnt_mode) - { - struct ftm_quaddec *ftm = counter->priv; - uint32_t scflags; -@@ -129,8 +129,7 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter, - } - - static int ftm_quaddec_set_prescaler(struct counter_device *counter, -- struct counter_count *count, -- size_t cnt_mode) -+ struct counter_count *count, u32 cnt_mode) - { - struct ftm_quaddec *ftm = counter->priv; - -@@ -151,33 +150,17 @@ static const char * const ftm_quaddec_prescaler[] = { - "1", "2", "4", "8", "16", "32", "64", "128" - }; - --static struct counter_count_enum_ext ftm_quaddec_prescaler_enum = { -- .items = ftm_quaddec_prescaler, -- .num_items = ARRAY_SIZE(ftm_quaddec_prescaler), -- .get = ftm_quaddec_get_prescaler, -- .set = ftm_quaddec_set_prescaler --}; -- --enum ftm_quaddec_synapse_action { -- FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES, --}; -- - static const enum counter_synapse_action ftm_quaddec_synapse_actions[] = { -- [FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] = - COUNTER_SYNAPSE_ACTION_BOTH_EDGES - }; - --enum ftm_quaddec_count_function { -- FTM_QUADDEC_COUNT_ENCODER_MODE_1, --}; -- - static const enum counter_function ftm_quaddec_count_functions[] = { -- [FTM_QUADDEC_COUNT_ENCODER_MODE_1] = COUNTER_FUNCTION_QUADRATURE_X4 -+ COUNTER_FUNCTION_QUADRATURE_X4 - }; - - static int ftm_quaddec_count_read(struct counter_device *counter, - struct counter_count *count, -- unsigned 
long *val) -+ u64 *val) - { - struct ftm_quaddec *const ftm = counter->priv; - uint32_t cntval; -@@ -191,7 +174,7 @@ static int ftm_quaddec_count_read(struct counter_device *counter, - - static int ftm_quaddec_count_write(struct counter_device *counter, - struct counter_count *count, -- const unsigned long val) -+ const u64 val) - { - struct ftm_quaddec *const ftm = counter->priv; - -@@ -205,21 +188,21 @@ static int ftm_quaddec_count_write(struct counter_device *counter, - return 0; - } - --static int ftm_quaddec_count_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int ftm_quaddec_count_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { -- *function = FTM_QUADDEC_COUNT_ENCODER_MODE_1; -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - - return 0; - } - --static int ftm_quaddec_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int ftm_quaddec_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { -- *action = FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - - return 0; - } -@@ -227,8 +210,8 @@ static int ftm_quaddec_action_get(struct counter_device *counter, - static const struct counter_ops ftm_quaddec_cnt_ops = { - .count_read = ftm_quaddec_count_read, - .count_write = ftm_quaddec_count_write, -- .function_get = ftm_quaddec_count_function_get, -- .action_get = ftm_quaddec_action_get, -+ .function_read = ftm_quaddec_count_function_read, -+ .action_read = ftm_quaddec_action_read, - }; - - static struct counter_signal ftm_quaddec_signals[] = { -@@ -255,9 +238,12 @@ static struct counter_synapse ftm_quaddec_count_synapses[] = { - } - }; - --static const struct counter_count_ext ftm_quaddec_count_ext[] = { -- COUNTER_COUNT_ENUM("prescaler", &ftm_quaddec_prescaler_enum), -- COUNTER_COUNT_ENUM_AVAILABLE("prescaler", &ftm_quaddec_prescaler_enum), -+static DEFINE_COUNTER_ENUM(ftm_quaddec_prescaler_enum, ftm_quaddec_prescaler); -+ -+static struct counter_comp ftm_quaddec_count_ext[] = { -+ COUNTER_COMP_COUNT_ENUM("prescaler", ftm_quaddec_get_prescaler, -+ ftm_quaddec_set_prescaler, -+ ftm_quaddec_prescaler_enum), - }; - - static struct counter_count ftm_quaddec_counts = { -diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c -index 8a6847d5fb2bd..0924d16de6e26 100644 ---- a/drivers/counter/intel-qep.c -+++ b/drivers/counter/intel-qep.c -@@ -62,13 +62,6 @@ - - #define INTEL_QEP_CLK_PERIOD_NS 10 - --#define INTEL_QEP_COUNTER_EXT_RW(_name) \ --{ \ -- .name = #_name, \ -- .read = _name##_read, \ -- .write = _name##_write, \ --} -- - struct intel_qep { - struct counter_device counter; - struct mutex lock; -@@ -114,8 +107,7 @@ static void intel_qep_init(struct intel_qep *qep) - } - - static int intel_qep_count_read(struct counter_device *counter, -- struct counter_count *count, -- unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct intel_qep *const qep = counter->priv; - -@@ -130,11 +122,11 @@ static const enum counter_function intel_qep_count_functions[] = { - COUNTER_FUNCTION_QUADRATURE_X4, - }; - --static int intel_qep_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int intel_qep_function_read(struct counter_device *counter, -+ struct counter_count 
*count, -+ enum counter_function *function) - { -- *function = 0; -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - - return 0; - } -@@ -143,19 +135,19 @@ static const enum counter_synapse_action intel_qep_synapse_actions[] = { - COUNTER_SYNAPSE_ACTION_BOTH_EDGES, - }; - --static int intel_qep_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int intel_qep_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { -- *action = 0; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; - } - - static const struct counter_ops intel_qep_counter_ops = { - .count_read = intel_qep_count_read, -- .function_get = intel_qep_function_get, -- .action_get = intel_qep_action_get, -+ .function_read = intel_qep_function_read, -+ .action_read = intel_qep_action_read, - }; - - #define INTEL_QEP_SIGNAL(_id, _name) { \ -@@ -181,31 +173,27 @@ static struct counter_synapse intel_qep_count_synapses[] = { - INTEL_QEP_SYNAPSE(2), - }; - --static ssize_t ceiling_read(struct counter_device *counter, -- struct counter_count *count, -- void *priv, char *buf) -+static int intel_qep_ceiling_read(struct counter_device *counter, -+ struct counter_count *count, u64 *ceiling) - { - struct intel_qep *qep = counter->priv; -- u32 reg; - - pm_runtime_get_sync(qep->dev); -- reg = intel_qep_readl(qep, INTEL_QEPMAX); -+ *ceiling = intel_qep_readl(qep, INTEL_QEPMAX); - pm_runtime_put(qep->dev); - -- return sysfs_emit(buf, "%u\n", reg); -+ return 0; - } - --static ssize_t ceiling_write(struct counter_device *counter, -- struct counter_count *count, -- void *priv, const char *buf, size_t len) -+static int intel_qep_ceiling_write(struct counter_device *counter, -+ struct counter_count *count, u64 max) - { - struct intel_qep *qep = counter->priv; -- u32 max; -- int ret; -+ int ret = 0; - -- ret = kstrtou32(buf, 0, &max); -- if (ret < 0) -- return ret; -+ /* Intel QEP ceiling configuration only supports 32-bit values */ -+ if (max != (u32)max) -+ return -ERANGE; - - mutex_lock(&qep->lock); - if (qep->enabled) { -@@ -216,34 +204,28 @@ static ssize_t ceiling_write(struct counter_device *counter, - pm_runtime_get_sync(qep->dev); - intel_qep_writel(qep, INTEL_QEPMAX, max); - pm_runtime_put(qep->dev); -- ret = len; - - out: - mutex_unlock(&qep->lock); - return ret; - } - --static ssize_t enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *priv, char *buf) -+static int intel_qep_enable_read(struct counter_device *counter, -+ struct counter_count *count, u8 *enable) - { - struct intel_qep *qep = counter->priv; - -- return sysfs_emit(buf, "%u\n", qep->enabled); -+ *enable = qep->enabled; -+ -+ return 0; - } - --static ssize_t enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *priv, const char *buf, size_t len) -+static int intel_qep_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 val) - { - struct intel_qep *qep = counter->priv; - u32 reg; -- bool val, changed; -- int ret; -- -- ret = kstrtobool(buf, &val); -- if (ret) -- return ret; -+ bool changed; - - mutex_lock(&qep->lock); - changed = val ^ qep->enabled; -@@ -267,12 +249,12 @@ static ssize_t enable_write(struct counter_device *counter, - - out: - mutex_unlock(&qep->lock); -- return len; -+ return 0; - } - --static ssize_t spike_filter_ns_read(struct counter_device *counter, -- struct counter_count *count, 
-- void *priv, char *buf) -+static int intel_qep_spike_filter_ns_read(struct counter_device *counter, -+ struct counter_count *count, -+ u64 *length) - { - struct intel_qep *qep = counter->priv; - u32 reg; -@@ -281,33 +263,31 @@ static ssize_t spike_filter_ns_read(struct counter_device *counter, - reg = intel_qep_readl(qep, INTEL_QEPCON); - if (!(reg & INTEL_QEPCON_FLT_EN)) { - pm_runtime_put(qep->dev); -- return sysfs_emit(buf, "0\n"); -+ return 0; - } - reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT)); - pm_runtime_put(qep->dev); - -- return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS); -+ *length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS; -+ -+ return 0; - } - --static ssize_t spike_filter_ns_write(struct counter_device *counter, -- struct counter_count *count, -- void *priv, const char *buf, size_t len) -+static int intel_qep_spike_filter_ns_write(struct counter_device *counter, -+ struct counter_count *count, -+ u64 length) - { - struct intel_qep *qep = counter->priv; -- u32 reg, length; -+ u32 reg; - bool enable; -- int ret; -- -- ret = kstrtou32(buf, 0, &length); -- if (ret < 0) -- return ret; -+ int ret = 0; - - /* - * Spike filter length is (MAX_COUNT + 2) clock periods. - * Disable filter when userspace writes 0, enable for valid - * nanoseconds values and error out otherwise. - */ -- length /= INTEL_QEP_CLK_PERIOD_NS; -+ do_div(length, INTEL_QEP_CLK_PERIOD_NS); - if (length == 0) { - enable = false; - length = 0; -@@ -336,16 +316,15 @@ static ssize_t spike_filter_ns_write(struct counter_device *counter, - intel_qep_writel(qep, INTEL_QEPFLT, length); - intel_qep_writel(qep, INTEL_QEPCON, reg); - pm_runtime_put(qep->dev); -- ret = len; - - out: - mutex_unlock(&qep->lock); - return ret; - } - --static ssize_t preset_enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *priv, char *buf) -+static int intel_qep_preset_enable_read(struct counter_device *counter, -+ struct counter_count *count, -+ u8 *preset_enable) - { - struct intel_qep *qep = counter->priv; - u32 reg; -@@ -353,21 +332,18 @@ static ssize_t preset_enable_read(struct counter_device *counter, - pm_runtime_get_sync(qep->dev); - reg = intel_qep_readl(qep, INTEL_QEPCON); - pm_runtime_put(qep->dev); -- return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE)); -+ -+ *preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE); -+ -+ return 0; - } - --static ssize_t preset_enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *priv, const char *buf, size_t len) -+static int intel_qep_preset_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 val) - { - struct intel_qep *qep = counter->priv; - u32 reg; -- bool val; -- int ret; -- -- ret = kstrtobool(buf, &val); -- if (ret) -- return ret; -+ int ret = 0; - - mutex_lock(&qep->lock); - if (qep->enabled) { -@@ -384,7 +360,6 @@ static ssize_t preset_enable_write(struct counter_device *counter, - - intel_qep_writel(qep, INTEL_QEPCON, reg); - pm_runtime_put(qep->dev); -- ret = len; - - out: - mutex_unlock(&qep->lock); -@@ -392,11 +367,14 @@ out: - return ret; - } - --static const struct counter_count_ext intel_qep_count_ext[] = { -- INTEL_QEP_COUNTER_EXT_RW(ceiling), -- INTEL_QEP_COUNTER_EXT_RW(enable), -- INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns), -- INTEL_QEP_COUNTER_EXT_RW(preset_enable) -+static struct counter_comp intel_qep_count_ext[] = { -+ COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write), -+ COUNTER_COMP_CEILING(intel_qep_ceiling_read, 
intel_qep_ceiling_write), -+ COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read, -+ intel_qep_preset_enable_write), -+ COUNTER_COMP_COUNT_U64("spike_filter_ns", -+ intel_qep_spike_filter_ns_read, -+ intel_qep_spike_filter_ns_write), - }; - - static struct counter_count intel_qep_counter_count[] = { -diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c -index 1de4243db488c..8514a87fcbee0 100644 ---- a/drivers/counter/interrupt-cnt.c -+++ b/drivers/counter/interrupt-cnt.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #define INTERRUPT_CNT_NAME "interrupt-cnt" - -@@ -33,30 +34,23 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id) - return IRQ_HANDLED; - } - --static ssize_t interrupt_cnt_enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *private, char *buf) -+static int interrupt_cnt_enable_read(struct counter_device *counter, -+ struct counter_count *count, u8 *enable) - { - struct interrupt_cnt_priv *priv = counter->priv; - -- return sysfs_emit(buf, "%d\n", priv->enabled); -+ *enable = priv->enabled; -+ -+ return 0; - } - --static ssize_t interrupt_cnt_enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *private, const char *buf, -- size_t len) -+static int interrupt_cnt_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 enable) - { - struct interrupt_cnt_priv *priv = counter->priv; -- bool enable; -- ssize_t ret; -- -- ret = kstrtobool(buf, &enable); -- if (ret) -- return ret; - - if (priv->enabled == enable) -- return len; -+ return 0; - - if (enable) { - priv->enabled = true; -@@ -66,33 +60,30 @@ static ssize_t interrupt_cnt_enable_write(struct counter_device *counter, - priv->enabled = false; - } - -- return len; -+ return 0; - } - --static const struct counter_count_ext interrupt_cnt_ext[] = { -- { -- .name = "enable", -- .read = interrupt_cnt_enable_read, -- .write = interrupt_cnt_enable_write, -- }, -+static struct counter_comp interrupt_cnt_ext[] = { -+ COUNTER_COMP_ENABLE(interrupt_cnt_enable_read, -+ interrupt_cnt_enable_write), - }; - - static const enum counter_synapse_action interrupt_cnt_synapse_actions[] = { - COUNTER_SYNAPSE_ACTION_RISING_EDGE, - }; - --static int interrupt_cnt_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int interrupt_cnt_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { -- *action = 0; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - - return 0; - } - - static int interrupt_cnt_read(struct counter_device *counter, -- struct counter_count *count, unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct interrupt_cnt_priv *priv = counter->priv; - -@@ -102,8 +93,7 @@ static int interrupt_cnt_read(struct counter_device *counter, - } - - static int interrupt_cnt_write(struct counter_device *counter, -- struct counter_count *count, -- const unsigned long val) -+ struct counter_count *count, const u64 val) - { - struct interrupt_cnt_priv *priv = counter->priv; - -@@ -119,11 +109,11 @@ static const enum counter_function interrupt_cnt_functions[] = { - COUNTER_FUNCTION_INCREASE, - }; - --static int interrupt_cnt_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int interrupt_cnt_function_read(struct counter_device *counter, -+ struct 
counter_count *count, -+ enum counter_function *function) - { -- *function = 0; -+ *function = COUNTER_FUNCTION_INCREASE; - - return 0; - } -@@ -148,10 +138,10 @@ static int interrupt_cnt_signal_read(struct counter_device *counter, - } - - static const struct counter_ops interrupt_cnt_ops = { -- .action_get = interrupt_cnt_action_get, -+ .action_read = interrupt_cnt_action_read, - .count_read = interrupt_cnt_read, - .count_write = interrupt_cnt_write, -- .function_get = interrupt_cnt_function_get, -+ .function_read = interrupt_cnt_function_read, - .signal_read = interrupt_cnt_signal_read, - }; - -diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c -index 1aa70b9c48330..4edfe1f8fff7a 100644 ---- a/drivers/counter/microchip-tcb-capture.c -+++ b/drivers/counter/microchip-tcb-capture.c -@@ -29,31 +29,18 @@ struct mchp_tc_data { - int qdec_mode; - int num_channels; - int channel[2]; -- bool trig_inverted; --}; -- --enum mchp_tc_count_function { -- MCHP_TC_FUNCTION_INCREASE, -- MCHP_TC_FUNCTION_QUADRATURE, - }; - - static const enum counter_function mchp_tc_count_functions[] = { -- [MCHP_TC_FUNCTION_INCREASE] = COUNTER_FUNCTION_INCREASE, -- [MCHP_TC_FUNCTION_QUADRATURE] = COUNTER_FUNCTION_QUADRATURE_X4, --}; -- --enum mchp_tc_synapse_action { -- MCHP_TC_SYNAPSE_ACTION_NONE = 0, -- MCHP_TC_SYNAPSE_ACTION_RISING_EDGE, -- MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE, -- MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE -+ COUNTER_FUNCTION_INCREASE, -+ COUNTER_FUNCTION_QUADRATURE_X4, - }; - - static const enum counter_synapse_action mchp_tc_synapse_actions[] = { -- [MCHP_TC_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -- [MCHP_TC_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE, -- [MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -- [MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES, -+ COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_RISING_EDGE, -+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES, - }; - - static struct counter_signal mchp_tc_count_signals[] = { -@@ -80,23 +67,23 @@ static struct counter_synapse mchp_tc_count_synapses[] = { - } - }; - --static int mchp_tc_count_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int mchp_tc_count_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { - struct mchp_tc_data *const priv = counter->priv; - - if (priv->qdec_mode) -- *function = MCHP_TC_FUNCTION_QUADRATURE; -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - else -- *function = MCHP_TC_FUNCTION_INCREASE; -+ *function = COUNTER_FUNCTION_INCREASE; - - return 0; - } - --static int mchp_tc_count_function_set(struct counter_device *counter, -- struct counter_count *count, -- size_t function) -+static int mchp_tc_count_function_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function function) - { - struct mchp_tc_data *const priv = counter->priv; - u32 bmr, cmr; -@@ -108,7 +95,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter, - cmr &= ~ATMEL_TC_WAVE; - - switch (function) { -- case MCHP_TC_FUNCTION_INCREASE: -+ case COUNTER_FUNCTION_INCREASE: - priv->qdec_mode = 0; - /* Set highest rate based on whether soc has gclk or not */ - bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN); -@@ -120,7 +107,7 @@ static int mchp_tc_count_function_set(struct counter_device *counter, - cmr |= ATMEL_TC_CMR_MASK; - cmr &= 
~(ATMEL_TC_ABETRG | ATMEL_TC_XC0); - break; -- case MCHP_TC_FUNCTION_QUADRATURE: -+ case COUNTER_FUNCTION_QUADRATURE_X4: - if (!priv->tc_cfg->has_qdec) - return -EINVAL; - /* In QDEC mode settings both channels 0 and 1 are required */ -@@ -166,7 +153,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter, - - regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr); - -- if (priv->trig_inverted) -+ if (signal->id == 1) - sigstatus = (sr & ATMEL_TC_MTIOB); - else - sigstatus = (sr & ATMEL_TC_MTIOA); -@@ -176,57 +163,68 @@ static int mchp_tc_count_signal_read(struct counter_device *counter, - return 0; - } - --static int mchp_tc_count_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int mchp_tc_count_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { - struct mchp_tc_data *const priv = counter->priv; - u32 cmr; - -+ if (priv->qdec_mode) { -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; -+ return 0; -+ } -+ -+ /* Only TIOA signal is evaluated in non-QDEC mode */ -+ if (synapse->signal->id != 0) { -+ *action = COUNTER_SYNAPSE_ACTION_NONE; -+ return 0; -+ } -+ - regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr); - - switch (cmr & ATMEL_TC_ETRGEDG) { - default: -- *action = MCHP_TC_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - break; - case ATMEL_TC_ETRGEDG_RISING: -- *action = MCHP_TC_SYNAPSE_ACTION_RISING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - break; - case ATMEL_TC_ETRGEDG_FALLING: -- *action = MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE; - break; - case ATMEL_TC_ETRGEDG_BOTH: -- *action = MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - break; - } - - return 0; - } - --static int mchp_tc_count_action_set(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t action) -+static int mchp_tc_count_action_write(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action action) - { - struct mchp_tc_data *const priv = counter->priv; - u32 edge = ATMEL_TC_ETRGEDG_NONE; - -- /* QDEC mode is rising edge only */ -- if (priv->qdec_mode) -+ /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */ -+ if (priv->qdec_mode || synapse->signal->id != 0) - return -EINVAL; - - switch (action) { -- case MCHP_TC_SYNAPSE_ACTION_NONE: -+ case COUNTER_SYNAPSE_ACTION_NONE: - edge = ATMEL_TC_ETRGEDG_NONE; - break; -- case MCHP_TC_SYNAPSE_ACTION_RISING_EDGE: -+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE: - edge = ATMEL_TC_ETRGEDG_RISING; - break; -- case MCHP_TC_SYNAPSE_ACTION_FALLING_EDGE: -+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE: - edge = ATMEL_TC_ETRGEDG_FALLING; - break; -- case MCHP_TC_SYNAPSE_ACTION_BOTH_EDGE: -+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES: - edge = ATMEL_TC_ETRGEDG_BOTH; - break; - default: -@@ -240,8 +238,7 @@ static int mchp_tc_count_action_set(struct counter_device *counter, - } - - static int mchp_tc_count_read(struct counter_device *counter, -- struct counter_count *count, -- unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct mchp_tc_data *const priv = counter->priv; - u32 cnt; -@@ -264,12 +261,12 @@ static struct counter_count mchp_tc_counts[] = { - }; - - static const struct 
counter_ops mchp_tc_ops = { -- .signal_read = mchp_tc_count_signal_read, -- .count_read = mchp_tc_count_read, -- .function_get = mchp_tc_count_function_get, -- .function_set = mchp_tc_count_function_set, -- .action_get = mchp_tc_count_action_get, -- .action_set = mchp_tc_count_action_set -+ .signal_read = mchp_tc_count_signal_read, -+ .count_read = mchp_tc_count_read, -+ .function_read = mchp_tc_count_function_read, -+ .function_write = mchp_tc_count_function_write, -+ .action_read = mchp_tc_count_action_read, -+ .action_write = mchp_tc_count_action_write - }; - - static const struct atmel_tcb_config tcb_rm9200_config = { -diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c -index 13656957c45f1..637b3f0b4fa34 100644 ---- a/drivers/counter/stm32-lptimer-cnt.c -+++ b/drivers/counter/stm32-lptimer-cnt.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - - struct stm32_lptim_cnt { - struct counter_device counter; -@@ -69,7 +70,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, - - /* ensure CMP & ARR registers are properly written */ - ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, -- (val & STM32_LPTIM_CMPOK_ARROK), -+ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, - 100, 1000); - if (ret) - return ret; -@@ -107,11 +108,7 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) - return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val); - } - --/** -- * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes -- * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges -- * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature) -- * -+/* - * In non-quadrature mode, device counts up on active edge. 
- * In quadrature mode, encoder counting scenarios are as follows: - * +---------+----------+--------------------+--------------------+ -@@ -129,33 +126,20 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) - * | edges | Low -> | Up | Down | Down | Up | - * +---------+----------+----------+---------+----------+---------+ - */ --enum stm32_lptim_cnt_function { -- STM32_LPTIM_COUNTER_INCREASE, -- STM32_LPTIM_ENCODER_BOTH_EDGE, --}; -- - static const enum counter_function stm32_lptim_cnt_functions[] = { -- [STM32_LPTIM_COUNTER_INCREASE] = COUNTER_FUNCTION_INCREASE, -- [STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_FUNCTION_QUADRATURE_X4, --}; -- --enum stm32_lptim_synapse_action { -- STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE, -- STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE, -- STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES, -- STM32_LPTIM_SYNAPSE_ACTION_NONE, -+ COUNTER_FUNCTION_INCREASE, -+ COUNTER_FUNCTION_QUADRATURE_X4, - }; - - static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = { -- /* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */ -- [STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE, -- [STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -- [STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES, -- [STM32_LPTIM_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_RISING_EDGE, -+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE, -+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES, -+ COUNTER_SYNAPSE_ACTION_NONE, - }; - - static int stm32_lptim_cnt_read(struct counter_device *counter, -- struct counter_count *count, unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct stm32_lptim_cnt *const priv = counter->priv; - u32 cnt; -@@ -170,28 +154,28 @@ static int stm32_lptim_cnt_read(struct counter_device *counter, - return 0; - } - --static int stm32_lptim_cnt_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int stm32_lptim_cnt_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { - struct stm32_lptim_cnt *const priv = counter->priv; - - if (!priv->quadrature_mode) { -- *function = STM32_LPTIM_COUNTER_INCREASE; -+ *function = COUNTER_FUNCTION_INCREASE; - return 0; - } - -- if (priv->polarity == STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES) { -- *function = STM32_LPTIM_ENCODER_BOTH_EDGE; -+ if (priv->polarity == STM32_LPTIM_CKPOL_BOTH_EDGES) { -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - return 0; - } - - return -EINVAL; - } - --static int stm32_lptim_cnt_function_set(struct counter_device *counter, -- struct counter_count *count, -- size_t function) -+static int stm32_lptim_cnt_function_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function function) - { - struct stm32_lptim_cnt *const priv = counter->priv; - -@@ -199,12 +183,12 @@ static int stm32_lptim_cnt_function_set(struct counter_device *counter, - return -EBUSY; - - switch (function) { -- case STM32_LPTIM_COUNTER_INCREASE: -+ case COUNTER_FUNCTION_INCREASE: - priv->quadrature_mode = 0; - return 0; -- case STM32_LPTIM_ENCODER_BOTH_EDGE: -+ case COUNTER_FUNCTION_QUADRATURE_X4: - priv->quadrature_mode = 1; -- priv->polarity = STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES; -+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES; - return 0; - default: - /* should never reach this path */ -@@ -212,9 +196,9 @@ static int stm32_lptim_cnt_function_set(struct 
counter_device *counter, - } - } - --static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *private, char *buf) -+static int stm32_lptim_cnt_enable_read(struct counter_device *counter, -+ struct counter_count *count, -+ u8 *enable) - { - struct stm32_lptim_cnt *const priv = counter->priv; - int ret; -@@ -223,22 +207,18 @@ static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter, - if (ret < 0) - return ret; - -- return scnprintf(buf, PAGE_SIZE, "%u\n", ret); -+ *enable = ret; -+ -+ return 0; - } - --static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *private, -- const char *buf, size_t len) -+static int stm32_lptim_cnt_enable_write(struct counter_device *counter, -+ struct counter_count *count, -+ u8 enable) - { - struct stm32_lptim_cnt *const priv = counter->priv; -- bool enable; - int ret; - -- ret = kstrtobool(buf, &enable); -- if (ret) -- return ret; -- - /* Check nobody uses the timer, or already disabled/enabled */ - ret = stm32_lptim_is_enabled(priv); - if ((ret < 0) || (!ret && !enable)) -@@ -254,78 +234,81 @@ static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter, - if (ret) - return ret; - -- return len; -+ return 0; - } - --static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter, -- struct counter_count *count, -- void *private, char *buf) -+static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter, -+ struct counter_count *count, -+ u64 *ceiling) - { - struct stm32_lptim_cnt *const priv = counter->priv; - -- return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling); -+ *ceiling = priv->ceiling; -+ -+ return 0; - } - --static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter, -- struct counter_count *count, -- void *private, -- const char *buf, size_t len) -+static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter, -+ struct counter_count *count, -+ u64 ceiling) - { - struct stm32_lptim_cnt *const priv = counter->priv; -- unsigned int ceiling; -- int ret; - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - -- ret = kstrtouint(buf, 0, &ceiling); -- if (ret) -- return ret; -- - if (ceiling > STM32_LPTIM_MAX_ARR) - return -ERANGE; - - priv->ceiling = ceiling; - -- return len; -+ return 0; - } - --static const struct counter_count_ext stm32_lptim_cnt_ext[] = { -- { -- .name = "enable", -- .read = stm32_lptim_cnt_enable_read, -- .write = stm32_lptim_cnt_enable_write -- }, -- { -- .name = "ceiling", -- .read = stm32_lptim_cnt_ceiling_read, -- .write = stm32_lptim_cnt_ceiling_write -- }, -+static struct counter_comp stm32_lptim_cnt_ext[] = { -+ COUNTER_COMP_ENABLE(stm32_lptim_cnt_enable_read, -+ stm32_lptim_cnt_enable_write), -+ COUNTER_COMP_CEILING(stm32_lptim_cnt_ceiling_read, -+ stm32_lptim_cnt_ceiling_write), - }; - --static int stm32_lptim_cnt_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int stm32_lptim_cnt_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { - struct stm32_lptim_cnt *const priv = counter->priv; -- size_t function; -+ enum counter_function function; - int err; - -- err = stm32_lptim_cnt_function_get(counter, count, &function); -+ err = stm32_lptim_cnt_function_read(counter, count, &function); - if (err) - return err; - - switch (function) { -- 
case STM32_LPTIM_COUNTER_INCREASE: -+ case COUNTER_FUNCTION_INCREASE: - /* LP Timer acts as up-counter on input 1 */ -- if (synapse->signal->id == count->synapses[0].signal->id) -- *action = priv->polarity; -- else -- *action = STM32_LPTIM_SYNAPSE_ACTION_NONE; -- return 0; -- case STM32_LPTIM_ENCODER_BOTH_EDGE: -- *action = priv->polarity; -+ if (synapse->signal->id != count->synapses[0].signal->id) { -+ *action = COUNTER_SYNAPSE_ACTION_NONE; -+ return 0; -+ } -+ -+ switch (priv->polarity) { -+ case STM32_LPTIM_CKPOL_RISING_EDGE: -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; -+ return 0; -+ case STM32_LPTIM_CKPOL_FALLING_EDGE: -+ *action = COUNTER_SYNAPSE_ACTION_FALLING_EDGE; -+ return 0; -+ case STM32_LPTIM_CKPOL_BOTH_EDGES: -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; -+ return 0; -+ default: -+ /* should never reach this path */ -+ return -EINVAL; -+ } -+ case COUNTER_FUNCTION_QUADRATURE_X4: -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; - default: - /* should never reach this path */ -@@ -333,43 +316,48 @@ static int stm32_lptim_cnt_action_get(struct counter_device *counter, - } - } - --static int stm32_lptim_cnt_action_set(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t action) -+static int stm32_lptim_cnt_action_write(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action action) - { - struct stm32_lptim_cnt *const priv = counter->priv; -- size_t function; -+ enum counter_function function; - int err; - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - -- err = stm32_lptim_cnt_function_get(counter, count, &function); -+ err = stm32_lptim_cnt_function_read(counter, count, &function); - if (err) - return err; - - /* only set polarity when in counter mode (on input 1) */ -- if (function == STM32_LPTIM_COUNTER_INCREASE -- && synapse->signal->id == count->synapses[0].signal->id) { -- switch (action) { -- case STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE: -- case STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE: -- case STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES: -- priv->polarity = action; -- return 0; -- } -- } -+ if (function != COUNTER_FUNCTION_INCREASE -+ || synapse->signal->id != count->synapses[0].signal->id) -+ return -EINVAL; - -- return -EINVAL; -+ switch (action) { -+ case COUNTER_SYNAPSE_ACTION_RISING_EDGE: -+ priv->polarity = STM32_LPTIM_CKPOL_RISING_EDGE; -+ return 0; -+ case COUNTER_SYNAPSE_ACTION_FALLING_EDGE: -+ priv->polarity = STM32_LPTIM_CKPOL_FALLING_EDGE; -+ return 0; -+ case COUNTER_SYNAPSE_ACTION_BOTH_EDGES: -+ priv->polarity = STM32_LPTIM_CKPOL_BOTH_EDGES; -+ return 0; -+ default: -+ return -EINVAL; -+ } - } - - static const struct counter_ops stm32_lptim_cnt_ops = { - .count_read = stm32_lptim_cnt_read, -- .function_get = stm32_lptim_cnt_function_get, -- .function_set = stm32_lptim_cnt_function_set, -- .action_get = stm32_lptim_cnt_action_get, -- .action_set = stm32_lptim_cnt_action_set, -+ .function_read = stm32_lptim_cnt_function_read, -+ .function_write = stm32_lptim_cnt_function_write, -+ .action_read = stm32_lptim_cnt_action_read, -+ .action_write = stm32_lptim_cnt_action_write, - }; - - static struct counter_signal stm32_lptim_cnt_signals[] = { -diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c -index 3fb0debd7425d..0546e932db0c1 100644 ---- a/drivers/counter/stm32-timer-cnt.c -+++ b/drivers/counter/stm32-timer-cnt.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - 
#define TIM_CCMR_CCXS (BIT(8) | BIT(0)) - #define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \ -@@ -36,29 +37,15 @@ struct stm32_timer_cnt { - struct stm32_timer_regs bak; - }; - --/** -- * enum stm32_count_function - enumerates stm32 timer counter encoder modes -- * @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1 -- * @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level -- * @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level -- * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges -- */ --enum stm32_count_function { -- STM32_COUNT_SLAVE_MODE_DISABLED, -- STM32_COUNT_ENCODER_MODE_1, -- STM32_COUNT_ENCODER_MODE_2, -- STM32_COUNT_ENCODER_MODE_3, --}; -- - static const enum counter_function stm32_count_functions[] = { -- [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_FUNCTION_INCREASE, -- [STM32_COUNT_ENCODER_MODE_1] = COUNTER_FUNCTION_QUADRATURE_X2_A, -- [STM32_COUNT_ENCODER_MODE_2] = COUNTER_FUNCTION_QUADRATURE_X2_B, -- [STM32_COUNT_ENCODER_MODE_3] = COUNTER_FUNCTION_QUADRATURE_X4, -+ COUNTER_FUNCTION_INCREASE, -+ COUNTER_FUNCTION_QUADRATURE_X2_A, -+ COUNTER_FUNCTION_QUADRATURE_X2_B, -+ COUNTER_FUNCTION_QUADRATURE_X4, - }; - - static int stm32_count_read(struct counter_device *counter, -- struct counter_count *count, unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 cnt; -@@ -70,8 +57,7 @@ static int stm32_count_read(struct counter_device *counter, - } - - static int stm32_count_write(struct counter_device *counter, -- struct counter_count *count, -- const unsigned long val) -+ struct counter_count *count, const u64 val) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 ceiling; -@@ -83,9 +69,9 @@ static int stm32_count_write(struct counter_device *counter, - return regmap_write(priv->regmap, TIM_CNT, val); - } - --static int stm32_count_function_get(struct counter_device *counter, -- struct counter_count *count, -- size_t *function) -+static int stm32_count_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 smcr; -@@ -93,42 +79,42 @@ static int stm32_count_function_get(struct counter_device *counter, - regmap_read(priv->regmap, TIM_SMCR, &smcr); - - switch (smcr & TIM_SMCR_SMS) { -- case 0: -- *function = STM32_COUNT_SLAVE_MODE_DISABLED; -+ case TIM_SMCR_SMS_SLAVE_MODE_DISABLED: -+ *function = COUNTER_FUNCTION_INCREASE; - return 0; -- case 1: -- *function = STM32_COUNT_ENCODER_MODE_1; -+ case TIM_SMCR_SMS_ENCODER_MODE_1: -+ *function = COUNTER_FUNCTION_QUADRATURE_X2_A; - return 0; -- case 2: -- *function = STM32_COUNT_ENCODER_MODE_2; -+ case TIM_SMCR_SMS_ENCODER_MODE_2: -+ *function = COUNTER_FUNCTION_QUADRATURE_X2_B; - return 0; -- case 3: -- *function = STM32_COUNT_ENCODER_MODE_3; -+ case TIM_SMCR_SMS_ENCODER_MODE_3: -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; - return 0; - default: - return -EINVAL; - } - } - --static int stm32_count_function_set(struct counter_device *counter, -- struct counter_count *count, -- size_t function) -+static int stm32_count_function_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function function) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 cr1, sms; - - switch (function) { -- case STM32_COUNT_SLAVE_MODE_DISABLED: -- sms = 0; -+ case COUNTER_FUNCTION_INCREASE: -+ sms = 
TIM_SMCR_SMS_SLAVE_MODE_DISABLED; - break; -- case STM32_COUNT_ENCODER_MODE_1: -- sms = 1; -+ case COUNTER_FUNCTION_QUADRATURE_X2_A: -+ sms = TIM_SMCR_SMS_ENCODER_MODE_1; - break; -- case STM32_COUNT_ENCODER_MODE_2: -- sms = 2; -+ case COUNTER_FUNCTION_QUADRATURE_X2_B: -+ sms = TIM_SMCR_SMS_ENCODER_MODE_2; - break; -- case STM32_COUNT_ENCODER_MODE_3: -- sms = 3; -+ case COUNTER_FUNCTION_QUADRATURE_X4: -+ sms = TIM_SMCR_SMS_ENCODER_MODE_3; - break; - default: - return -EINVAL; -@@ -150,44 +136,37 @@ static int stm32_count_function_set(struct counter_device *counter, - return 0; - } - --static ssize_t stm32_count_direction_read(struct counter_device *counter, -+static int stm32_count_direction_read(struct counter_device *counter, - struct counter_count *count, -- void *private, char *buf) -+ enum counter_count_direction *direction) - { - struct stm32_timer_cnt *const priv = counter->priv; -- const char *direction; - u32 cr1; - - regmap_read(priv->regmap, TIM_CR1, &cr1); -- direction = (cr1 & TIM_CR1_DIR) ? "backward" : "forward"; -+ *direction = (cr1 & TIM_CR1_DIR) ? COUNTER_COUNT_DIRECTION_BACKWARD : -+ COUNTER_COUNT_DIRECTION_FORWARD; - -- return scnprintf(buf, PAGE_SIZE, "%s\n", direction); -+ return 0; - } - --static ssize_t stm32_count_ceiling_read(struct counter_device *counter, -- struct counter_count *count, -- void *private, char *buf) -+static int stm32_count_ceiling_read(struct counter_device *counter, -+ struct counter_count *count, u64 *ceiling) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 arr; - - regmap_read(priv->regmap, TIM_ARR, &arr); - -- return snprintf(buf, PAGE_SIZE, "%u\n", arr); -+ *ceiling = arr; -+ -+ return 0; - } - --static ssize_t stm32_count_ceiling_write(struct counter_device *counter, -- struct counter_count *count, -- void *private, -- const char *buf, size_t len) -+static int stm32_count_ceiling_write(struct counter_device *counter, -+ struct counter_count *count, u64 ceiling) - { - struct stm32_timer_cnt *const priv = counter->priv; -- unsigned int ceiling; -- int ret; -- -- ret = kstrtouint(buf, 0, &ceiling); -- if (ret) -- return ret; - - if (ceiling > priv->max_arr) - return -ERANGE; -@@ -196,34 +175,27 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter, - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); - regmap_write(priv->regmap, TIM_ARR, ceiling); - -- return len; -+ return 0; - } - --static ssize_t stm32_count_enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *private, char *buf) -+static int stm32_count_enable_read(struct counter_device *counter, -+ struct counter_count *count, u8 *enable) - { - struct stm32_timer_cnt *const priv = counter->priv; - u32 cr1; - - regmap_read(priv->regmap, TIM_CR1, &cr1); - -- return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)(cr1 & TIM_CR1_CEN)); -+ *enable = cr1 & TIM_CR1_CEN; -+ -+ return 0; - } - --static ssize_t stm32_count_enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *private, -- const char *buf, size_t len) -+static int stm32_count_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 enable) - { - struct stm32_timer_cnt *const priv = counter->priv; -- int err; - u32 cr1; -- bool enable; -- -- err = kstrtobool(buf, &enable); -- if (err) -- return err; - - if (enable) { - regmap_read(priv->regmap, TIM_CR1, &cr1); -@@ -242,70 +214,55 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter, - /* Keep enabled state to properly handle low power 
states */ - priv->enabled = enable; - -- return len; -+ return 0; - } - --static const struct counter_count_ext stm32_count_ext[] = { -- { -- .name = "direction", -- .read = stm32_count_direction_read, -- }, -- { -- .name = "enable", -- .read = stm32_count_enable_read, -- .write = stm32_count_enable_write -- }, -- { -- .name = "ceiling", -- .read = stm32_count_ceiling_read, -- .write = stm32_count_ceiling_write -- }, --}; -- --enum stm32_synapse_action { -- STM32_SYNAPSE_ACTION_NONE, -- STM32_SYNAPSE_ACTION_BOTH_EDGES -+static struct counter_comp stm32_count_ext[] = { -+ COUNTER_COMP_DIRECTION(stm32_count_direction_read), -+ COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write), -+ COUNTER_COMP_CEILING(stm32_count_ceiling_read, -+ stm32_count_ceiling_write), - }; - - static const enum counter_synapse_action stm32_synapse_actions[] = { -- [STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -- [STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES -+ COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES - }; - --static int stm32_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, -- size_t *action) -+static int stm32_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { -- size_t function; -+ enum counter_function function; - int err; - -- err = stm32_count_function_get(counter, count, &function); -+ err = stm32_count_function_read(counter, count, &function); - if (err) - return err; - - switch (function) { -- case STM32_COUNT_SLAVE_MODE_DISABLED: -+ case COUNTER_FUNCTION_INCREASE: - /* counts on internal clock when CEN=1 */ -- *action = STM32_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - return 0; -- case STM32_COUNT_ENCODER_MODE_1: -+ case COUNTER_FUNCTION_QUADRATURE_X2_A: - /* counts up/down on TI1FP1 edge depending on TI2FP2 level */ - if (synapse->signal->id == count->synapses[0].signal->id) -- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - else -- *action = STM32_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - return 0; -- case STM32_COUNT_ENCODER_MODE_2: -+ case COUNTER_FUNCTION_QUADRATURE_X2_B: - /* counts up/down on TI2FP2 edge depending on TI1FP1 level */ - if (synapse->signal->id == count->synapses[1].signal->id) -- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - else -- *action = STM32_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - return 0; -- case STM32_COUNT_ENCODER_MODE_3: -+ case COUNTER_FUNCTION_QUADRATURE_X4: - /* counts up/down on both TI1FP1 and TI2FP2 edges */ -- *action = STM32_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; - default: - return -EINVAL; -@@ -315,9 +272,9 @@ static int stm32_action_get(struct counter_device *counter, - static const struct counter_ops stm32_timer_cnt_ops = { - .count_read = stm32_count_read, - .count_write = stm32_count_write, -- .function_get = stm32_count_function_get, -- .function_set = stm32_count_function_set, -- .action_get = stm32_action_get, -+ .function_read = stm32_count_function_read, -+ .function_write = stm32_count_function_write, -+ .action_read = stm32_action_read, - }; - - static struct counter_signal stm32_signals[] = { -diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c -index 94fe58bb3eab3..09817c953f9ab 100644 
---- a/drivers/counter/ti-eqep.c -+++ b/drivers/counter/ti-eqep.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - /* 32-bit registers */ - #define QPOSCNT 0x0 -@@ -73,19 +74,13 @@ enum { - }; - - /* Position Counter Input Modes */ --enum { -+enum ti_eqep_count_func { - TI_EQEP_COUNT_FUNC_QUAD_COUNT, - TI_EQEP_COUNT_FUNC_DIR_COUNT, - TI_EQEP_COUNT_FUNC_UP_COUNT, - TI_EQEP_COUNT_FUNC_DOWN_COUNT, - }; - --enum { -- TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES, -- TI_EQEP_SYNAPSE_ACTION_RISING_EDGE, -- TI_EQEP_SYNAPSE_ACTION_NONE, --}; -- - struct ti_eqep_cnt { - struct counter_device counter; - struct regmap *regmap32; -@@ -93,7 +88,7 @@ struct ti_eqep_cnt { - }; - - static int ti_eqep_count_read(struct counter_device *counter, -- struct counter_count *count, unsigned long *val) -+ struct counter_count *count, u64 *val) - { - struct ti_eqep_cnt *priv = counter->priv; - u32 cnt; -@@ -105,7 +100,7 @@ static int ti_eqep_count_read(struct counter_device *counter, - } - - static int ti_eqep_count_write(struct counter_device *counter, -- struct counter_count *count, unsigned long val) -+ struct counter_count *count, u64 val) - { - struct ti_eqep_cnt *priv = counter->priv; - u32 max; -@@ -117,64 +112,100 @@ static int ti_eqep_count_write(struct counter_device *counter, - return regmap_write(priv->regmap32, QPOSCNT, val); - } - --static int ti_eqep_function_get(struct counter_device *counter, -- struct counter_count *count, size_t *function) -+static int ti_eqep_function_read(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function *function) - { - struct ti_eqep_cnt *priv = counter->priv; - u32 qdecctl; - - regmap_read(priv->regmap16, QDECCTL, &qdecctl); -- *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT; -+ -+ switch ((qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT) { -+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT: -+ *function = COUNTER_FUNCTION_QUADRATURE_X4; -+ break; -+ case TI_EQEP_COUNT_FUNC_DIR_COUNT: -+ *function = COUNTER_FUNCTION_PULSE_DIRECTION; -+ break; -+ case TI_EQEP_COUNT_FUNC_UP_COUNT: -+ *function = COUNTER_FUNCTION_INCREASE; -+ break; -+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT: -+ *function = COUNTER_FUNCTION_DECREASE; -+ break; -+ } - - return 0; - } - --static int ti_eqep_function_set(struct counter_device *counter, -- struct counter_count *count, size_t function) -+static int ti_eqep_function_write(struct counter_device *counter, -+ struct counter_count *count, -+ enum counter_function function) - { - struct ti_eqep_cnt *priv = counter->priv; -+ enum ti_eqep_count_func qsrc; -+ -+ switch (function) { -+ case COUNTER_FUNCTION_QUADRATURE_X4: -+ qsrc = TI_EQEP_COUNT_FUNC_QUAD_COUNT; -+ break; -+ case COUNTER_FUNCTION_PULSE_DIRECTION: -+ qsrc = TI_EQEP_COUNT_FUNC_DIR_COUNT; -+ break; -+ case COUNTER_FUNCTION_INCREASE: -+ qsrc = TI_EQEP_COUNT_FUNC_UP_COUNT; -+ break; -+ case COUNTER_FUNCTION_DECREASE: -+ qsrc = TI_EQEP_COUNT_FUNC_DOWN_COUNT; -+ break; -+ default: -+ /* should never reach this path */ -+ return -EINVAL; -+ } - - return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC, -- function << QDECCTL_QSRC_SHIFT); -+ qsrc << QDECCTL_QSRC_SHIFT); - } - --static int ti_eqep_action_get(struct counter_device *counter, -- struct counter_count *count, -- struct counter_synapse *synapse, size_t *action) -+static int ti_eqep_action_read(struct counter_device *counter, -+ struct counter_count *count, -+ struct counter_synapse *synapse, -+ enum counter_synapse_action *action) - { - struct ti_eqep_cnt *priv = counter->priv; -- size_t function; 
-+ enum counter_function function; - u32 qdecctl; - int err; - -- err = ti_eqep_function_get(counter, count, &function); -+ err = ti_eqep_function_read(counter, count, &function); - if (err) - return err; - - switch (function) { -- case TI_EQEP_COUNT_FUNC_QUAD_COUNT: -+ case COUNTER_FUNCTION_QUADRATURE_X4: - /* In quadrature mode, the rising and falling edge of both - * QEPA and QEPB trigger QCLK. - */ -- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - return 0; -- case TI_EQEP_COUNT_FUNC_DIR_COUNT: -+ case COUNTER_FUNCTION_PULSE_DIRECTION: - /* In direction-count mode only rising edge of QEPA is counted - * and QEPB gives direction. - */ - switch (synapse->signal->id) { - case TI_EQEP_SIGNAL_QEPA: -- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - return 0; - case TI_EQEP_SIGNAL_QEPB: -- *action = TI_EQEP_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - return 0; - default: - /* should never reach this path */ - return -EINVAL; - } -- case TI_EQEP_COUNT_FUNC_UP_COUNT: -- case TI_EQEP_COUNT_FUNC_DOWN_COUNT: -+ case COUNTER_FUNCTION_INCREASE: -+ case COUNTER_FUNCTION_DECREASE: - /* In up/down-count modes only QEPA is counted and QEPB is not - * used. - */ -@@ -185,12 +216,12 @@ static int ti_eqep_action_get(struct counter_device *counter, - return err; - - if (qdecctl & QDECCTL_XCR) -- *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES; -+ *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; - else -- *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE; -+ *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; - return 0; - case TI_EQEP_SIGNAL_QEPB: -- *action = TI_EQEP_SYNAPSE_ACTION_NONE; -+ *action = COUNTER_SYNAPSE_ACTION_NONE; - return 0; - default: - /* should never reach this path */ -@@ -205,82 +236,67 @@ static int ti_eqep_action_get(struct counter_device *counter, - static const struct counter_ops ti_eqep_counter_ops = { - .count_read = ti_eqep_count_read, - .count_write = ti_eqep_count_write, -- .function_get = ti_eqep_function_get, -- .function_set = ti_eqep_function_set, -- .action_get = ti_eqep_action_get, -+ .function_read = ti_eqep_function_read, -+ .function_write = ti_eqep_function_write, -+ .action_read = ti_eqep_action_read, - }; - --static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter, -- struct counter_count *count, -- void *ext_priv, char *buf) -+static int ti_eqep_position_ceiling_read(struct counter_device *counter, -+ struct counter_count *count, -+ u64 *ceiling) - { - struct ti_eqep_cnt *priv = counter->priv; - u32 qposmax; - - regmap_read(priv->regmap32, QPOSMAX, &qposmax); - -- return sprintf(buf, "%u\n", qposmax); -+ *ceiling = qposmax; -+ -+ return 0; - } - --static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter, -- struct counter_count *count, -- void *ext_priv, const char *buf, -- size_t len) -+static int ti_eqep_position_ceiling_write(struct counter_device *counter, -+ struct counter_count *count, -+ u64 ceiling) - { - struct ti_eqep_cnt *priv = counter->priv; -- int err; -- u32 res; - -- err = kstrtouint(buf, 0, &res); -- if (err < 0) -- return err; -+ if (ceiling != (u32)ceiling) -+ return -ERANGE; - -- regmap_write(priv->regmap32, QPOSMAX, res); -+ regmap_write(priv->regmap32, QPOSMAX, ceiling); - -- return len; -+ return 0; - } - --static ssize_t ti_eqep_position_enable_read(struct counter_device *counter, -- struct counter_count *count, -- void *ext_priv, char *buf) -+static int ti_eqep_position_enable_read(struct 
counter_device *counter, -+ struct counter_count *count, u8 *enable) - { - struct ti_eqep_cnt *priv = counter->priv; - u32 qepctl; - - regmap_read(priv->regmap16, QEPCTL, &qepctl); - -- return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN)); -+ *enable = !!(qepctl & QEPCTL_PHEN); -+ -+ return 0; - } - --static ssize_t ti_eqep_position_enable_write(struct counter_device *counter, -- struct counter_count *count, -- void *ext_priv, const char *buf, -- size_t len) -+static int ti_eqep_position_enable_write(struct counter_device *counter, -+ struct counter_count *count, u8 enable) - { - struct ti_eqep_cnt *priv = counter->priv; -- int err; -- bool res; -- -- err = kstrtobool(buf, &res); -- if (err < 0) -- return err; - -- regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0); -+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0); - -- return len; -+ return 0; - } - --static struct counter_count_ext ti_eqep_position_ext[] = { -- { -- .name = "ceiling", -- .read = ti_eqep_position_ceiling_read, -- .write = ti_eqep_position_ceiling_write, -- }, -- { -- .name = "enable", -- .read = ti_eqep_position_enable_read, -- .write = ti_eqep_position_enable_write, -- }, -+static struct counter_comp ti_eqep_position_ext[] = { -+ COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read, -+ ti_eqep_position_ceiling_write), -+ COUNTER_COMP_ENABLE(ti_eqep_position_enable_read, -+ ti_eqep_position_enable_write), - }; - - static struct counter_signal ti_eqep_signals[] = { -@@ -295,16 +311,16 @@ static struct counter_signal ti_eqep_signals[] = { - }; - - static const enum counter_function ti_eqep_position_functions[] = { -- [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_FUNCTION_QUADRATURE_X4, -- [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_FUNCTION_PULSE_DIRECTION, -- [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_FUNCTION_INCREASE, -- [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_FUNCTION_DECREASE, -+ COUNTER_FUNCTION_QUADRATURE_X4, -+ COUNTER_FUNCTION_PULSE_DIRECTION, -+ COUNTER_FUNCTION_INCREASE, -+ COUNTER_FUNCTION_DECREASE, - }; - - static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = { -- [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES, -- [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE, -- [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE, -+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES, -+ COUNTER_SYNAPSE_ACTION_RISING_EDGE, -+ COUNTER_SYNAPSE_ACTION_NONE, - }; - - static struct counter_synapse ti_eqep_position_synapses[] = { -diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c -index d0b10baf039ab..151771129c7ba 100644 ---- a/drivers/cpufreq/amd_freq_sensitivity.c -+++ b/drivers/cpufreq/amd_freq_sensitivity.c -@@ -124,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void) - if (!pcidev) { - if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK)) - return -ENODEV; -+ } else { -+ pci_dev_put(pcidev); - } - - if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) -diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c -index c10fc33b29b18..b74289a95a171 100644 ---- a/drivers/cpufreq/armada-37xx-cpufreq.c -+++ b/drivers/cpufreq/armada-37xx-cpufreq.c -@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void) - return -ENODEV; - } - -- clk = clk_get(cpu_dev, 0); -+ clk = clk_get(cpu_dev, NULL); - if (IS_ERR(clk)) { - dev_err(cpu_dev, "Cannot get clock for CPU0\n"); - return PTR_ERR(clk); -diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c 
b/drivers/cpufreq/brcmstb-avs-cpufreq.c -index 4153150e20db5..f644c5e325fb2 100644 ---- a/drivers/cpufreq/brcmstb-avs-cpufreq.c -+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c -@@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) - if (ret) - return ERR_PTR(ret); - -- table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table), -+ /* -+ * We allocate space for the 5 different P-STATES AVS, -+ * plus extra space for a terminating element. -+ */ -+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table), - GFP_KERNEL); - if (!table) - return ERR_PTR(-ENOMEM); -diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c -index d4c27022b9c9b..e0ff09d66c96b 100644 ---- a/drivers/cpufreq/cppc_cpufreq.c -+++ b/drivers/cpufreq/cppc_cpufreq.c -@@ -303,52 +303,48 @@ static u64 cppc_get_dmi_max_khz(void) - - /* - * If CPPC lowest_freq and nominal_freq registers are exposed then we can -- * use them to convert perf to freq and vice versa -- * -- * If the perf/freq point lies between Nominal and Lowest, we can treat -- * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line -- * and extrapolate the rest -- * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion -+ * use them to convert perf to freq and vice versa. The conversion is -+ * extrapolated as an affine function passing by the 2 points: -+ * - (Low perf, Low freq) -+ * - (Nominal perf, Nominal perf) - */ - static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, - unsigned int perf) - { - struct cppc_perf_caps *caps = &cpu_data->perf_caps; -+ s64 retval, offset = 0; - static u64 max_khz; - u64 mul, div; - - if (caps->lowest_freq && caps->nominal_freq) { -- if (perf >= caps->nominal_perf) { -- mul = caps->nominal_freq; -- div = caps->nominal_perf; -- } else { -- mul = caps->nominal_freq - caps->lowest_freq; -- div = caps->nominal_perf - caps->lowest_perf; -- } -+ mul = caps->nominal_freq - caps->lowest_freq; -+ div = caps->nominal_perf - caps->lowest_perf; -+ offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div); - } else { - if (!max_khz) - max_khz = cppc_get_dmi_max_khz(); - mul = max_khz; - div = caps->highest_perf; - } -- return (u64)perf * mul / div; -+ -+ retval = offset + div64_u64(perf * mul, div); -+ if (retval >= 0) -+ return retval; -+ return 0; - } - - static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, - unsigned int freq) - { - struct cppc_perf_caps *caps = &cpu_data->perf_caps; -+ s64 retval, offset = 0; - static u64 max_khz; - u64 mul, div; - - if (caps->lowest_freq && caps->nominal_freq) { -- if (freq >= caps->nominal_freq) { -- mul = caps->nominal_perf; -- div = caps->nominal_freq; -- } else { -- mul = caps->lowest_perf; -- div = caps->lowest_freq; -- } -+ mul = caps->nominal_perf - caps->lowest_perf; -+ div = caps->nominal_freq - caps->lowest_freq; -+ offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div); - } else { - if (!max_khz) - max_khz = cppc_get_dmi_max_khz(); -@@ -356,7 +352,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, - div = max_khz; - } - -- return (u64)freq * mul / div; -+ retval = offset + div64_u64(freq * mul, div); -+ if (retval >= 0) -+ return retval; -+ return 0; - } - - static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, -diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c -index ca1d103ec4492..e1b5975c7daa1 100644 ---- 
a/drivers/cpufreq/cpufreq-dt-platdev.c -+++ b/drivers/cpufreq/cpufreq-dt-platdev.c -@@ -133,6 +133,7 @@ static const struct of_device_id blocklist[] __initconst = { - { .compatible = "nvidia,tegra30", }, - { .compatible = "nvidia,tegra124", }, - { .compatible = "nvidia,tegra210", }, -+ { .compatible = "nvidia,tegra234", }, - - { .compatible = "qcom,apq8096", }, - { .compatible = "qcom,msm8996", }, -@@ -143,6 +144,7 @@ static const struct of_device_id blocklist[] __initconst = { - { .compatible = "qcom,sc8180x", }, - { .compatible = "qcom,sdm845", }, - { .compatible = "qcom,sm6350", }, -+ { .compatible = "qcom,sm6375", }, - { .compatible = "qcom,sm8150", }, - { .compatible = "qcom,sm8250", }, - { .compatible = "qcom,sm8350", }, -diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c -index 5782b15a8caad..c2227be7bad88 100644 ---- a/drivers/cpufreq/cpufreq.c -+++ b/drivers/cpufreq/cpufreq.c -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include - - static LIST_HEAD(cpufreq_policy_list); -@@ -449,8 +450,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy, - policy->cur, - policy->cpuinfo.max_freq); - -+ spin_lock(&policy->transition_lock); - policy->transition_ongoing = false; - policy->transition_task = NULL; -+ spin_unlock(&policy->transition_lock); - - wake_up(&policy->transition_wait); - } -@@ -531,7 +534,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy, - - target_freq = clamp_val(target_freq, policy->min, policy->max); - -- if (!cpufreq_driver->target_index) -+ if (!policy->freq_table) - return target_freq; - - idx = cpufreq_frequency_table_target(policy, target_freq, relation); -@@ -1004,10 +1007,9 @@ static struct kobj_type ktype_cpufreq = { - .release = cpufreq_sysfs_release, - }; - --static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) -+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu, -+ struct device *dev) - { -- struct device *dev = get_cpu_device(cpu); -- - if (unlikely(!dev)) - return; - -@@ -1212,6 +1214,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) - if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) - goto err_free_rcpumask; - -+ init_completion(&policy->kobj_unregister); - ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, - cpufreq_global_kobject, "policy%u", cpu); - if (ret) { -@@ -1250,7 +1253,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) - init_rwsem(&policy->rwsem); - spin_lock_init(&policy->transition_lock); - init_waitqueue_head(&policy->transition_wait); -- init_completion(&policy->kobj_unregister); - INIT_WORK(&policy->update, handle_update); - - policy->cpu = cpu; -@@ -1391,7 +1393,7 @@ static int cpufreq_online(unsigned int cpu) - if (new_policy) { - for_each_cpu(j, policy->related_cpus) { - per_cpu(cpufreq_cpu_data, j) = policy; -- add_cpu_dev_symlink(policy, j); -+ add_cpu_dev_symlink(policy, j, get_cpu_device(j)); - } - - policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req), -@@ -1403,7 +1405,7 @@ static int cpufreq_online(unsigned int cpu) - - ret = freq_qos_add_request(&policy->constraints, - policy->min_freq_req, FREQ_QOS_MIN, -- policy->min); -+ FREQ_QOS_MIN_DEFAULT_VALUE); - if (ret < 0) { - /* - * So we don't call freq_qos_remove_request() for an -@@ -1423,7 +1425,7 @@ static int cpufreq_online(unsigned int cpu) - - ret = freq_qos_add_request(&policy->constraints, - policy->max_freq_req, FREQ_QOS_MAX, -- policy->max); -+ 
FREQ_QOS_MAX_DEFAULT_VALUE); - if (ret < 0) { - policy->max_freq_req = NULL; - goto out_destroy_policy; -@@ -1565,7 +1567,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) - /* Create sysfs link on CPU registration */ - policy = per_cpu(cpufreq_cpu_data, cpu); - if (policy) -- add_cpu_dev_symlink(policy, cpu); -+ add_cpu_dev_symlink(policy, cpu, dev); - - return 0; - } -@@ -1702,6 +1704,16 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b - return new_freq; - - if (policy->cur != new_freq) { -+ /* -+ * For some platforms, the frequency returned by hardware may be -+ * slightly different from what is provided in the frequency -+ * table, for example hardware may return 499 MHz instead of 500 -+ * MHz. In such cases it is better to avoid getting into -+ * unnecessary frequency updates. -+ */ -+ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ) -+ return policy->cur; -+ - cpufreq_out_of_sync(policy, new_freq); - if (update) - schedule_work(&policy->update); -@@ -2523,8 +2535,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, - if (ret) - return ret; - -+ /* -+ * Resolve policy min/max to available frequencies. It ensures -+ * no frequency resolution will neither overshoot the requested maximum -+ * nor undershoot the requested minimum. -+ */ - policy->min = new_data.min; - policy->max = new_data.max; -+ policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); -+ policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); - trace_cpu_frequency_limits(policy); - - policy->cached_target_freq = UINT_MAX; -diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c -index 63f7c219062b9..55c80319d2684 100644 ---- a/drivers/cpufreq/cpufreq_governor.c -+++ b/drivers/cpufreq/cpufreq_governor.c -@@ -388,6 +388,15 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs, - gov->free(policy_dbs); - } - -+static void cpufreq_dbs_data_release(struct kobject *kobj) -+{ -+ struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj)); -+ struct dbs_governor *gov = dbs_data->gov; -+ -+ gov->exit(dbs_data); -+ kfree(dbs_data); -+} -+ - int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) - { - struct dbs_governor *gov = dbs_governor_of(policy); -@@ -425,6 +434,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) - goto free_policy_dbs_info; - } - -+ dbs_data->gov = gov; - gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list); - - ret = gov->init(dbs_data); -@@ -447,6 +457,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) - policy->governor_data = policy_dbs; - - gov->kobj_type.sysfs_ops = &governor_sysfs_ops; -+ gov->kobj_type.release = cpufreq_dbs_data_release; - ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type, - get_governor_parent_kobj(policy), - "%s", gov->gov.name); -@@ -488,13 +499,8 @@ void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy) - - policy->governor_data = NULL; - -- if (!count) { -- if (!have_governor_per_policy()) -- gov->gdbs_data = NULL; -- -- gov->exit(dbs_data); -- kfree(dbs_data); -- } -+ if (!count && !have_governor_per_policy()) -+ gov->gdbs_data = NULL; - - free_policy_dbs_info(policy_dbs, gov); - -diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h -index bab8e61403771..a6de26318abb8 100644 ---- a/drivers/cpufreq/cpufreq_governor.h -+++ b/drivers/cpufreq/cpufreq_governor.h -@@ -37,6 +37,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; - /* 
Governor demand based switching data (per-policy or global). */ - struct dbs_data { - struct gov_attr_set attr_set; -+ struct dbs_governor *gov; - void *tuners; - unsigned int ignore_nice_load; - unsigned int sampling_rate; -diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c -index a6f365b9cc1ad..771770ea0ed0b 100644 ---- a/drivers/cpufreq/cpufreq_governor_attr_set.c -+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c -@@ -8,11 +8,6 @@ - - #include "cpufreq_governor.h" - --static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) --{ -- return container_of(kobj, struct gov_attr_set, kobj); --} -- - static inline struct governor_attr *to_gov_attr(struct attribute *attr) - { - return container_of(attr, struct governor_attr, attr); -diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c -index 8c176b7dae415..736cb2cfcbb08 100644 ---- a/drivers/cpufreq/intel_pstate.c -+++ b/drivers/cpufreq/intel_pstate.c -@@ -27,6 +27,7 @@ - #include - #include - -+#include - #include - #include - #include -@@ -277,10 +278,10 @@ static struct cpudata **all_cpu_data; - * structure is used to store those callbacks. - */ - struct pstate_funcs { -- int (*get_max)(void); -- int (*get_max_physical)(void); -- int (*get_min)(void); -- int (*get_turbo)(void); -+ int (*get_max)(int cpu); -+ int (*get_max_physical)(int cpu); -+ int (*get_min)(int cpu); -+ int (*get_turbo)(int cpu); - int (*get_scaling)(void); - int (*get_cpu_scaling)(int cpu); - int (*get_aperf_mperf_shift)(void); -@@ -335,6 +336,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work) - - static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn); - -+#define CPPC_MAX_PERF U8_MAX -+ - static void intel_pstate_set_itmt_prio(int cpu) - { - struct cppc_perf_caps cppc_perf; -@@ -345,6 +348,14 @@ static void intel_pstate_set_itmt_prio(int cpu) - if (ret) - return; - -+ /* -+ * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. -+ * In this case we can't use CPPC.highest_perf to enable ITMT. -+ * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. -+ */ -+ if (cppc_perf.highest_perf == CPPC_MAX_PERF) -+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); -+ - /* - * The priorities can be set regardless of whether or not - * sched_set_itmt_support(true) has been called and it is valid to -@@ -385,16 +396,6 @@ static int intel_pstate_get_cppc_guaranteed(int cpu) - - return cppc_perf.nominal_perf; - } -- --static u32 intel_pstate_cppc_nominal(int cpu) --{ -- u64 nominal_perf; -- -- if (cppc_get_nominal_perf(cpu, &nominal_perf)) -- return 0; -- -- return nominal_perf; --} - #else /* CONFIG_ACPI_CPPC_LIB */ - static inline void intel_pstate_set_itmt_prio(int cpu) - { -@@ -447,20 +448,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) - (u32) cpu->acpi_perf_data.states[i].control); - } - -- /* -- * The _PSS table doesn't contain whole turbo frequency range. -- * This just contains +1 MHZ above the max non turbo frequency, -- * with control value corresponding to max turbo ratio. But -- * when cpufreq set policy is called, it will call with this -- * max frequency, which will cause a reduced performance as -- * this driver uses real max turbo frequency as the max -- * frequency. So correct this frequency in _PSS table to -- * correct max turbo frequency based on the turbo state. 
-- * Also need to convert to MHz as _PSS freq is in MHz. -- */ -- if (!global.turbo_disabled) -- cpu->acpi_perf_data.states[0].core_frequency = -- policy->cpuinfo.max_freq / 1000; - cpu->valid_pss_table = true; - pr_debug("_PPC limits will be enforced\n"); - -@@ -518,34 +505,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) - { - int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; - int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; -- int perf_ctl_turbo = pstate_funcs.get_turbo(); -- int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; -+ int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu); - int scaling = cpu->pstate.scaling; - - pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); -- pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max()); - pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); - pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); - pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); - pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); - pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); - -- /* -- * If the product of the HWP performance scaling factor and the HWP_CAP -- * highest performance is greater than the maximum turbo frequency -- * corresponding to the pstate_funcs.get_turbo() return value, the -- * scaling factor is too high, so recompute it to make the HWP_CAP -- * highest performance correspond to the maximum turbo frequency. -- */ -- if (turbo_freq < cpu->pstate.turbo_pstate * scaling) { -- cpu->pstate.turbo_freq = turbo_freq; -- scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); -- cpu->pstate.scaling = scaling; -- -- pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n", -- cpu->cpu, scaling); -- } -- -+ cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling, -+ perf_ctl_scaling); - cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, - perf_ctl_scaling); - -@@ -839,6 +810,8 @@ static ssize_t store_energy_performance_preference( - err = cpufreq_start_governor(policy); - if (!ret) - ret = err; -+ } else { -+ ret = 0; - } - } - -@@ -998,9 +971,22 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu) - */ - value &= ~GENMASK_ULL(31, 24); - value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); -- WRITE_ONCE(cpu->hwp_req_cached, value); -+ /* -+ * However, make sure that EPP will be set to "performance" when -+ * the CPU is brought back online again and the "performance" -+ * scaling algorithm is still in effect. -+ */ -+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; - } - -+ /* -+ * Clear the desired perf field in the cached HWP request value to -+ * prevent nonzero desired values from being leaked into the active -+ * mode. 
-+ */ -+ value &= ~HWP_DESIRED_PERF(~0L); -+ WRITE_ONCE(cpu->hwp_req_cached, value); -+ - value &= ~GENMASK_ULL(31, 0); - min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); - -@@ -1557,7 +1543,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) - cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - } - --static int atom_get_min_pstate(void) -+static int atom_get_min_pstate(int not_used) - { - u64 value; - -@@ -1565,7 +1551,7 @@ static int atom_get_min_pstate(void) - return (value >> 8) & 0x7F; - } - --static int atom_get_max_pstate(void) -+static int atom_get_max_pstate(int not_used) - { - u64 value; - -@@ -1573,7 +1559,7 @@ static int atom_get_max_pstate(void) - return (value >> 16) & 0x7F; - } - --static int atom_get_turbo_pstate(void) -+static int atom_get_turbo_pstate(int not_used) - { - u64 value; - -@@ -1651,23 +1637,23 @@ static void atom_get_vid(struct cpudata *cpudata) - cpudata->vid.turbo = value & 0x7f; - } - --static int core_get_min_pstate(void) -+static int core_get_min_pstate(int cpu) - { - u64 value; - -- rdmsrl(MSR_PLATFORM_INFO, value); -+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); - return (value >> 40) & 0xFF; - } - --static int core_get_max_pstate_physical(void) -+static int core_get_max_pstate_physical(int cpu) - { - u64 value; - -- rdmsrl(MSR_PLATFORM_INFO, value); -+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); - return (value >> 8) & 0xFF; - } - --static int core_get_tdp_ratio(u64 plat_info) -+static int core_get_tdp_ratio(int cpu, u64 plat_info) - { - /* Check how many TDP levels present */ - if (plat_info & 0x600000000) { -@@ -1677,13 +1663,13 @@ static int core_get_tdp_ratio(u64 plat_info) - int err; - - /* Get the TDP level (0, 1, 2) to get ratios */ -- err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); -+ err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); - if (err) - return err; - - /* TDP MSR are continuous starting at 0x648 */ - tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); -- err = rdmsrl_safe(tdp_msr, &tdp_ratio); -+ err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); - if (err) - return err; - -@@ -1700,7 +1686,7 @@ static int core_get_tdp_ratio(u64 plat_info) - return -ENXIO; - } - --static int core_get_max_pstate(void) -+static int core_get_max_pstate(int cpu) - { - u64 tar; - u64 plat_info; -@@ -1708,10 +1694,10 @@ static int core_get_max_pstate(void) - int tdp_ratio; - int err; - -- rdmsrl(MSR_PLATFORM_INFO, plat_info); -+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); - max_pstate = (plat_info >> 8) & 0xFF; - -- tdp_ratio = core_get_tdp_ratio(plat_info); -+ tdp_ratio = core_get_tdp_ratio(cpu, plat_info); - if (tdp_ratio <= 0) - return max_pstate; - -@@ -1720,7 +1706,7 @@ static int core_get_max_pstate(void) - return tdp_ratio; - } - -- err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); -+ err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); - if (!err) { - int tar_levels; - -@@ -1735,13 +1721,13 @@ static int core_get_max_pstate(void) - return max_pstate; - } - --static int core_get_turbo_pstate(void) -+static int core_get_turbo_pstate(int cpu) - { - u64 value; - int nont, ret; - -- rdmsrl(MSR_TURBO_RATIO_LIMIT, value); -- nont = core_get_max_pstate(); -+ rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); -+ nont = core_get_max_pstate(cpu); - ret = (value) & 255; - if (ret <= nont) - ret = nont; -@@ -1769,50 +1755,37 @@ static int knl_get_aperf_mperf_shift(void) - return 10; - } - --static int knl_get_turbo_pstate(void) -+static int knl_get_turbo_pstate(int cpu) - { - u64 
value; - int nont, ret; - -- rdmsrl(MSR_TURBO_RATIO_LIMIT, value); -- nont = core_get_max_pstate(); -+ rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); -+ nont = core_get_max_pstate(cpu); - ret = (((value) >> 8) & 0xFF); - if (ret <= nont) - ret = nont; - return ret; - } - --#ifdef CONFIG_ACPI_CPPC_LIB --static u32 hybrid_ref_perf; -- --static int hybrid_get_cpu_scaling(int cpu) -+static void hybrid_get_type(void *data) - { -- return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf, -- intel_pstate_cppc_nominal(cpu)); -+ u8 *cpu_type = data; -+ -+ *cpu_type = get_this_hybrid_cpu_type(); - } - --static void intel_pstate_cppc_set_cpu_scaling(void) -+static int hybrid_get_cpu_scaling(int cpu) - { -- u32 min_nominal_perf = U32_MAX; -- int cpu; -- -- for_each_present_cpu(cpu) { -- u32 nominal_perf = intel_pstate_cppc_nominal(cpu); -+ u8 cpu_type = 0; - -- if (nominal_perf && nominal_perf < min_nominal_perf) -- min_nominal_perf = nominal_perf; -- } -+ smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); -+ /* P-cores have a smaller perf level-to-freqency scaling factor. */ -+ if (cpu_type == 0x40) -+ return 78741; - -- if (min_nominal_perf < U32_MAX) { -- hybrid_ref_perf = min_nominal_perf; -- pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; -- } --} --#else --static inline void intel_pstate_cppc_set_cpu_scaling(void) --{ -+ return core_get_scaling(); - } --#endif /* CONFIG_ACPI_CPPC_LIB */ - - static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) - { -@@ -1842,10 +1815,10 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) - - static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) - { -- int perf_ctl_max_phys = pstate_funcs.get_max_physical(); -+ int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); - int perf_ctl_scaling = pstate_funcs.get_scaling(); - -- cpu->pstate.min_pstate = pstate_funcs.get_min(); -+ cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu); - cpu->pstate.max_pstate_physical = perf_ctl_max_phys; - cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; - -@@ -1861,8 +1834,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) - } - } else { - cpu->pstate.scaling = perf_ctl_scaling; -- cpu->pstate.max_pstate = pstate_funcs.get_max(); -- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); -+ cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); -+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu); - } - - if (cpu->pstate.scaling == perf_ctl_scaling) { -@@ -2233,6 +2206,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { - X86_MATCH(SKYLAKE_X, core_funcs), - X86_MATCH(COMETLAKE, core_funcs), - X86_MATCH(ICELAKE_X, core_funcs), -+ X86_MATCH(TIGERLAKE, core_funcs), - {} - }; - MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); -@@ -2241,6 +2215,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { - X86_MATCH(BROADWELL_D, core_funcs), - X86_MATCH(BROADWELL_X, core_funcs), - X86_MATCH(SKYLAKE_X, core_funcs), -+ X86_MATCH(ICELAKE_X, core_funcs), - {} - }; - -@@ -2902,6 +2877,27 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) - return intel_pstate_cpu_exit(policy); - } - -+static int intel_cpufreq_suspend(struct cpufreq_policy *policy) -+{ -+ intel_pstate_suspend(policy); -+ -+ if (hwp_active) { -+ struct cpudata *cpu = all_cpu_data[policy->cpu]; -+ u64 value = READ_ONCE(cpu->hwp_req_cached); -+ -+ /* -+ * Clear the desired perf field in MSR_HWP_REQUEST in case -+ * intel_cpufreq_adjust_perf() is in use and the last value -+ * written by it 
may not be suitable. -+ */ -+ value &= ~HWP_DESIRED_PERF(~0L); -+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); -+ WRITE_ONCE(cpu->hwp_req_cached, value); -+ } -+ -+ return 0; -+} -+ - static struct cpufreq_driver intel_cpufreq = { - .flags = CPUFREQ_CONST_LOOPS, - .verify = intel_cpufreq_verify_policy, -@@ -2911,7 +2907,7 @@ static struct cpufreq_driver intel_cpufreq = { - .exit = intel_cpufreq_cpu_exit, - .offline = intel_cpufreq_cpu_offline, - .online = intel_pstate_cpu_online, -- .suspend = intel_pstate_suspend, -+ .suspend = intel_cpufreq_suspend, - .resume = intel_pstate_resume, - .update_limits = intel_pstate_update_limits, - .name = "intel_cpufreq", -@@ -3016,9 +3012,9 @@ static unsigned int force_load __initdata; - - static int __init intel_pstate_msrs_not_valid(void) - { -- if (!pstate_funcs.get_max() || -- !pstate_funcs.get_min() || -- !pstate_funcs.get_turbo()) -+ if (!pstate_funcs.get_max(0) || -+ !pstate_funcs.get_min(0) || -+ !pstate_funcs.get_turbo(0)) - return -ENODEV; - - return 0; -@@ -3234,7 +3230,7 @@ static int __init intel_pstate_init(void) - default_driver = &intel_pstate; - - if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) -- intel_pstate_cppc_set_cpu_scaling(); -+ pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; - - goto hwp_cpu_matched; - } -diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c -index 866163883b48d..bfe240c726e34 100644 ---- a/drivers/cpufreq/mediatek-cpufreq.c -+++ b/drivers/cpufreq/mediatek-cpufreq.c -@@ -44,6 +44,8 @@ struct mtk_cpu_dvfs_info { - bool need_voltage_tracking; - }; - -+static struct platform_device *cpufreq_pdev; -+ - static LIST_HEAD(dvfs_info_list); - - static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) -@@ -547,7 +549,6 @@ static int __init mtk_cpufreq_driver_init(void) - { - struct device_node *np; - const struct of_device_id *match; -- struct platform_device *pdev; - int err; - - np = of_find_node_by_path("/"); -@@ -571,16 +572,23 @@ static int __init mtk_cpufreq_driver_init(void) - * and the device registration codes are put here to handle defer - * probing. 
- */ -- pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); -- if (IS_ERR(pdev)) { -+ cpufreq_pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); -+ if (IS_ERR(cpufreq_pdev)) { - pr_err("failed to register mtk-cpufreq platform device\n"); - platform_driver_unregister(&mtk_cpufreq_platdrv); -- return PTR_ERR(pdev); -+ return PTR_ERR(cpufreq_pdev); - } - - return 0; - } --device_initcall(mtk_cpufreq_driver_init); -+module_init(mtk_cpufreq_driver_init) -+ -+static void __exit mtk_cpufreq_driver_exit(void) -+{ -+ platform_device_unregister(cpufreq_pdev); -+ platform_driver_unregister(&mtk_cpufreq_platdrv); -+} -+module_exit(mtk_cpufreq_driver_exit) - - MODULE_DESCRIPTION("MediaTek CPUFreq driver"); - MODULE_AUTHOR("Pi-Cheng Chen "); -diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c -index 4f20c6a9108df..8e41fe9ee870d 100644 ---- a/drivers/cpufreq/pmac32-cpufreq.c -+++ b/drivers/cpufreq/pmac32-cpufreq.c -@@ -470,6 +470,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) - if (slew_done_gpio_np) - slew_done_gpio = read_gpio(slew_done_gpio_np); - -+ of_node_put(volt_gpio_np); -+ of_node_put(freq_gpio_np); -+ of_node_put(slew_done_gpio_np); -+ - /* If we use the frequency GPIOs, calculate the min/max speeds based - * on the bus frequencies - */ -diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c -index 12ab4014af712..94fe0e15623e4 100644 ---- a/drivers/cpufreq/powernow-k8.c -+++ b/drivers/cpufreq/powernow-k8.c -@@ -1101,7 +1101,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol) - - kfree(data->powernow_table); - kfree(data); -- for_each_cpu(cpu, pol->cpus) -+ /* pol->cpus will be empty here, use related_cpus instead. */ -+ for_each_cpu(cpu, pol->related_cpus) - per_cpu(powernow_data, cpu) = NULL; - - return 0; -diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c -index a2be0df7e1747..bbcba2c38e853 100644 ---- a/drivers/cpufreq/qcom-cpufreq-hw.c -+++ b/drivers/cpufreq/qcom-cpufreq-hw.c -@@ -24,12 +24,16 @@ - #define CLK_HW_DIV 2 - #define LUT_TURBO_IND 1 - -+#define GT_IRQ_STATUS BIT(2) -+ - #define HZ_PER_KHZ 1000 - - struct qcom_cpufreq_soc_data { - u32 reg_enable; -+ u32 reg_domain_state; - u32 reg_freq_lut; - u32 reg_volt_lut; -+ u32 reg_intr_clr; - u32 reg_current_vote; - u32 reg_perf_state; - u8 lut_row_size; -@@ -173,6 +177,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, - } - } else if (ret != -ENODEV) { - dev_err(cpu_dev, "Invalid opp table in device tree\n"); -+ kfree(table); - return ret; - } else { - policy->fast_switch_possible = true; -@@ -266,28 +271,31 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) - } - } - --static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) -+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) - { -- unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote); -+ unsigned int lval; -+ -+ if (data->soc_data->reg_current_vote) -+ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff; -+ else -+ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff; - -- return (val & 0x3FF) * 19200; -+ return lval * xo_rate; - } - - static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) - { - unsigned long max_capacity, capacity, freq_hz, throttled_freq; - struct cpufreq_policy *policy = data->policy; -- int cpu = cpumask_first(policy->cpus); -+ int cpu = 
cpumask_first(policy->related_cpus); - struct device *dev = get_cpu_device(cpu); - struct dev_pm_opp *opp; -- unsigned int freq; - - /* - * Get the h/w throttled frequency, normalize it using the - * registered opp table and use it to calculate thermal pressure. - */ -- freq = qcom_lmh_get_throttle_freq(data); -- freq_hz = freq * HZ_PER_KHZ; -+ freq_hz = qcom_lmh_get_throttle_freq(data); - - opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); - if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) -@@ -304,7 +312,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) - if (capacity > max_capacity) - capacity = max_capacity; - -- arch_set_thermal_pressure(policy->cpus, max_capacity - capacity); -+ arch_set_thermal_pressure(policy->related_cpus, -+ max_capacity - capacity); - - /* - * In the unlikely case policy is unregistered do not enable -@@ -342,9 +351,13 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data) - - /* Disable interrupt and enable polling */ - disable_irq_nosync(c_data->throttle_irq); -- qcom_lmh_dcvs_notify(c_data); -+ schedule_delayed_work(&c_data->throttle_work, 0); - -- return 0; -+ if (c_data->soc_data->reg_intr_clr) -+ writel_relaxed(GT_IRQ_STATUS, -+ c_data->base + c_data->soc_data->reg_intr_clr); -+ -+ return IRQ_HANDLED; - } - - static const struct qcom_cpufreq_soc_data qcom_soc_data = { -@@ -358,8 +371,10 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = { - - static const struct qcom_cpufreq_soc_data epss_soc_data = { - .reg_enable = 0x0, -+ .reg_domain_state = 0x20, - .reg_freq_lut = 0x100, - .reg_volt_lut = 0x200, -+ .reg_intr_clr = 0x308, - .reg_perf_state = 0x320, - .lut_row_size = 4, - }; -diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c -index d1744b5d96190..6e011e8bfb6a9 100644 ---- a/drivers/cpufreq/qcom-cpufreq-nvmem.c -+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c -@@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev, - } - - /* Check PVS_BLOW_STATUS */ -- pte_efuse = *(((u32 *)buf) + 4); -+ pte_efuse = *(((u32 *)buf) + 1); - pte_efuse &= BIT(21); - if (pte_efuse) { - dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); -@@ -215,6 +215,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, - int speed = 0, pvs = 0, pvs_ver = 0; - u8 *speedbin; - size_t len; -+ int ret = 0; - - speedbin = nvmem_cell_read(speedbin_nvmem, &len); - -@@ -232,7 +233,8 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, - break; - default: - dev_err(cpu_dev, "Unable to read nvmem data. 
Defaulting to 0!\n"); -- return -ENODEV; -+ ret = -ENODEV; -+ goto len_error; - } - - snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", -@@ -240,8 +242,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, - - drv->versions = (1 << speed); - -+len_error: - kfree(speedbin); -- return 0; -+ return ret; - } - - static const struct qcom_cpufreq_match_data match_data_kryo = { -@@ -264,7 +267,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) - struct nvmem_cell *speedbin_nvmem; - struct device_node *np; - struct device *cpu_dev; -- char *pvs_name = "speedXX-pvsXX-vXX"; -+ char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; -+ char *pvs_name = pvs_name_buffer; - unsigned cpu; - const struct of_device_id *match; - int ret; -diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c -index 6b6b20da2bcfc..573b417e14833 100644 ---- a/drivers/cpufreq/qoriq-cpufreq.c -+++ b/drivers/cpufreq/qoriq-cpufreq.c -@@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev) - - np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist); - if (np) { -+ of_node_put(np); - dev_info(&pdev->dev, "Disabling due to erratum A-008083"); - return -ENODEV; - } -diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c -index 2deed8d8773fa..75e1bf3a08f7c 100644 ---- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c -+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c -@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev) - return -ENOMEM; - - ret = sun50i_cpufreq_get_efuse(&speed); -- if (ret) -+ if (ret) { -+ kfree(opp_tables); - return ret; -+ } - - snprintf(name, MAX_NAME_LEN, "speed%d", speed); - -diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c -index ff2c3f8e4668a..ce5c415fb04d9 100644 ---- a/drivers/cpuidle/cpuidle-psci-domain.c -+++ b/drivers/cpuidle/cpuidle-psci-domain.c -@@ -182,7 +182,8 @@ static void psci_pd_remove(void) - struct psci_pd_provider *pd_provider, *it; - struct generic_pm_domain *genpd; - -- list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) { -+ list_for_each_entry_safe_reverse(pd_provider, it, -+ &psci_pd_providers, link) { - of_genpd_del_provider(pd_provider->node); - - genpd = of_genpd_remove_last(pd_provider->node); -diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c -index b51b5df084500..540105ca0781f 100644 ---- a/drivers/cpuidle/cpuidle-psci.c -+++ b/drivers/cpuidle/cpuidle-psci.c -@@ -23,6 +23,7 @@ - #include - #include - #include -+#include - - #include - -@@ -131,6 +132,49 @@ static int psci_idle_cpuhp_down(unsigned int cpu) - return 0; - } - -+static void psci_idle_syscore_switch(bool suspend) -+{ -+ bool cleared = false; -+ struct device *dev; -+ int cpu; -+ -+ for_each_possible_cpu(cpu) { -+ dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev; -+ -+ if (dev && suspend) { -+ dev_pm_genpd_suspend(dev); -+ } else if (dev) { -+ dev_pm_genpd_resume(dev); -+ -+ /* Account for userspace having offlined a CPU. */ -+ if (pm_runtime_status_suspended(dev)) -+ pm_runtime_set_active(dev); -+ -+ /* Clear domain state to re-start fresh. 
*/ -+ if (!cleared) { -+ psci_set_domain_state(0); -+ cleared = true; -+ } -+ } -+ } -+} -+ -+static int psci_idle_syscore_suspend(void) -+{ -+ psci_idle_syscore_switch(true); -+ return 0; -+} -+ -+static void psci_idle_syscore_resume(void) -+{ -+ psci_idle_syscore_switch(false); -+} -+ -+static struct syscore_ops psci_idle_syscore_ops = { -+ .suspend = psci_idle_syscore_suspend, -+ .resume = psci_idle_syscore_resume, -+}; -+ - static void psci_idle_init_cpuhp(void) - { - int err; -@@ -138,6 +182,8 @@ static void psci_idle_init_cpuhp(void) - if (!psci_cpuidle_use_cpuhp) - return; - -+ register_syscore_ops(&psci_idle_syscore_ops); -+ - err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, - "cpuidle/psci:online", - psci_idle_cpuhp_up, -diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c -index 7e7ab5597d7ac..0590001db6532 100644 ---- a/drivers/cpuidle/cpuidle-pseries.c -+++ b/drivers/cpuidle/cpuidle-pseries.c -@@ -410,13 +410,7 @@ static int __init pseries_idle_probe(void) - return -ENODEV; - - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { -- /* -- * Use local_paca instead of get_lppaca() since -- * preemption is not disabled, and it is not required in -- * fact, since lppaca_ptr does not need to be the value -- * associated to the current CPU, it can be from any CPU. -- */ -- if (lppaca_shared_proc(local_paca->lppaca_ptr)) { -+ if (lppaca_shared_proc()) { - cpuidle_state_table = shared_states; - max_idle_state = ARRAY_SIZE(shared_states); - } else { -diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c -index 252f2a9686a62..448bc796b0b40 100644 ---- a/drivers/cpuidle/dt_idle_states.c -+++ b/drivers/cpuidle/dt_idle_states.c -@@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, - * also be 0 on platforms with missing DT idle states or legacy DT - * configuration predating the DT idle states bindings. 
- */ -- return i; -+ return state_idx - start_idx; - } - EXPORT_SYMBOL_GPL(dt_init_idle_driver); -diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c -index 53ec9585ccd44..469e18547d06c 100644 ---- a/drivers/cpuidle/sysfs.c -+++ b/drivers/cpuidle/sysfs.c -@@ -488,6 +488,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) - &kdev->kobj, "state%d", i); - if (ret) { - kobject_put(&kobj->kobj); -+ kfree(kobj); - goto error_state; - } - cpuidle_add_s2idle_attr_group(kobj); -@@ -619,6 +620,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) - &kdev->kobj, "driver"); - if (ret) { - kobject_put(&kdrv->kobj); -+ kfree(kdrv); - return ret; - } - -@@ -705,7 +707,6 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) - if (!kdev) - return -ENOMEM; - kdev->dev = dev; -- dev->kobj_dev = kdev; - - init_completion(&kdev->kobj_unregister); - -@@ -713,9 +714,11 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) - "cpuidle"); - if (error) { - kobject_put(&kdev->kobj); -+ kfree(kdev); - return error; - } - -+ dev->kobj_dev = kdev; - kobject_uevent(&kdev->kobj, KOBJ_ADD); - - return 0; -diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig -index 51690e73153ad..a40883e118424 100644 ---- a/drivers/crypto/Kconfig -+++ b/drivers/crypto/Kconfig -@@ -772,7 +772,12 @@ config CRYPTO_DEV_IMGTEC_HASH - config CRYPTO_DEV_ROCKCHIP - tristate "Rockchip's Cryptographic Engine driver" - depends on OF && ARCH_ROCKCHIP -+ depends on PM -+ select CRYPTO_ECB -+ select CRYPTO_CBC -+ select CRYPTO_DES - select CRYPTO_AES -+ select CRYPTO_ENGINE - select CRYPTO_LIB_DES - select CRYPTO_MD5 - select CRYPTO_SHA1 -@@ -900,6 +905,7 @@ config CRYPTO_DEV_SA2UL - select CRYPTO_AES_ARM64 - select CRYPTO_ALGAPI - select CRYPTO_AUTHENC -+ select CRYPTO_DES - select CRYPTO_SHA1 - select CRYPTO_SHA256 - select CRYPTO_SHA512 -diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c -index 54ae8d16e4931..35e3cadccac2b 100644 ---- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c -+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c -@@ -11,6 +11,7 @@ - * You could find a link for the datasheet in Documentation/arm/sunxi.rst - */ - -+#include - #include - #include - #include -@@ -283,7 +284,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) - - flow = rctx->flow; - err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); -+ local_bh_disable(); - crypto_finalize_skcipher_request(engine, breq, err); -+ local_bh_enable(); - return 0; - } - -diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c -index 88194718a806c..859b7522faaac 100644 ---- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c -+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c -@@ -9,6 +9,7 @@ - * - * You could find the datasheet in Documentation/arm/sunxi.rst - */ -+#include - #include - #include - #include -@@ -414,6 +415,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) - theend: - kfree(buf); - kfree(result); -+ local_bh_disable(); - crypto_finalize_hash_request(engine, breq, err); -+ local_bh_enable(); - return 0; - } -diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c -index 9ef1c85c4aaa5..0cc8cafdde27c 100644 ---- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c -+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c -@@ -11,6 +11,7 @@ - * You could 
find a link for the datasheet in Documentation/arm/sunxi.rst - */ - -+#include - #include - #include - #include -@@ -92,6 +93,69 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) - return err; - } - -+static int sun8i_ss_setup_ivs(struct skcipher_request *areq) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); -+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); -+ struct sun8i_ss_dev *ss = op->ss; -+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); -+ struct scatterlist *sg = areq->src; -+ unsigned int todo, offset; -+ unsigned int len = areq->cryptlen; -+ unsigned int ivsize = crypto_skcipher_ivsize(tfm); -+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; -+ int i = 0; -+ dma_addr_t a; -+ int err; -+ -+ rctx->ivlen = ivsize; -+ if (rctx->op_dir & SS_DECRYPTION) { -+ offset = areq->cryptlen - ivsize; -+ scatterwalk_map_and_copy(sf->biv, areq->src, offset, -+ ivsize, 0); -+ } -+ -+ /* we need to copy all IVs from source in case DMA is bi-directionnal */ -+ while (sg && len) { -+ if (sg_dma_len(sg) == 0) { -+ sg = sg_next(sg); -+ continue; -+ } -+ if (i == 0) -+ memcpy(sf->iv[0], areq->iv, ivsize); -+ a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); -+ if (dma_mapping_error(ss->dev, a)) { -+ memzero_explicit(sf->iv[i], ivsize); -+ dev_err(ss->dev, "Cannot DMA MAP IV\n"); -+ err = -EFAULT; -+ goto dma_iv_error; -+ } -+ rctx->p_iv[i] = a; -+ /* we need to setup all others IVs only in the decrypt way */ -+ if (rctx->op_dir == SS_ENCRYPTION) -+ return 0; -+ todo = min(len, sg_dma_len(sg)); -+ len -= todo; -+ i++; -+ if (i < MAX_SG) { -+ offset = sg->length - ivsize; -+ scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); -+ } -+ rctx->niv = i; -+ sg = sg_next(sg); -+ } -+ -+ return 0; -+dma_iv_error: -+ i--; -+ while (i >= 0) { -+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); -+ memzero_explicit(sf->iv[i], ivsize); -+ i--; -+ } -+ return err; -+} -+ - static int sun8i_ss_cipher(struct skcipher_request *areq) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); -@@ -100,9 +164,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) - struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct sun8i_ss_alg_template *algt; -+ struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; - struct scatterlist *sg; - unsigned int todo, len, offset, ivsize; -- void *backup_iv = NULL; - int nr_sgs = 0; - int nr_sgd = 0; - int err = 0; -@@ -133,30 +197,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) - - ivsize = crypto_skcipher_ivsize(tfm); - if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { -- rctx->ivlen = ivsize; -- rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA); -- if (!rctx->biv) { -- err = -ENOMEM; -+ err = sun8i_ss_setup_ivs(areq); -+ if (err) - goto theend_key; -- } -- if (rctx->op_dir & SS_DECRYPTION) { -- backup_iv = kzalloc(ivsize, GFP_KERNEL); -- if (!backup_iv) { -- err = -ENOMEM; -- goto theend_key; -- } -- offset = areq->cryptlen - ivsize; -- scatterwalk_map_and_copy(backup_iv, areq->src, offset, -- ivsize, 0); -- } -- memcpy(rctx->biv, areq->iv, ivsize); -- rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen, -- DMA_TO_DEVICE); -- if (dma_mapping_error(ss->dev, rctx->p_iv)) { -- dev_err(ss->dev, "Cannot DMA MAP IV\n"); -- err = -ENOMEM; -- goto theend_iv; -- } - } - if (areq->src == areq->dst) { - nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), -@@ -242,21 +285,19 @@ 
theend_sgs: - } - - theend_iv: -- if (rctx->p_iv) -- dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen, -- DMA_TO_DEVICE); -- - if (areq->iv && ivsize > 0) { -- if (rctx->biv) { -- offset = areq->cryptlen - ivsize; -- if (rctx->op_dir & SS_DECRYPTION) { -- memcpy(areq->iv, backup_iv, ivsize); -- kfree_sensitive(backup_iv); -- } else { -- scatterwalk_map_and_copy(areq->iv, areq->dst, offset, -- ivsize, 0); -- } -- kfree(rctx->biv); -+ for (i = 0; i < rctx->niv; i++) { -+ dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); -+ memzero_explicit(sf->iv[i], ivsize); -+ } -+ -+ offset = areq->cryptlen - ivsize; -+ if (rctx->op_dir & SS_DECRYPTION) { -+ memcpy(areq->iv, sf->biv, ivsize); -+ memzero_explicit(sf->biv, ivsize); -+ } else { -+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset, -+ ivsize, 0); - } - } - -@@ -274,7 +315,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar - struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); - - err = sun8i_ss_cipher(breq); -+ local_bh_disable(); - crypto_finalize_skcipher_request(engine, breq, err); -+ local_bh_enable(); - - return 0; - } -diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c -index 80e89066dbd1a..47b5828e35c34 100644 ---- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c -+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c -@@ -30,6 +30,8 @@ - static const struct ss_variant ss_a80_variant = { - .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, - }, -+ .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, -+ }, - .op_mode = { SS_OP_ECB, SS_OP_CBC, - }, - .ss_clks = { -@@ -64,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx - const char *name) - { - int flow = rctx->flow; -+ unsigned int ivlen = rctx->ivlen; - u32 v = SS_START; - int i; - -@@ -102,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx - mutex_lock(&ss->mlock); - writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); - -- if (i == 0) { -- if (rctx->p_iv) -- writel(rctx->p_iv, ss->base + SS_IV_ADR_REG); -- } else { -- if (rctx->biv) { -- if (rctx->op_dir == SS_ENCRYPTION) -- writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); -+ if (ivlen) { -+ if (rctx->op_dir == SS_ENCRYPTION) { -+ if (i == 0) -+ writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); - else -- writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); -+ writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); -+ } else { -+ writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); - } - } - -@@ -462,7 +464,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) - */ - static int allocate_flows(struct sun8i_ss_dev *ss) - { -- int i, err; -+ int i, j, err; - - ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), - GFP_KERNEL); -@@ -472,6 +474,36 @@ static int allocate_flows(struct sun8i_ss_dev *ss) - for (i = 0; i < MAXFLOW; i++) { - init_completion(&ss->flows[i].complete); - -+ ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, -+ GFP_KERNEL | GFP_DMA); -+ if (!ss->flows[i].biv) { -+ err = -ENOMEM; -+ goto error_engine; -+ } -+ -+ for (j = 0; j < MAX_SG; j++) { -+ ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, -+ GFP_KERNEL | GFP_DMA); -+ if (!ss->flows[i].iv[j]) { -+ err = -ENOMEM; -+ goto 
error_engine; -+ } -+ } -+ -+ /* the padding could be up to two block. */ -+ ss->flows[i].pad = devm_kmalloc(ss->dev, SHA256_BLOCK_SIZE * 2, -+ GFP_KERNEL | GFP_DMA); -+ if (!ss->flows[i].pad) { -+ err = -ENOMEM; -+ goto error_engine; -+ } -+ ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, -+ GFP_KERNEL | GFP_DMA); -+ if (!ss->flows[i].result) { -+ err = -ENOMEM; -+ goto error_engine; -+ } -+ - ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); - if (!ss->flows[i].engine) { - dev_err(ss->dev, "Cannot allocate engine\n"); -diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c -index 3c073eb3db038..f89a580618aaa 100644 ---- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c -+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c -@@ -9,6 +9,7 @@ - * - * You could find the datasheet in Documentation/arm/sunxi.rst - */ -+#include - #include - #include - #include -@@ -341,18 +342,11 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) - if (digestsize == SHA224_DIGEST_SIZE) - digestsize = SHA256_DIGEST_SIZE; - -- /* the padding could be up to two block. */ -- pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA); -- if (!pad) -- return -ENOMEM; -+ result = ss->flows[rctx->flow].result; -+ pad = ss->flows[rctx->flow].pad; -+ memset(pad, 0, algt->alg.hash.halg.base.cra_blocksize * 2); - bf = (__le32 *)pad; - -- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); -- if (!result) { -- kfree(pad); -- return -ENOMEM; -- } -- - for (i = 0; i < MAX_SG; i++) { - rctx->t_dst[i].addr = 0; - rctx->t_dst[i].len = 0; -@@ -379,13 +373,21 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) - } - - len = areq->nbytes; -- for_each_sg(areq->src, sg, nr_sgs, i) { -+ sg = areq->src; -+ i = 0; -+ while (len > 0 && sg) { -+ if (sg_dma_len(sg) == 0) { -+ sg = sg_next(sg); -+ continue; -+ } - rctx->t_src[i].addr = sg_dma_address(sg); - todo = min(len, sg_dma_len(sg)); - rctx->t_src[i].len = todo / 4; - len -= todo; - rctx->t_dst[i].addr = addr_res; - rctx->t_dst[i].len = digestsize / 4; -+ sg = sg_next(sg); -+ i++; - } - if (len > 0) { - dev_err(ss->dev, "remaining len %d\n", len); -@@ -440,8 +442,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) - - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); - theend: -- kfree(pad); -- kfree(result); -+ local_bh_disable(); - crypto_finalize_hash_request(engine, breq, err); -+ local_bh_enable(); - return 0; - } -diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h -index 28188685b9100..eb82ee5345ae1 100644 ---- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h -+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h -@@ -121,11 +121,19 @@ struct sginfo { - * @complete: completion for the current task on this flow - * @status: set to 1 by interrupt if task is done - * @stat_req: number of request done by this flow -+ * @iv: list of IV to use for each step -+ * @biv: buffer which contain the backuped IV -+ * @pad: padding buffer for hash operations -+ * @result: buffer for storing the result of hash operations - */ - struct sun8i_ss_flow { - struct crypto_engine *engine; - struct completion complete; - int status; -+ u8 *iv[MAX_SG]; -+ u8 *biv; -+ void *pad; -+ void *result; - #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - unsigned long stat_req; - #endif -@@ -164,28 +172,28 @@ struct sun8i_ss_dev { - * @t_src: list of mapped SGs with their size - * @t_dst: list of 
mapped SGs with their size - * @p_key: DMA address of the key -- * @p_iv: DMA address of the IV -+ * @p_iv: DMA address of the IVs -+ * @niv: Number of IVs DMA mapped - * @method: current algorithm for this request - * @op_mode: op_mode for this request - * @op_dir: direction (encrypt vs decrypt) for this request - * @flow: the flow to use for this request -- * @ivlen: size of biv -+ * @ivlen: size of IVs - * @keylen: keylen for this request -- * @biv: buffer which contain the IV - * @fallback_req: request struct for invoking the fallback skcipher TFM - */ - struct sun8i_cipher_req_ctx { - struct sginfo t_src[MAX_SG]; - struct sginfo t_dst[MAX_SG]; - u32 p_key; -- u32 p_iv; -+ u32 p_iv[MAX_SG]; -+ int niv; - u32 method; - u32 op_mode; - u32 op_dir; - int flow; - unsigned int ivlen; - unsigned int keylen; -- void *biv; - struct skcipher_request fallback_req; // keep at the end - }; - -diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c -index 8278d98074e9a..e1556a3582a30 100644 ---- a/drivers/crypto/amcc/crypto4xx_core.c -+++ b/drivers/crypto/amcc/crypto4xx_core.c -@@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, - { - struct skcipher_request *req; - struct scatterlist *dst; -- dma_addr_t addr; - - req = skcipher_request_cast(pd_uinfo->async_req); - -@@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, - req->cryptlen, req->dst); - } else { - dst = pd_uinfo->dest_va; -- addr = dma_map_page(dev->core_dev->device, sg_page(dst), -- dst->offset, dst->length, DMA_FROM_DEVICE); -+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length, -+ DMA_FROM_DEVICE); - } - - if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { -@@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev, - struct ahash_request *ahash_req; - - ahash_req = ahash_request_cast(pd_uinfo->async_req); -- ctx = crypto_tfm_ctx(ahash_req->base.tfm); -+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req)); - -- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, -- crypto_tfm_ctx(ahash_req->base.tfm)); -+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx); - crypto4xx_ret_sg_desc(dev, pd_uinfo); - - if (pd_uinfo->state & PD_ENTRY_BUSY) -diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c -index c6865cbd334b2..e79514fce731f 100644 ---- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c -+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c -@@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine, - struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); - - err = meson_cipher(breq); -+ local_bh_disable(); - crypto_finalize_skcipher_request(engine, breq, err); -+ local_bh_enable(); - - return 0; - } -diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c -index 6e7ae896717cd..937187027ad57 100644 ---- a/drivers/crypto/amlogic/amlogic-gxl-core.c -+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c -@@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev) - return err; - } - -- mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); - for (i = 0; i < MAXFLOW; i++) { - mc->irqs[i] = platform_get_irq(pdev, i); - if (mc->irqs[i] < 0) -diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h -index dc0f142324a3c..8c0746a1d6d43 100644 ---- a/drivers/crypto/amlogic/amlogic-gxl.h -+++ 
b/drivers/crypto/amlogic/amlogic-gxl.h -@@ -95,7 +95,7 @@ struct meson_dev { - struct device *dev; - struct meson_flow *chanlist; - atomic_t flow; -- int *irqs; -+ int irqs[MAXFLOW]; - #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG - struct dentry *dbgfs_dir; - #endif -diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c -index 9391ccc03382d..fe05584031914 100644 ---- a/drivers/crypto/atmel-aes.c -+++ b/drivers/crypto/atmel-aes.c -@@ -960,6 +960,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, - ctx = crypto_tfm_ctx(areq->tfm); - - dd->areq = areq; -+ dd->ctx = ctx; - start_async = (areq != new_areq); - dd->is_async = start_async; - -@@ -1274,7 +1275,6 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm) - - crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); - ctx->base.dd = dd; -- ctx->base.dd->ctx = &ctx->base; - ctx->base.start = atmel_aes_start; - - return 0; -@@ -1291,7 +1291,6 @@ static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm) - - crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); - ctx->base.dd = dd; -- ctx->base.dd->ctx = &ctx->base; - ctx->base.start = atmel_aes_ctr_start; - - return 0; -@@ -1783,7 +1782,6 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm) - - crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); - ctx->base.dd = dd; -- ctx->base.dd->ctx = &ctx->base; - ctx->base.start = atmel_aes_gcm_start; - - return 0; -@@ -1927,7 +1925,6 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm) - crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) + - crypto_skcipher_reqsize(ctx->fallback_tfm)); - ctx->base.dd = dd; -- ctx->base.dd->ctx = &ctx->base; - ctx->base.start = atmel_aes_xts_start; - - return 0; -@@ -2154,7 +2151,6 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm, - crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) + - auth_reqsize)); - ctx->base.dd = dd; -- ctx->base.dd->ctx = &ctx->base; - ctx->base.start = atmel_aes_authenc_start; - - return 0; -diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c -index 8697ae53b0633..d3d8bb0a69900 100644 ---- a/drivers/crypto/caam/caamalg.c -+++ b/drivers/crypto/caam/caamalg.c -@@ -1533,6 +1533,9 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq) - - ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req); - -+ if (ret == -ENOSPC && engine->retry_support) -+ return ret; -+ - if (ret != -EINPROGRESS) { - aead_unmap(ctx->jrdev, rctx->edesc, req); - kfree(rctx->edesc); -@@ -1762,6 +1765,9 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq) - - ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req); - -+ if (ret == -ENOSPC && engine->retry_support) -+ return ret; -+ - if (ret != -EINPROGRESS) { - skcipher_unmap(ctx->jrdev, rctx->edesc, req); - kfree(rctx->edesc); -diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c -index 8b8ed77d8715d..6753f0e6e55d1 100644 ---- a/drivers/crypto/caam/caamalg_qi2.c -+++ b/drivers/crypto/caam/caamalg_qi2.c -@@ -5470,7 +5470,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) - dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); - dpaa2_fd_set_flc(&fd, req->flc_dma); - -- ppriv = this_cpu_ptr(priv->ppriv); -+ ppriv = raw_cpu_ptr(priv->ppriv); - for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { - err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, - &fd); -diff --git 
a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c -index e8a6d8bc43b5d..36ef738e4a181 100644 ---- a/drivers/crypto/caam/caamhash.c -+++ b/drivers/crypto/caam/caamhash.c -@@ -765,6 +765,9 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq) - - ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req); - -+ if (ret == -ENOSPC && engine->retry_support) -+ return ret; -+ - if (ret != -EINPROGRESS) { - ahash_unmap(jrdev, state->edesc, req, 0); - kfree(state->edesc); -diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c -index e313233ec6de7..51b48b57266a6 100644 ---- a/drivers/crypto/caam/caampkc.c -+++ b/drivers/crypto/caam/caampkc.c -@@ -223,7 +223,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, - if (len && *buff) - break; - -- sg_miter_next(&miter); -+ if (!sg_miter_next(&miter)) -+ break; -+ - buff = miter.addr; - len = miter.length; - -@@ -380,6 +382,9 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) - - ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req); - -+ if (ret == -ENOSPC && engine->retry_support) -+ return ret; -+ - if (ret != -EINPROGRESS) { - rsa_pub_unmap(jrdev, req_ctx->edesc, req); - rsa_io_unmap(jrdev, req_ctx->edesc, req); -@@ -1153,16 +1158,27 @@ static struct caam_akcipher_alg caam_rsa = { - int caam_pkc_init(struct device *ctrldev) - { - struct caam_drv_private *priv = dev_get_drvdata(ctrldev); -- u32 pk_inst; -+ u32 pk_inst, pkha; - int err; - init_done = false; - - /* Determine public key hardware accelerator presence. */ -- if (priv->era < 10) -+ if (priv->era < 10) { - pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & - CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; -- else -- pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; -+ } else { -+ pkha = rd_reg32(&priv->ctrl->vreg.pkha); -+ pk_inst = pkha & CHA_VER_NUM_MASK; -+ -+ /* -+ * Newer CAAMs support partially disabled functionality. If this is the -+ * case, the number is non-zero, but this bit is set to indicate that -+ * no encryption or decryption is supported. Only signing and verifying -+ * is supported. -+ */ -+ if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) -+ pk_inst = 0; -+ } - - /* Do not register algorithms if PKHA is not present. */ - if (!pk_inst) -diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c -index ca0361b2dbb07..f9a1ec3c84851 100644 ---- a/drivers/crypto/caam/ctrl.c -+++ b/drivers/crypto/caam/ctrl.c -@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, - const u32 rdsta_if = RDSTA_IF0 << sh_idx; - const u32 rdsta_pr = RDSTA_PR0 << sh_idx; - const u32 rdsta_mask = rdsta_if | rdsta_pr; -+ -+ /* Clear the contents before using the descriptor */ -+ memset(desc, 0x00, CAAM_CMD_SZ * 7); -+ - /* - * If the corresponding bit is set, this state handle - * was initialized by somebody else, so it's left alone. 
-@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, - } - - dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); -- /* Clear the contents before recreating the descriptor */ -- memset(desc, 0x00, CAAM_CMD_SZ * 7); - } - - kfree(desc); -@@ -609,6 +611,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major, - } - #endif - -+static bool needs_entropy_delay_adjustment(void) -+{ -+ if (of_machine_is_compatible("fsl,imx6sx")) -+ return true; -+ return false; -+} -+ - /* Probe routine for CAAM top (controller) level */ - static int caam_probe(struct platform_device *pdev) - { -@@ -855,6 +864,8 @@ static int caam_probe(struct platform_device *pdev) - * Also, if a handle was instantiated, do not change - * the TRNG parameters. - */ -+ if (needs_entropy_delay_adjustment()) -+ ent_delay = 12000; - if (!(ctrlpriv->rng4_sh_init || inst_handles)) { - dev_info(dev, - "Entropy delay = %u\n", -@@ -871,6 +882,15 @@ static int caam_probe(struct platform_device *pdev) - */ - ret = instantiate_rng(dev, inst_handles, - gen_sk); -+ /* -+ * Entropy delay is determined via TRNG characterization. -+ * TRNG characterization is run across different voltages -+ * and temperatures. -+ * If worst case value for ent_dly is identified, -+ * the loop can be skipped for that platform. -+ */ -+ if (needs_entropy_delay_adjustment()) -+ break; - if (ret == -EAGAIN) - /* - * if here, the loop will rerun, -diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h -index af61f3a2c0d46..3738625c02509 100644 ---- a/drivers/crypto/caam/regs.h -+++ b/drivers/crypto/caam/regs.h -@@ -322,6 +322,9 @@ struct version_regs { - /* CHA Miscellaneous Information - AESA_MISC specific */ - #define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT) - -+/* CHA Miscellaneous Information - PKHA_MISC specific */ -+#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT) -+ - /* - * caam_perfmon - Performance Monitor/Secure Memory Status/ - * CAAM Global Status/Component Version IDs -diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c -index 8c32d0eb8fcf2..6872ac3440010 100644 ---- a/drivers/crypto/cavium/cpt/cptpf_main.c -+++ b/drivers/crypto/cavium/cpt/cptpf_main.c -@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) - const struct firmware *fw_entry; - struct device *dev = &cpt->pdev->dev; - struct ucode_header *ucode; -+ unsigned int code_length; - struct microcode *mcode; - int j, ret = 0; - -@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) - ucode = (struct ucode_header *)fw_entry->data; - mcode = &cpt->mcode[cpt->next_mc_idx]; - memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); -- mcode->code_size = ntohl(ucode->code_length) * 2; -- if (!mcode->code_size) { -+ code_length = ntohl(ucode->code_length); -+ if (code_length == 0 || code_length >= INT_MAX / 2) { - ret = -EINVAL; - goto fw_release; - } -+ mcode->code_size = code_length * 2; - - mcode->is_ae = is_ae; - mcode->core_mask = 0ULL; -diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c -index 2e9c0d2143632..199fcec9b8d0b 100644 ---- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c -+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c -@@ -191,6 +191,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) - ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); - if (!ndev->iov.pf2vf_wq) { - kfree(ndev->iov.vfdev); -+ 
ndev->iov.vfdev = NULL; - return -ENOMEM; - } - /* enable pf2vf mailbox interrupts */ -diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c -index d718db224be42..b386a7063818b 100644 ---- a/drivers/crypto/ccp/ccp-dmaengine.c -+++ b/drivers/crypto/ccp/ccp-dmaengine.c -@@ -632,6 +632,36 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) - return 0; - } - -+static void ccp_dma_release(struct ccp_device *ccp) -+{ -+ struct ccp_dma_chan *chan; -+ struct dma_chan *dma_chan; -+ unsigned int i; -+ -+ for (i = 0; i < ccp->cmd_q_count; i++) { -+ chan = ccp->ccp_dma_chan + i; -+ dma_chan = &chan->dma_chan; -+ -+ tasklet_kill(&chan->cleanup_tasklet); -+ list_del_rcu(&dma_chan->device_node); -+ } -+} -+ -+static void ccp_dma_release_channels(struct ccp_device *ccp) -+{ -+ struct ccp_dma_chan *chan; -+ struct dma_chan *dma_chan; -+ unsigned int i; -+ -+ for (i = 0; i < ccp->cmd_q_count; i++) { -+ chan = ccp->ccp_dma_chan + i; -+ dma_chan = &chan->dma_chan; -+ -+ if (dma_chan->client_count) -+ dma_release_channel(dma_chan); -+ } -+} -+ - int ccp_dmaengine_register(struct ccp_device *ccp) - { - struct ccp_dma_chan *chan; -@@ -736,6 +766,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) - return 0; - - err_reg: -+ ccp_dma_release(ccp); - kmem_cache_destroy(ccp->dma_desc_cache); - - err_cache: -@@ -751,7 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) - if (!dmaengine) - return; - -+ ccp_dma_release_channels(ccp); - dma_async_device_unregister(dma_dev); -+ ccp_dma_release(ccp); - - kmem_cache_destroy(ccp->dma_desc_cache); - kmem_cache_destroy(ccp->dma_cmd_cache); -diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c -index ae7b445999144..4bf9eaab4456f 100644 ---- a/drivers/crypto/ccp/psp-dev.c -+++ b/drivers/crypto/ccp/psp-dev.c -@@ -42,6 +42,9 @@ static irqreturn_t psp_irq_handler(int irq, void *data) - /* Read the interrupt status: */ - status = ioread32(psp->io_regs + psp->vdata->intsts_reg); - -+ /* Clear the interrupt status by writing the same value we read. */ -+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); -+ - /* invoke subdevice interrupt handlers */ - if (status) { - if (psp->sev_irq_handler) -@@ -51,9 +54,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data) - psp->tee_irq_handler(irq, psp->tee_irq_data, status); - } - -- /* Clear the interrupt status by writing the same value we read. 
*/ -- iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); -- - return IRQ_HANDLED; - } - -diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c -index 2ecb0e1f65d8d..70174a9118b19 100644 ---- a/drivers/crypto/ccp/sev-dev.c -+++ b/drivers/crypto/ccp/sev-dev.c -@@ -24,6 +24,7 @@ - #include - - #include -+#include - - #include "psp-dev.h" - #include "sev-dev.h" -@@ -141,6 +142,17 @@ static int sev_cmd_buffer_len(int cmd) - return 0; - } - -+static void *sev_fw_alloc(unsigned long len) -+{ -+ struct page *page; -+ -+ page = alloc_pages(GFP_KERNEL, get_order(len)); -+ if (!page) -+ return NULL; -+ -+ return page_address(page); -+} -+ - static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) - { - struct psp_device *psp = psp_master; -@@ -241,7 +253,7 @@ static int __sev_platform_init_locked(int *error) - struct psp_device *psp = psp_master; - struct sev_data_init data; - struct sev_device *sev; -- int rc = 0; -+ int psp_ret = -1, rc = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; -@@ -266,7 +278,21 @@ static int __sev_platform_init_locked(int *error) - data.tmr_len = SEV_ES_TMR_SIZE; - } - -- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); -+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, &psp_ret); -+ if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { -+ /* -+ * Initialization command returned an integrity check failure -+ * status code, meaning that firmware load and validation of SEV -+ * related persistent data has failed. Retrying the -+ * initialization function should succeed by replacing the state -+ * with a reset state. -+ */ -+ dev_dbg(sev->dev, "SEV: retrying INIT command"); -+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, &psp_ret); -+ } -+ if (error) -+ *error = psp_ret; -+ - if (rc) - return rc; - -@@ -300,7 +326,7 @@ static int __sev_platform_shutdown_locked(int *error) - struct sev_device *sev = psp_master->sev_data; - int ret; - -- if (sev->state == SEV_STATE_UNINIT) -+ if (!sev || sev->state == SEV_STATE_UNINIT) - return 0; - - ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); -@@ -374,6 +400,8 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) - struct sev_user_data_status data; - int ret; - -+ memset(&data, 0, sizeof(data)); -+ - ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); - if (ret) - return ret; -@@ -427,7 +455,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) - if (input.length > SEV_FW_BLOB_MAX_SIZE) - return -EFAULT; - -- blob = kmalloc(input.length, GFP_KERNEL); -+ blob = kzalloc(input.length, GFP_KERNEL); - if (!blob) - return -ENOMEM; - -@@ -651,7 +679,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) - input_address = (void __user *)input.address; - - if (input.address && input.length) { -- id_blob = kmalloc(input.length, GFP_KERNEL); -+ /* -+ * The length of the ID shouldn't be assumed by software since -+ * it may change in the future. The allocation size is limited -+ * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator. -+ * If the allocation fails, simply return ENOMEM rather than -+ * warning in the kernel log. 
-+ */ -+ id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); - if (!id_blob) - return -ENOMEM; - -@@ -770,14 +805,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) - if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) - return -EFAULT; - -- pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); -+ pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); - if (!pdh_blob) - return -ENOMEM; - - data.pdh_cert_address = __psp_pa(pdh_blob); - data.pdh_cert_len = input.pdh_cert_len; - -- cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); -+ cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); - if (!cert_blob) { - ret = -ENOMEM; - goto e_free_pdh; -@@ -1064,7 +1099,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); - void sev_pci_init(void) - { - struct sev_device *sev = psp_master->sev_data; -- struct page *tmr_page; - int error, rc; - - if (!sev) -@@ -1080,29 +1114,16 @@ void sev_pci_init(void) - sev_get_api_version(); - - /* Obtain the TMR memory area for SEV-ES use */ -- tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE)); -- if (tmr_page) { -- sev_es_tmr = page_address(tmr_page); -- } else { -- sev_es_tmr = NULL; -+ sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE); -+ if (sev_es_tmr) -+ /* Must flush the cache before giving it to the firmware */ -+ clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE); -+ else - dev_warn(sev->dev, - "SEV: TMR allocation failed, SEV-ES support unavailable\n"); -- } - - /* Initialize the platform */ - rc = sev_platform_init(&error); -- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) { -- /* -- * INIT command returned an integrity check failure -- * status code, meaning that firmware load and -- * validation of SEV related persistent data has -- * failed and persistent state has been erased. -- * Retrying INIT command here should succeed. 
-- */ -- dev_dbg(sev->dev, "SEV: retrying INIT command"); -- rc = sev_platform_init(&error); -- } -- - if (rc) { - dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error); - return; -diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c -index 88c672ad27e44..9470a9a19f29d 100644 ---- a/drivers/crypto/ccp/sp-pci.c -+++ b/drivers/crypto/ccp/sp-pci.c -@@ -320,6 +320,15 @@ static const struct psp_vdata pspv3 = { - .inten_reg = 0x10690, - .intsts_reg = 0x10694, - }; -+ -+static const struct psp_vdata pspv4 = { -+ .sev = &sevv2, -+ .tee = &teev1, -+ .feature_reg = 0x109fc, -+ .inten_reg = 0x10690, -+ .intsts_reg = 0x10694, -+}; -+ - #endif - - static const struct sp_dev_vdata dev_vdata[] = { -@@ -365,7 +374,7 @@ static const struct sp_dev_vdata dev_vdata[] = { - { /* 5 */ - .bar = 2, - #ifdef CONFIG_CRYPTO_DEV_SP_PSP -- .psp_vdata = &pspv2, -+ .psp_vdata = &pspv4, - #endif - }, - }; -diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c -index a5e041d9d2cf1..6140e49273226 100644 ---- a/drivers/crypto/ccree/cc_buffer_mgr.c -+++ b/drivers/crypto/ccree/cc_buffer_mgr.c -@@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, - { - int ret = 0; - -+ if (!nbytes) { -+ *mapped_nents = 0; -+ *lbytes = 0; -+ *nents = 0; -+ return 0; -+ } -+ - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); - if (*nents > max_sg_nents) { - *nents = 0; -@@ -349,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx, - req_ctx->mlli_params.mlli_dma_addr); - } - -- dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); -- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); -- - if (src != dst) { -- dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL); -+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); -+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); - dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); -+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); -+ } else { -+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); -+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); - } - } - -@@ -370,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, - u32 dummy = 0; - int rc = 0; - u32 mapped_nents = 0; -+ int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); - - req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; - mlli_params->curr_pool = NULL; -@@ -392,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, - } - - /* Map the src SGL */ -- rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, -+ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); - if (rc) - goto cipher_exit; -@@ -409,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, - } - } else { - /* Map the dst sg */ -- rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, -+ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, - &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents); - if (rc) -@@ -449,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) - struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - unsigned int hw_iv_size = areq_ctx->hw_iv_size; - struct cc_drvdata *drvdata = dev_get_drvdata(dev); -+ int src_direction = (req->src != req->dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); - - if (areq_ctx->mac_buf_dma_addr) { - dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, -@@ -507,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) - sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, - areq_ctx->assoclen, req->cryptlen); - -- dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, -- DMA_BIDIRECTIONAL); -+ dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); - if (req->src != req->dst) { - dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", - sg_virt(req->dst)); -- dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, -- DMA_BIDIRECTIONAL); -+ dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); - } - if (drvdata->coherent && - areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && -@@ -836,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, - else - size_for_map -= authsize; - -- rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, -+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, - &areq_ctx->dst.mapped_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, - &dst_mapped_nents); -@@ -1049,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) - size_to_map += authsize; - } - -- rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, -+ rc = cc_map_sg(dev, req->src, size_to_map, -+ (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), - &areq_ctx->src.mapped_nents, - (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + - LLI_MAX_NUM_OF_DATA_ENTRIES), -diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c -index 78833491f534d..309da6334a0a0 100644 ---- a/drivers/crypto/ccree/cc_cipher.c -+++ b/drivers/crypto/ccree/cc_cipher.c -@@ -257,8 +257,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) - &ctx_p->user.key_dma_addr); - - /* Free key buffer in context */ -- kfree_sensitive(ctx_p->user.key); - dev_dbg(dev, "Free key buffer in context. 
key=@%p\n", ctx_p->user.key); -+ kfree_sensitive(ctx_p->user.key); - } - - struct tdes_keys { -diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c -index 7083767602fcf..8f008f024f8f1 100644 ---- a/drivers/crypto/ccree/cc_debugfs.c -+++ b/drivers/crypto/ccree/cc_debugfs.c -@@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) - cc_debugfs_dir = debugfs_create_dir("ccree", NULL); - } - --void __exit cc_debugfs_global_fini(void) -+void cc_debugfs_global_fini(void) - { - debugfs_remove(cc_debugfs_dir); - } -diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c -index e599ac6dc162a..41f0a404bdf9e 100644 ---- a/drivers/crypto/ccree/cc_driver.c -+++ b/drivers/crypto/ccree/cc_driver.c -@@ -103,7 +103,8 @@ MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match); - static void init_cc_cache_params(struct cc_drvdata *drvdata) - { - struct device *dev = drvdata_to_dev(drvdata); -- u32 cache_params, ace_const, val, mask; -+ u32 cache_params, ace_const, val; -+ u64 mask; - - /* compute CC_AXIM_CACHE_PARAMS */ - cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS)); -@@ -655,9 +656,17 @@ static struct platform_driver ccree_driver = { - - static int __init ccree_init(void) - { -+ int rc; -+ - cc_debugfs_global_init(); - -- return platform_driver_register(&ccree_driver); -+ rc = platform_driver_register(&ccree_driver); -+ if (rc) { -+ cc_debugfs_global_fini(); -+ return rc; -+ } -+ -+ return 0; - } - module_init(ccree_init); - -diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c -index c1c2b1d866639..f2be0a7d7f7ac 100644 ---- a/drivers/crypto/gemini/sl3516-ce-cipher.c -+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c -@@ -264,7 +264,9 @@ static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *a - struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); - - err = sl3516_ce_cipher(breq); -+ local_bh_disable(); - crypto_finalize_skcipher_request(engine, breq, err); -+ local_bh_enable(); - - return 0; - } -diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c -index a032c192ef1d6..4062251fd1b68 100644 ---- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c -+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c -@@ -252,7 +252,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, - if (unlikely(shift < 0)) - return -EINVAL; - -- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); -+ ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC); - if (unlikely(!ptr)) - return -ENOMEM; - -@@ -1865,7 +1865,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req, - */ - if (memcmp(ptr, p, ctx->key_sz) == 0) { - dev_err(dev, "gx is p!\n"); -- return -EINVAL; -+ goto err; - } else if (memcmp(ptr, p, ctx->key_sz) > 0) { - hpre_curve25519_src_modulo_p(ptr); - } -diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c -index 65a641396c07f..edc61e4105f30 100644 ---- a/drivers/crypto/hisilicon/hpre/hpre_main.c -+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c -@@ -1143,18 +1143,12 @@ err_with_qm_init: - static void hpre_remove(struct pci_dev *pdev) - { - struct hisi_qm *qm = pci_get_drvdata(pdev); -- int ret; - - hisi_qm_pm_uninit(qm); - hisi_qm_wait_task_finish(qm, &hpre_devices); - hisi_qm_alg_unregister(qm, &hpre_devices); -- if (qm->fun_type == QM_HW_PF && qm->vfs_num) { -- ret = hisi_qm_sriov_disable(pdev, true); -- if (ret) { -- 
pci_err(pdev, "Disable SRIOV fail!\n"); -- return; -- } -- } -+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) -+ hisi_qm_sriov_disable(pdev, true); - - hpre_debugfs_exit(qm); - hisi_qm_stop(qm, QM_NORMAL); -diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c -index 369562d34d66a..fd89918abd191 100644 ---- a/drivers/crypto/hisilicon/qm.c -+++ b/drivers/crypto/hisilicon/qm.c -@@ -1888,8 +1888,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, - return ret; - - /* Judge if the instance is being reset. */ -- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) -- return 0; -+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { -+ ret = 0; -+ goto put_dfx_access; -+ } - - if (count > QM_DBG_WRITE_LEN) { - ret = -ENOSPC; -@@ -4107,7 +4109,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) - static int qm_vf_read_qos(struct hisi_qm *qm) - { - int cnt = 0; -- int ret; -+ int ret = -EINVAL; - - /* reset mailbox qos val */ - qm->mb_qos = 0; -@@ -5725,8 +5727,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) - GFP_ATOMIC); - dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); - if (!qm->qdma.va) { -- ret = -ENOMEM; -- goto err_alloc_qdma; -+ ret = -ENOMEM; -+ goto err_destroy_idr; - } - - QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); -@@ -5742,7 +5744,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) - - err_alloc_qp_array: - dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); --err_alloc_qdma: -+err_destroy_idr: -+ idr_destroy(&qm->qp_idr); - kfree(qm->factor); - - return ret; -@@ -5986,7 +5989,7 @@ int hisi_qm_resume(struct device *dev) - if (ret) - pci_err(pdev, "failed to start qm(%d)\n", ret); - -- return 0; -+ return ret; - } - EXPORT_SYMBOL_GPL(hisi_qm_resume); - -diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h -index 3068093229a50..bbb35de994eb7 100644 ---- a/drivers/crypto/hisilicon/qm.h -+++ b/drivers/crypto/hisilicon/qm.h -@@ -318,14 +318,14 @@ struct hisi_qp { - static inline int q_num_set(const char *val, const struct kernel_param *kp, - unsigned int device) - { -- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, -- device, NULL); -+ struct pci_dev *pdev; - u32 n, q_num; - int ret; - - if (!val) - return -EINVAL; - -+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); - if (!pdev) { - q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); - pr_info("No device found currently, suppose queue number is %u\n", -@@ -335,6 +335,8 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp, - q_num = QM_QNUM_V1; - else - q_num = QM_QNUM_V2; -+ -+ pci_dev_put(pdev); - } - - ret = kstrtou32(val, 10, &n); -diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c -index 0a3c8f019b025..490e1542305e1 100644 ---- a/drivers/crypto/hisilicon/sec/sec_algs.c -+++ b/drivers/crypto/hisilicon/sec/sec_algs.c -@@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, - */ - } - -- mutex_lock(&ctx->queue->queuelock); -+ spin_lock_bh(&ctx->queue->queuelock); - /* Put the IV in place for chained cases */ - switch (ctx->cipher_alg) { - case SEC_C_AES_CBC_128: -@@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, - list_del(&backlog_req->backlog_head); - } - } -- mutex_unlock(&ctx->queue->queuelock); -+ spin_unlock_bh(&ctx->queue->queuelock); - - mutex_lock(&sec_req->lock); - list_del(&sec_req_el->head); -@@ -798,7 +798,7 @@ static int 
sec_alg_skcipher_crypto(struct skcipher_request *skreq, - */ - - /* Grab a big lock for a long time to avoid concurrency issues */ -- mutex_lock(&queue->queuelock); -+ spin_lock_bh(&queue->queuelock); - - /* - * Can go on to queue if we have space in either: -@@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, - ret = -EBUSY; - if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { - list_add_tail(&sec_req->backlog_head, &ctx->backlog); -- mutex_unlock(&queue->queuelock); -+ spin_unlock_bh(&queue->queuelock); - goto out; - } - -- mutex_unlock(&queue->queuelock); -+ spin_unlock_bh(&queue->queuelock); - goto err_free_elements; - } - ret = sec_send_request(sec_req, queue); -- mutex_unlock(&queue->queuelock); -+ spin_unlock_bh(&queue->queuelock); - if (ret) - goto err_free_elements; - -@@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) - if (IS_ERR(ctx->queue)) - return PTR_ERR(ctx->queue); - -- mutex_init(&ctx->queue->queuelock); -+ spin_lock_init(&ctx->queue->queuelock); - ctx->queue->havesoftqueue = false; - - return 0; -diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h -index 179a8250d691c..e2a50bf2234b9 100644 ---- a/drivers/crypto/hisilicon/sec/sec_drv.h -+++ b/drivers/crypto/hisilicon/sec/sec_drv.h -@@ -347,7 +347,7 @@ struct sec_queue { - DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN); - DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *)); - bool havesoftqueue; -- struct mutex queuelock; -+ spinlock_t queuelock; - void *shadow[SEC_QUEUE_LEN]; - }; - -diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h -index d97cf02b1df75..cff00fd297652 100644 ---- a/drivers/crypto/hisilicon/sec2/sec.h -+++ b/drivers/crypto/hisilicon/sec2/sec.h -@@ -119,7 +119,7 @@ struct sec_qp_ctx { - struct idr req_idr; - struct sec_alg_res res[QM_Q_DEPTH]; - struct sec_ctx *ctx; -- struct mutex req_lock; -+ spinlock_t req_lock; - struct list_head backlog; - struct hisi_acc_sgl_pool *c_in_pool; - struct hisi_acc_sgl_pool *c_out_pool; -diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c -index 6a45bd23b3635..0d26eda36a526 100644 ---- a/drivers/crypto/hisilicon/sec2/sec_crypto.c -+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c -@@ -124,11 +124,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) - { - int req_id; - -- mutex_lock(&qp_ctx->req_lock); -+ spin_lock_bh(&qp_ctx->req_lock); - - req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, - 0, QM_Q_DEPTH, GFP_ATOMIC); -- mutex_unlock(&qp_ctx->req_lock); -+ spin_unlock_bh(&qp_ctx->req_lock); - if (unlikely(req_id < 0)) { - dev_err(req->ctx->dev, "alloc req id fail!\n"); - return req_id; -@@ -153,9 +153,9 @@ static void sec_free_req_id(struct sec_req *req) - qp_ctx->req_list[req_id] = NULL; - req->qp_ctx = NULL; - -- mutex_lock(&qp_ctx->req_lock); -+ spin_lock_bh(&qp_ctx->req_lock); - idr_remove(&qp_ctx->req_idr, req_id); -- mutex_unlock(&qp_ctx->req_lock); -+ spin_unlock_bh(&qp_ctx->req_lock); - } - - static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) -@@ -270,7 +270,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) - !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; - -- mutex_lock(&qp_ctx->req_lock); -+ spin_lock_bh(&qp_ctx->req_lock); - ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - - if (ctx->fake_req_limit <= -@@ -278,10 +278,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct 
sec_req *req) - list_add_tail(&req->backlog_head, &qp_ctx->backlog); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); -- mutex_unlock(&qp_ctx->req_lock); -+ spin_unlock_bh(&qp_ctx->req_lock); - return -EBUSY; - } -- mutex_unlock(&qp_ctx->req_lock); -+ spin_unlock_bh(&qp_ctx->req_lock); - - if (unlikely(ret == -EBUSY)) - return -ENOBUFS; -@@ -484,7 +484,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, - - qp->req_cb = sec_req_cb; - -- mutex_init(&qp_ctx->req_lock); -+ spin_lock_init(&qp_ctx->req_lock); - idr_init(&qp_ctx->req_idr); - INIT_LIST_HEAD(&qp_ctx->backlog); - -@@ -617,7 +617,7 @@ static int sec_auth_init(struct sec_ctx *ctx) - { - struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - -- a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, -+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, - &a_ctx->a_key_dma, GFP_KERNEL); - if (!a_ctx->a_key) - return -ENOMEM; -@@ -629,8 +629,8 @@ static void sec_auth_uninit(struct sec_ctx *ctx) - { - struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - -- memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); -- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, -+ memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); -+ dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, - a_ctx->a_key, a_ctx->a_key_dma); - } - -@@ -1373,7 +1373,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, - { - struct sec_req *backlog_req = NULL; - -- mutex_lock(&qp_ctx->req_lock); -+ spin_lock_bh(&qp_ctx->req_lock); - if (ctx->fake_req_limit >= - atomic_read(&qp_ctx->qp->qp_status.used) && - !list_empty(&qp_ctx->backlog)) { -@@ -1381,7 +1381,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, - typeof(*backlog_req), backlog_head); - list_del(&backlog_req->backlog_head); - } -- mutex_unlock(&qp_ctx->req_lock); -+ spin_unlock_bh(&qp_ctx->req_lock); - - return backlog_req; - } -@@ -2284,9 +2284,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, - struct aead_request *aead_req, - bool encrypt) - { -- struct aead_request *subreq = aead_request_ctx(aead_req); - struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - struct device *dev = ctx->dev; -+ struct aead_request *subreq; -+ int ret; - - /* Kunpeng920 aead mode not support input 0 size */ - if (!a_ctx->fallback_aead_tfm) { -@@ -2294,6 +2295,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, - return -EINVAL; - } - -+ subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); -+ if (!subreq) -+ return -ENOMEM; -+ - aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); - aead_request_set_callback(subreq, aead_req->base.flags, - aead_req->base.complete, aead_req->base.data); -@@ -2301,8 +2306,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, - aead_req->cryptlen, aead_req->iv); - aead_request_set_ad(subreq, aead_req->assoclen); - -- return encrypt ? 
crypto_aead_encrypt(subreq) : -- crypto_aead_decrypt(subreq); -+ if (encrypt) -+ ret = crypto_aead_encrypt(subreq); -+ else -+ ret = crypto_aead_decrypt(subreq); -+ aead_request_free(subreq); -+ -+ return ret; - } - - static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) -diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h -index 9f71c358a6d35..ee2edaf5058df 100644 ---- a/drivers/crypto/hisilicon/sec2/sec_crypto.h -+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h -@@ -7,6 +7,7 @@ - #define SEC_AIV_SIZE 12 - #define SEC_IV_SIZE 24 - #define SEC_MAX_KEY_SIZE 64 -+#define SEC_MAX_AKEY_SIZE 128 - #define SEC_COMM_SCENE 0 - #define SEC_MIN_BLOCK_SZ 1 - -diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c -index 90551bf38b523..03d239cfdf8c6 100644 ---- a/drivers/crypto/hisilicon/sec2/sec_main.c -+++ b/drivers/crypto/hisilicon/sec2/sec_main.c -@@ -443,9 +443,11 @@ static int sec_engine_init(struct hisi_qm *qm) - - writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - -- /* Enable sm4 extra mode, as ctr/ecb */ -- writel_relaxed(SEC_BD_ERR_CHK_EN0, -- qm->io_base + SEC_BD_ERR_CHK_EN_REG0); -+ /* HW V2 enable sm4 extra mode, as ctr/ecb */ -+ if (qm->ver < QM_HW_V3) -+ writel_relaxed(SEC_BD_ERR_CHK_EN0, -+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0); -+ - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_BD_ERR_CHK_EN1, - qm->io_base + SEC_BD_ERR_CHK_EN_REG1); -diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c -index 057273769f264..3dbe5405d17bc 100644 ---- a/drivers/crypto/hisilicon/sgl.c -+++ b/drivers/crypto/hisilicon/sgl.c -@@ -122,9 +122,8 @@ err_free_mem: - for (j = 0; j < i; j++) { - dma_free_coherent(dev, block_size, block[j].sgl, - block[j].sgl_dma); -- memset(block + j, 0, sizeof(*block)); - } -- kfree(pool); -+ kfree_sensitive(pool); - return ERR_PTR(-ENOMEM); - } - EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); -diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c -index 9520a4113c81e..a91e6e0e9c693 100644 ---- a/drivers/crypto/hisilicon/zip/zip_crypto.c -+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c -@@ -122,12 +122,12 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) - if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) - return -EINVAL; - -- return param_set_int(val, kp); -+ return param_set_ushort(val, kp); - } - - static const struct kernel_param_ops sgl_sge_nr_ops = { - .set = sgl_sge_nr_set, -- .get = param_get_int, -+ .get = param_get_ushort, - }; - - static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; -diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c -index aa4c7b2af3e2e..34b41cbcfa8de 100644 ---- a/drivers/crypto/img-hash.c -+++ b/drivers/crypto/img-hash.c -@@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) - static void img_hash_dma_task(unsigned long d) - { - struct img_hash_dev *hdev = (struct img_hash_dev *)d; -- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); -+ struct img_hash_request_ctx *ctx; - u8 *addr; - size_t nbytes, bleft, wsend, len, tbc; - struct scatterlist tsg; - -- if (!hdev->req || !ctx->sg) -+ if (!hdev->req) -+ return; -+ -+ ctx = ahash_request_ctx(hdev->req); -+ if (!ctx->sg) - return; - - addr = sg_virt(ctx->sg); -diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c -index 9ff885d50edfc..7fa6c9144e495 100644 ---- a/drivers/crypto/inside-secure/safexcel.c 
-+++ b/drivers/crypto/inside-secure/safexcel.c -@@ -1631,19 +1631,23 @@ static int safexcel_probe_generic(void *pdev, - &priv->ring[i].rdr); - if (ret) { - dev_err(dev, "Failed to initialize rings\n"); -- return ret; -+ goto err_cleanup_rings; - } - - priv->ring[i].rdr_req = devm_kcalloc(dev, - EIP197_DEFAULT_RING_SIZE, - sizeof(*priv->ring[i].rdr_req), - GFP_KERNEL); -- if (!priv->ring[i].rdr_req) -- return -ENOMEM; -+ if (!priv->ring[i].rdr_req) { -+ ret = -ENOMEM; -+ goto err_cleanup_rings; -+ } - - ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); -- if (!ring_irq) -- return -ENOMEM; -+ if (!ring_irq) { -+ ret = -ENOMEM; -+ goto err_cleanup_rings; -+ } - - ring_irq->priv = priv; - ring_irq->ring = i; -@@ -1657,7 +1661,8 @@ static int safexcel_probe_generic(void *pdev, - ring_irq); - if (irq < 0) { - dev_err(dev, "Failed to get IRQ ID for ring %d\n", i); -- return irq; -+ ret = irq; -+ goto err_cleanup_rings; - } - - priv->ring[i].irq = irq; -@@ -1669,8 +1674,10 @@ static int safexcel_probe_generic(void *pdev, - snprintf(wq_name, 9, "wq_ring%d", i); - priv->ring[i].workqueue = - create_singlethread_workqueue(wq_name); -- if (!priv->ring[i].workqueue) -- return -ENOMEM; -+ if (!priv->ring[i].workqueue) { -+ ret = -ENOMEM; -+ goto err_cleanup_rings; -+ } - - priv->ring[i].requests = 0; - priv->ring[i].busy = false; -@@ -1687,16 +1694,26 @@ static int safexcel_probe_generic(void *pdev, - ret = safexcel_hw_init(priv); - if (ret) { - dev_err(dev, "HW init failed (%d)\n", ret); -- return ret; -+ goto err_cleanup_rings; - } - - ret = safexcel_register_algorithms(priv); - if (ret) { - dev_err(dev, "Failed to register algorithms (%d)\n", ret); -- return ret; -+ goto err_cleanup_rings; - } - - return 0; -+ -+err_cleanup_rings: -+ for (i = 0; i < priv->config.rings; i++) { -+ if (priv->ring[i].irq) -+ irq_set_affinity_hint(priv->ring[i].irq, NULL); -+ if (priv->ring[i].workqueue) -+ destroy_workqueue(priv->ring[i].workqueue); -+ } -+ -+ return ret; - } - - static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) -@@ -1831,6 +1848,8 @@ static const struct of_device_id safexcel_of_match_table[] = { - {}, - }; - -+MODULE_DEVICE_TABLE(of, safexcel_of_match_table); -+ - static struct platform_driver crypto_safexcel = { - .probe = safexcel_probe, - .remove = safexcel_remove, -diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c -index bc60b58022564..2124416742f84 100644 ---- a/drivers/crypto/inside-secure/safexcel_hash.c -+++ b/drivers/crypto/inside-secure/safexcel_hash.c -@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, - u32 x; - - x = ipad[i] ^ ipad[i + 4]; -- cache[i] ^= swab(x); -+ cache[i] ^= swab32(x); - } - } - cache_len = AES_BLOCK_SIZE; -@@ -821,7 +821,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) - u32 *result = (void *)areq->result; - - /* K3 */ -- result[i] = swab(ctx->base.ipad.word[i + 4]); -+ result[i] = swab32(ctx->base.ipad.word[i + 4]); - } - areq->result[0] ^= 0x80; // 10- padding - crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result); -@@ -2106,7 +2106,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE, - "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); - for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) -- ctx->base.ipad.word[i] = swab(key_tmp[i]); -+ ctx->base.ipad.word[i] = swab32(key_tmp[i]); - - 
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & -@@ -2189,7 +2189,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, - return ret; - - for (i = 0; i < len / sizeof(u32); i++) -- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]); -+ ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]); - - /* precompute the CMAC key material */ - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); -diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c -index b739d3b873dcf..0f37dfd42d850 100644 ---- a/drivers/crypto/marvell/cesa/cipher.c -+++ b/drivers/crypto/marvell/cesa/cipher.c -@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, - static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, - const u8 *key, unsigned int len) - { -- struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher); -+ struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher); - int err; - - err = verify_skcipher_des3_key(cipher, key); -@@ -624,7 +624,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { - .decrypt = mv_cesa_ecb_des3_ede_decrypt, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, -- .ivsize = DES3_EDE_BLOCK_SIZE, - .base = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "mv-ecb-des3-ede", -diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c -index 40b482198ebc5..a765eefb18c2f 100644 ---- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c -+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c -@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev, - struct tar_ucode_info_t *tar_info; - struct otx_cpt_ucode_hdr *ucode_hdr; - int ucode_type, ucode_size; -+ unsigned int code_length; - - /* - * If size is less than microcode header size then don't report -@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev, - if (get_ucode_type(ucode_hdr, &ucode_type)) - return 0; - -- ucode_size = ntohl(ucode_hdr->code_length) * 2; -+ code_length = ntohl(ucode_hdr->code_length); -+ if (code_length >= INT_MAX / 2) { -+ dev_err(dev, "Invalid code_length %u\n", code_length); -+ return -EINVAL; -+ } -+ -+ ucode_size = code_length * 2; - if (!ucode_size || (size < round_up(ucode_size, 16) + - sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size\n", filename); -@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, - { - struct otx_cpt_ucode_hdr *ucode_hdr; - const struct firmware *fw; -+ unsigned int code_length; - int ret; - - set_ucode_filename(ucode, ucode_filename); -@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, - ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data; - memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); - ucode->ver_num = ucode_hdr->ver_num; -- ucode->size = ntohl(ucode_hdr->code_length) * 2; -+ code_length = ntohl(ucode_hdr->code_length); -+ if (code_length >= INT_MAX / 2) { -+ dev_err(dev, "Ucode invalid code_length %u\n", code_length); -+ ret = -EINVAL; -+ goto release_fw; -+ } -+ ucode->size = code_length * 2; - if (!ucode->size || (fw->size < round_up(ucode->size, 16) - + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size\n", ucode_filename); -diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c 
b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c -index 146a55ac4b9b0..be1ad55a208f6 100644 ---- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c -+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c -@@ -494,12 +494,11 @@ static ssize_t kvf_limits_store(struct device *dev, - { - struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev); - int lfs_num; -+ int ret; - -- if (kstrtoint(buf, 0, &lfs_num)) { -- dev_err(dev, "lfs count %d must be in range [1 - %d]\n", -- lfs_num, num_online_cpus()); -- return -EINVAL; -- } -+ ret = kstrtoint(buf, 0, &lfs_num); -+ if (ret) -+ return ret; - if (lfs_num < 1 || lfs_num > num_online_cpus()) { - dev_err(dev, "lfs count %d must be in range [1 - %d]\n", - lfs_num, num_online_cpus()); -diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c -index dff34b3ec09e1..7c1b92aaab398 100644 ---- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c -+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c -@@ -29,7 +29,8 @@ static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev, - bool found = false; - int i; - -- if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) { -+ if (eng_grp->g->engs_num < 0 || -+ eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) { - dev_err(dev, "unsupported number of engines %d on octeontx2\n", - eng_grp->g->engs_num); - return bmap; -diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c -index a72723455df72..570074e23b60e 100644 ---- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c -+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c -@@ -1274,6 +1274,7 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc) - req->base.complete, req->base.data); - aead_request_set_crypt(&rctx->fbk_req, req->src, - req->dst, req->cryptlen, req->iv); -+ aead_request_set_ad(&rctx->fbk_req, req->assoclen); - ret = is_enc ? 
crypto_aead_encrypt(&rctx->fbk_req) : - crypto_aead_decrypt(&rctx->fbk_req); - } else { -@@ -1633,16 +1634,13 @@ static inline int cpt_register_algs(void) - { - int i, err = 0; - -- if (!IS_ENABLED(CONFIG_DM_CRYPT)) { -- for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) -- otx2_cpt_skciphers[i].base.cra_flags &= -- ~CRYPTO_ALG_DEAD; -+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++) -+ otx2_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; - -- err = crypto_register_skciphers(otx2_cpt_skciphers, -- ARRAY_SIZE(otx2_cpt_skciphers)); -- if (err) -- return err; -- } -+ err = crypto_register_skciphers(otx2_cpt_skciphers, -+ ARRAY_SIZE(otx2_cpt_skciphers)); -+ if (err) -+ return err; - - for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++) - otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD; -diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c -index d19e5ffb5104b..d6f9e2fe863d7 100644 ---- a/drivers/crypto/mxs-dcp.c -+++ b/drivers/crypto/mxs-dcp.c -@@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) - memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); - } - -- for_each_sg(req->src, src, sg_nents(src), i) { -+ for_each_sg(req->src, src, sg_nents(req->src), i) { - src_buf = sg_virt(src); - len = sg_dma_len(src); - tlen += len; -diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c -index 3b0bf6fea491a..b4db560105a9e 100644 ---- a/drivers/crypto/n2_core.c -+++ b/drivers/crypto/n2_core.c -@@ -1229,6 +1229,7 @@ struct n2_hash_tmpl { - const u8 *hash_init; - u8 hw_op_hashsz; - u8 digest_size; -+ u8 statesize; - u8 block_size; - u8 auth_type; - u8 hmac_type; -@@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { - .hmac_type = AUTH_TYPE_HMAC_MD5, - .hw_op_hashsz = MD5_DIGEST_SIZE, - .digest_size = MD5_DIGEST_SIZE, -+ .statesize = sizeof(struct md5_state), - .block_size = MD5_HMAC_BLOCK_SIZE }, - { .name = "sha1", - .hash_zero = sha1_zero_message_hash, -@@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { - .hmac_type = AUTH_TYPE_HMAC_SHA1, - .hw_op_hashsz = SHA1_DIGEST_SIZE, - .digest_size = SHA1_DIGEST_SIZE, -+ .statesize = sizeof(struct sha1_state), - .block_size = SHA1_BLOCK_SIZE }, - { .name = "sha256", - .hash_zero = sha256_zero_message_hash, -@@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { - .hmac_type = AUTH_TYPE_HMAC_SHA256, - .hw_op_hashsz = SHA256_DIGEST_SIZE, - .digest_size = SHA256_DIGEST_SIZE, -+ .statesize = sizeof(struct sha256_state), - .block_size = SHA256_BLOCK_SIZE }, - { .name = "sha224", - .hash_zero = sha224_zero_message_hash, -@@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { - .hmac_type = AUTH_TYPE_RESERVED, - .hw_op_hashsz = SHA256_DIGEST_SIZE, - .digest_size = SHA224_DIGEST_SIZE, -+ .statesize = sizeof(struct sha256_state), - .block_size = SHA224_BLOCK_SIZE }, - }; - #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) -@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) - - halg = &ahash->halg; - halg->digestsize = tmpl->digest_size; -+ halg->statesize = tmpl->statesize; - - base = &halg->base; - snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); -diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile -index d00181a26dd65..483cef62acee8 100644 ---- a/drivers/crypto/nx/Makefile -+++ b/drivers/crypto/nx/Makefile -@@ -1,7 +1,6 @@ - # SPDX-License-Identifier: GPL-2.0 - obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o - nx-crypto-objs := nx.o \ -- nx_debugfs.o \ - 
nx-aes-cbc.o \ - nx-aes-ecb.o \ - nx-aes-gcm.o \ -@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \ - nx-sha256.o \ - nx-sha512.o - -+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o - obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o - obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o - nx-compress-objs := nx-842.o -diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c -index 32a036ada5d0a..f418817c0f43e 100644 ---- a/drivers/crypto/nx/nx-common-powernv.c -+++ b/drivers/crypto/nx/nx-common-powernv.c -@@ -827,7 +827,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, - goto err_out; - - vas_init_rx_win_attr(&rxattr, coproc->ct); -- rxattr.rx_fifo = (void *)rx_fifo; -+ rxattr.rx_fifo = rx_fifo; - rxattr.rx_fifo_size = fifo_size; - rxattr.lnotify_lpid = lpid; - rxattr.lnotify_pid = pid; -diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h -index c6233173c612e..2697baebb6a35 100644 ---- a/drivers/crypto/nx/nx.h -+++ b/drivers/crypto/nx/nx.h -@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, - void nx_debugfs_init(struct nx_crypto_driver *); - void nx_debugfs_fini(struct nx_crypto_driver *); - #else --#define NX_DEBUGFS_INIT(drv) (0) --#define NX_DEBUGFS_FINI(drv) (0) -+#define NX_DEBUGFS_INIT(drv) do {} while (0) -+#define NX_DEBUGFS_FINI(drv) do {} while (0) - #endif - - #define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL) -diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c -index 9b968ac4ee7b6..a196bb8b17010 100644 ---- a/drivers/crypto/omap-aes.c -+++ b/drivers/crypto/omap-aes.c -@@ -1302,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev) - - static int omap_aes_resume(struct device *dev) - { -- pm_runtime_resume_and_get(dev); -+ pm_runtime_get_sync(dev); - return 0; - } - #endif -diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c -index f6bf53c00b614..4ec6949a7ca9e 100644 ---- a/drivers/crypto/omap-sham.c -+++ b/drivers/crypto/omap-sham.c -@@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev) - - pm_runtime_enable(dev); - -- err = pm_runtime_get_sync(dev); -+ err = pm_runtime_resume_and_get(dev); - if (err < 0) { - dev_err(dev, "failed to get sync: %d\n", err); - goto err_pm; -diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c -index 33d8e50dcbdac..88c0ded411f15 100644 ---- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c -+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c -@@ -1,5 +1,6 @@ - // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) - /* Copyright(c) 2020 Intel Corporation */ -+#include - #include - #include - #include -@@ -161,6 +162,35 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); - } - -+static int adf_init_device(struct adf_accel_dev *accel_dev) -+{ -+ void __iomem *addr; -+ u32 status; -+ u32 csr; -+ int ret; -+ -+ addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; -+ -+ /* Temporarily mask PM interrupt */ -+ csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2); -+ csr |= ADF_4XXX_PM_SOU; -+ ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr); -+ -+ /* Set DRV_ACTIVE bit to power up the device */ -+ ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE); -+ -+ /* Poll status register to make sure the device is powered up */ -+ ret = read_poll_timeout(ADF_CSR_RD, status, -+ status & ADF_4XXX_PM_INIT_STATE, -+ 
ADF_4XXX_PM_POLL_DELAY_US, -+ ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr, -+ ADF_4XXX_PM_STATUS); -+ if (ret) -+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); -+ -+ return ret; -+} -+ - static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev) - { - return 0; -@@ -215,6 +245,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) - hw_data->exit_arb = adf_exit_arb; - hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; -+ hw_data->init_device = adf_init_device; - hw_data->reset_device = adf_reset_flr; - hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; - hw_data->uof_get_num_objs = uof_get_num_objs; -diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h -index 4fe2a776293c2..924bac6feb372 100644 ---- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h -+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h -@@ -62,6 +62,16 @@ - #define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) - #define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) - -+/* Power management */ -+#define ADF_4XXX_PM_POLL_DELAY_US 20 -+#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC -+#define ADF_4XXX_PM_STATUS (0x50A00C) -+#define ADF_4XXX_PM_INTERRUPT (0x50A028) -+#define ADF_4XXX_PM_DRV_ACTIVE BIT(20) -+#define ADF_4XXX_PM_INIT_STATE BIT(21) -+/* Power management source in ERRSOU2 and ERRMSK2 */ -+#define ADF_4XXX_PM_SOU BIT(18) -+ - /* Firmware Binaries */ - #define ADF_4XXX_FW "qat_4xxx.bin" - #define ADF_4XXX_MMP "qat_4xxx_mmp.bin" -diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile -index 9c57abdf56b78..fc477f0162135 100644 ---- a/drivers/crypto/qat/qat_common/Makefile -+++ b/drivers/crypto/qat/qat_common/Makefile -@@ -15,6 +15,7 @@ intel_qat-objs := adf_cfg.o \ - qat_crypto.o \ - qat_algs.o \ - qat_asym_algs.o \ -+ qat_algs_send.o \ - qat_uclo.o \ - qat_hal.o - -diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h -index 38c0af6d4e43e..580566cfcb04c 100644 ---- a/drivers/crypto/qat/qat_common/adf_accel_devices.h -+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h -@@ -166,6 +166,7 @@ struct adf_hw_device_data { - int (*init_arb)(struct adf_accel_dev *accel_dev); - void (*exit_arb)(struct adf_accel_dev *accel_dev); - const u32 *(*get_arb_mapping)(void); -+ int (*init_device)(struct adf_accel_dev *accel_dev); - void (*disable_iov)(struct adf_accel_dev *accel_dev); - void (*configure_iov_threads)(struct adf_accel_dev *accel_dev, - bool enable); -diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h -index 4261749fae8d4..75693ca4afea1 100644 ---- a/drivers/crypto/qat/qat_common/adf_common_drv.h -+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h -@@ -49,11 +49,6 @@ struct service_hndl { - struct list_head list; - }; - --static inline int get_current_node(void) --{ -- return topology_physical_package_id(raw_smp_processor_id()); --} -- - int adf_service_register(struct service_hndl *service); - int adf_service_unregister(struct service_hndl *service); - -diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c -index 9e560c7d41630..0ba62b286a85e 100644 ---- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c -+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c -@@ -161,21 +161,33 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) - u32 legfuses; - u32 capabilities = 
ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | - ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | -- ICP_ACCEL_CAPABILITIES_AUTHENTICATION; -+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION | -+ ICP_ACCEL_CAPABILITIES_CIPHER | -+ ICP_ACCEL_CAPABILITIES_COMPRESSION; - - /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses); - -- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) -+ /* A set bit in legfuses means the feature is OFF in this SKU */ -+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) { - capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; -+ } - if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) - capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; -- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) -+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) { - capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; -+ } -+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; - - if ((straps | fuses) & ADF_POWERGATE_PKE) - capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; - -+ if ((straps | fuses) & ADF_POWERGATE_DC) -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; -+ - return capabilities; - } - EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap); -diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h -index 756b0ddfac5e1..2aaf02ccbb3af 100644 ---- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h -+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h -@@ -111,6 +111,7 @@ do { \ - (ADF_ARB_REG_SLOT * (index)), value) - - /* Power gating */ -+#define ADF_POWERGATE_DC BIT(23) - #define ADF_POWERGATE_PKE BIT(24) - - /* WDT timers -diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h -index b8fca1ff7aab0..0b7086cae00bd 100644 ---- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h -+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h -@@ -99,7 +99,7 @@ do { \ - * Timeout is in cycles. Clock speed may vary across products but this - * value should be a few milli-seconds. 
- */ --#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 -+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL - #define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 - #define ADF_SSMWDTL_OFFSET 0x54 - #define ADF_SSMWDTH_OFFSET 0x5C -diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c -index 60bc7b991d351..e3749e5817d94 100644 ---- a/drivers/crypto/qat/qat_common/adf_init.c -+++ b/drivers/crypto/qat/qat_common/adf_init.c -@@ -79,6 +79,11 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) - return -EFAULT; - } - -+ if (hw_data->init_device && hw_data->init_device(accel_dev)) { -+ dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n"); -+ return -EFAULT; -+ } -+ - if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { - dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n"); - return -EFAULT; -diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c -index 976b9ab7617cd..7ec81989beb03 100644 ---- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c -+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c -@@ -117,37 +117,19 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) - - mutex_lock(lock); - -- /* Check if PF2VF CSR is in use by remote function */ -+ /* Check if the PFVF CSR is in use by remote function */ - val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); - if ((val & remote_in_use_mask) == remote_in_use_pattern) { - dev_dbg(&GET_DEV(accel_dev), -- "PF2VF CSR in use by remote function\n"); -+ "PFVF CSR in use by remote function\n"); - ret = -EBUSY; - goto out; - } - -- /* Attempt to get ownership of PF2VF CSR */ - msg &= ~local_in_use_mask; - msg |= local_in_use_pattern; -- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg); - -- /* Wait in case remote func also attempting to get ownership */ -- msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY); -- -- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); -- if ((val & local_in_use_mask) != local_in_use_pattern) { -- dev_dbg(&GET_DEV(accel_dev), -- "PF2VF CSR in use by remote - collision detected\n"); -- ret = -EBUSY; -- goto out; -- } -- -- /* -- * This function now owns the PV2VF CSR. The IN_USE_BY pattern must -- * remain in the PF2VF CSR for all writes including ACK from remote -- * until this local function relinquishes the CSR. Send the message -- * by interrupting the remote. -- */ -+ /* Attempt to get ownership of the PFVF CSR */ - ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit); - - /* Wait for confirmation from remote func it received the message */ -@@ -162,7 +144,14 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) - ret = -EIO; - } - -- /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */ -+ if (val != msg) { -+ dev_dbg(&GET_DEV(accel_dev), -+ "Collision - PFVF CSR overwritten by remote function\n"); -+ ret = -EIO; -+ goto out; -+ } -+ -+ /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */ - ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask); - out: - mutex_unlock(lock); -@@ -170,12 +159,13 @@ out: - } - - /** -- * adf_iov_putmsg() - send PF2VF message -+ * adf_iov_putmsg() - send PFVF message - * @accel_dev: Pointer to acceleration device. 
- * @msg: Message to send -- * @vf_nr: VF number to which the message will be sent -+ * @vf_nr: VF number to which the message will be sent if on PF, ignored -+ * otherwise - * -- * Function sends a message from the PF to a VF -+ * Function sends a message through the PFVF channel - * - * Return: 0 on success, error code otherwise. - */ -@@ -204,6 +194,11 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info) - - /* Read message from the VF */ - msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr)); -+ if (!(msg & ADF_VF2PF_INT)) { -+ dev_info(&GET_DEV(accel_dev), -+ "Spurious VF2PF interrupt, msg %X. Ignored\n", msg); -+ goto out; -+ } - - /* To ACK, clear the VF2PFINT bit */ - msg &= ~ADF_VF2PF_INT; -@@ -287,6 +282,7 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info) - if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr)) - dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n"); - -+out: - /* re-enable interrupt on PF from this VF */ - adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr)); - -diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c -index 8ba28409fb74b..630d0483c4e0a 100644 ---- a/drivers/crypto/qat/qat_common/adf_transport.c -+++ b/drivers/crypto/qat/qat_common/adf_transport.c -@@ -8,6 +8,9 @@ - #include "adf_cfg.h" - #include "adf_common_drv.h" - -+#define ADF_MAX_RING_THRESHOLD 80 -+#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100) -+ - static inline u32 adf_modulo(u32 data, u32 shift) - { - u32 div = data >> shift; -@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) - bank->irq_mask); - } - -+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring) -+{ -+ return atomic_read(ring->inflights) > ring->threshold; -+} -+ - int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) - { - struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); -@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - struct adf_etr_bank_data *bank; - struct adf_etr_ring_data *ring; - char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; -+ int max_inflights; - u32 ring_num; - int ret; - -@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); - ring->head = 0; - ring->tail = 0; -+ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); -+ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD); - atomic_set(ring->inflights, 0); - ret = adf_init_ring(ring); - if (ret) -diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h -index 2c95f1697c76f..e6ef6f9b76913 100644 ---- a/drivers/crypto/qat/qat_common/adf_transport.h -+++ b/drivers/crypto/qat/qat_common/adf_transport.h -@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, - const char *ring_name, adf_callback_fn callback, - int poll_mode, struct adf_etr_ring_data **ring_ptr); - -+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring); - int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); - void adf_remove_ring(struct adf_etr_ring_data *ring); - #endif -diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h -index 501bcf0f1809a..8b2c92ba7ca1f 100644 ---- a/drivers/crypto/qat/qat_common/adf_transport_internal.h -+++ 
b/drivers/crypto/qat/qat_common/adf_transport_internal.h -@@ -22,6 +22,7 @@ struct adf_etr_ring_data { - spinlock_t lock; /* protects ring data struct */ - u16 head; - u16 tail; -+ u32 threshold; - u8 ring_number; - u8 ring_size; - u8 msg_size; -diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c -index 7828a6573f3e2..2e300c255ab94 100644 ---- a/drivers/crypto/qat/qat_common/adf_vf_isr.c -+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c -@@ -101,6 +101,11 @@ static void adf_pf2vf_bh_handler(void *data) - - /* Read the message from PF */ - msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0)); -+ if (!(msg & ADF_PF2VF_INT)) { -+ dev_info(&GET_DEV(accel_dev), -+ "Spurious PF2VF interrupt, msg %X. Ignored\n", msg); -+ goto out; -+ } - - if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) - /* Ignore legacy non-system (non-kernel) PF2VF messages */ -@@ -149,6 +154,7 @@ static void adf_pf2vf_bh_handler(void *data) - msg &= ~ADF_PF2VF_INT; - ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg); - -+out: - /* Re-enable PF2VF interrupts */ - adf_enable_pf2vf_interrupts(accel_dev); - return; -diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c -index f998ed58457c2..f56ee4cc5ae8b 100644 ---- a/drivers/crypto/qat/qat_common/qat_algs.c -+++ b/drivers/crypto/qat/qat_common/qat_algs.c -@@ -17,7 +17,7 @@ - #include - #include - #include "adf_accel_devices.h" --#include "adf_transport.h" -+#include "qat_algs_send.h" - #include "adf_common_drv.h" - #include "qat_crypto.h" - #include "icp_qat_hw.h" -@@ -46,19 +46,6 @@ - static DEFINE_MUTEX(algs_lock); - static unsigned int active_devs; - --struct qat_alg_buf { -- u32 len; -- u32 resrvd; -- u64 addr; --} __packed; -- --struct qat_alg_buf_list { -- u64 resrvd; -- u32 num_bufs; -- u32 num_mapped_bufs; -- struct qat_alg_buf bufers[]; --} __packed __aligned(64); -- - /* Common content descriptor */ - struct qat_alg_cd { - union { -@@ -447,8 +434,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, - } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) { - ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE); -- keylen = round_up(keylen, 16); - memcpy(cd->ucs_aes.key, key, keylen); -+ keylen = round_up(keylen, 16); - } else { - memcpy(cd->aes.key, key, keylen); - } -@@ -618,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, - { - struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct qat_crypto_instance *inst = NULL; -- int node = get_current_node(); -+ int node = numa_node_id(); - struct device *dev; - int ret; - -@@ -686,14 +673,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, - dma_addr_t blpout = qat_req->buf.bloutp; - size_t sz = qat_req->buf.sz; - size_t sz_out = qat_req->buf.sz_out; -+ int bl_dma_dir; - int i; - -+ bl_dma_dir = blp != blpout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; -+ - for (i = 0; i < bl->num_bufs; i++) - dma_unmap_single(dev, bl->bufers[i].addr, -- bl->bufers[i].len, DMA_BIDIRECTIONAL); -+ bl->bufers[i].len, bl_dma_dir); - - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); -- kfree(bl); -+ -+ if (!qat_req->buf.sgl_src_valid) -+ kfree(bl); -+ - if (blp != blpout) { - /* If out of place operation dma unmap only data */ - int bufless = blout->num_bufs - blout->num_mapped_bufs; -@@ -701,17 +694,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, - for (i = bufless; i < blout->num_bufs; i++) { - dma_unmap_single(dev, blout->bufers[i].addr, - blout->bufers[i].len, -- DMA_BIDIRECTIONAL); -+ DMA_FROM_DEVICE); - } - dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); -- kfree(blout); -+ -+ if (!qat_req->buf.sgl_dst_valid) -+ kfree(blout); - } - } - - static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct scatterlist *sgl, - struct scatterlist *sglout, -- struct qat_crypto_request *qat_req) -+ struct qat_crypto_request *qat_req, -+ gfp_t flags) - { - struct device *dev = &GET_DEV(inst->accel_dev); - int i, sg_nctr = 0; -@@ -721,15 +717,27 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - dma_addr_t blp = DMA_MAPPING_ERROR; - dma_addr_t bloutp = DMA_MAPPING_ERROR; - struct scatterlist *sg; -- size_t sz_out, sz = struct_size(bufl, bufers, n + 1); -+ size_t sz_out, sz = struct_size(bufl, bufers, n); -+ int node = dev_to_node(&GET_DEV(inst->accel_dev)); -+ int bufl_dma_dir; - - if (unlikely(!n)) - return -EINVAL; - -- bufl = kzalloc_node(sz, GFP_ATOMIC, -- dev_to_node(&GET_DEV(inst->accel_dev))); -- if (unlikely(!bufl)) -- return -ENOMEM; -+ qat_req->buf.sgl_src_valid = false; -+ qat_req->buf.sgl_dst_valid = false; -+ -+ if (n > QAT_MAX_BUFF_DESC) { -+ bufl = kzalloc_node(sz, flags, node); -+ if (unlikely(!bufl)) -+ return -ENOMEM; -+ } else { -+ bufl = &qat_req->buf.sgl_src.sgl_hdr; -+ memset(bufl, 0, sizeof(struct qat_alg_buf_list)); -+ qat_req->buf.sgl_src_valid = true; -+ } -+ -+ bufl_dma_dir = sgl != sglout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; - - for_each_sg(sgl, sg, n, i) - bufl->bufers[i].addr = DMA_MAPPING_ERROR; -@@ -742,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - - bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, -- DMA_BIDIRECTIONAL); -+ bufl_dma_dir); - bufl->bufers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) - goto err_in; -@@ -760,12 +768,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct qat_alg_buf *bufers; - - n = sg_nents(sglout); -- sz_out = struct_size(buflout, bufers, n + 1); -+ sz_out = struct_size(buflout, bufers, n); - sg_nctr = 0; -- buflout = kzalloc_node(sz_out, GFP_ATOMIC, -- dev_to_node(&GET_DEV(inst->accel_dev))); -- if (unlikely(!buflout)) -- goto err_in; -+ -+ if (n > QAT_MAX_BUFF_DESC) { -+ buflout = kzalloc_node(sz_out, flags, node); -+ if (unlikely(!buflout)) -+ goto err_in; -+ } else { -+ buflout = &qat_req->buf.sgl_dst.sgl_hdr; -+ memset(buflout, 0, sizeof(struct qat_alg_buf_list)); -+ qat_req->buf.sgl_dst_valid = true; -+ } - - bufers = buflout->bufers; - for_each_sg(sglout, sg, n, i) -@@ -779,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - - bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, -- DMA_BIDIRECTIONAL); -+ DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, bufers[y].addr))) - goto err_out; - bufers[y].len = sg->length; -@@ -809,8 +823,10 @@ err_out: - if (!dma_mapping_error(dev, buflout->bufers[i].addr)) - dma_unmap_single(dev, buflout->bufers[i].addr, - buflout->bufers[i].len, -- DMA_BIDIRECTIONAL); -- kfree(buflout); -+ DMA_FROM_DEVICE); -+ -+ if (!qat_req->buf.sgl_dst_valid) -+ kfree(buflout); - - err_in: - if (!dma_mapping_error(dev, blp)) -@@ -821,9 +837,10 @@ err_in: - if (!dma_mapping_error(dev, bufl->bufers[i].addr)) - dma_unmap_single(dev, bufl->bufers[i].addr, - bufl->bufers[i].len, -- DMA_BIDIRECTIONAL); -+ bufl_dma_dir); - -- kfree(bufl); -+ if (!qat_req->buf.sgl_src_valid) -+ kfree(bufl); - - dev_err(dev, "Failed to map buf for dma\n"); - return -ENOMEM; -@@ -925,8 +942,25 @@ void qat_alg_callback(void *resp) - struct icp_qat_fw_la_resp *qat_resp = resp; - struct qat_crypto_request *qat_req = - (void *)(__force long)qat_resp->opaque_data; -+ struct qat_instance_backlog *backlog = qat_req->alg_req.backlog; - - qat_req->cb(qat_resp, qat_req); -+ -+ qat_alg_send_backlog(backlog); -+} -+ -+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req, -+ struct qat_crypto_instance *inst, -+ struct crypto_async_request *base) -+{ -+ struct qat_alg_req *alg_req = &qat_req->alg_req; -+ -+ alg_req->fw_req = (u32 *)&qat_req->req; -+ alg_req->tx_ring = inst->sym_tx; -+ alg_req->base = base; -+ alg_req->backlog = &inst->backlog; -+ -+ return qat_alg_send_message(alg_req); - } - - static int qat_alg_aead_dec(struct aead_request *areq) -@@ -939,14 +973,15 @@ static int qat_alg_aead_dec(struct aead_request *areq) - struct icp_qat_fw_la_auth_req_params *auth_param; - struct icp_qat_fw_la_bulk_req *msg; - int digst_size = crypto_aead_authsize(aead_tfm); -- int ret, ctr = 0; -+ gfp_t f = qat_algs_alloc_flags(&areq->base); -+ int ret; - u32 cipher_len; - - cipher_len = areq->cryptlen - digst_size; - if (cipher_len % AES_BLOCK_SIZE != 0) - return -EINVAL; - -- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); -+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); - if (unlikely(ret)) - return ret; - -@@ -965,15 +1000,12 @@ static int 
qat_alg_aead_dec(struct aead_request *areq) - auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); - auth_param->auth_off = 0; - auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; -- do { -- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); -- } while (ret == -EAGAIN && ctr++ < 10); - -- if (ret == -EAGAIN) { -+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); -+ if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); -- return -EBUSY; -- } -- return -EINPROGRESS; -+ -+ return ret; - } - - static int qat_alg_aead_enc(struct aead_request *areq) -@@ -984,14 +1016,15 @@ static int qat_alg_aead_enc(struct aead_request *areq) - struct qat_crypto_request *qat_req = aead_request_ctx(areq); - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_auth_req_params *auth_param; -+ gfp_t f = qat_algs_alloc_flags(&areq->base); - struct icp_qat_fw_la_bulk_req *msg; - u8 *iv = areq->iv; -- int ret, ctr = 0; -+ int ret; - - if (areq->cryptlen % AES_BLOCK_SIZE != 0) - return -EINVAL; - -- ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); -+ ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); - if (unlikely(ret)) - return ret; - -@@ -1013,15 +1046,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) - auth_param->auth_off = 0; - auth_param->auth_len = areq->assoclen + areq->cryptlen; - -- do { -- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); -- } while (ret == -EAGAIN && ctr++ < 10); -- -- if (ret == -EAGAIN) { -+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); -+ if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); -- return -EBUSY; -- } -- return -EINPROGRESS; -+ -+ return ret; - } - - static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx, -@@ -1042,7 +1071,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx, - { - struct qat_crypto_instance *inst = NULL; - struct device *dev; -- int node = get_current_node(); -+ int node = numa_node_id(); - int ret; - - inst = qat_crypto_get_instance_node(node); -@@ -1173,13 +1202,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) - struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct qat_crypto_request *qat_req = skcipher_request_ctx(req); - struct icp_qat_fw_la_cipher_req_params *cipher_param; -+ gfp_t f = qat_algs_alloc_flags(&req->base); - struct icp_qat_fw_la_bulk_req *msg; -- int ret, ctr = 0; -+ int ret; - - if (req->cryptlen == 0) - return 0; - -- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); -+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); - if (unlikely(ret)) - return ret; - -@@ -1198,15 +1228,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) - - qat_alg_set_req_iv(qat_req); - -- do { -- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); -- } while (ret == -EAGAIN && ctr++ < 10); -- -- if (ret == -EAGAIN) { -+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); -+ if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); -- return -EBUSY; -- } -- return -EINPROGRESS; -+ -+ return ret; - } - - static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) -@@ -1242,13 +1268,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) - struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct qat_crypto_request *qat_req = skcipher_request_ctx(req); - struct icp_qat_fw_la_cipher_req_params *cipher_param; -+ gfp_t f = 
qat_algs_alloc_flags(&req->base); - struct icp_qat_fw_la_bulk_req *msg; -- int ret, ctr = 0; -+ int ret; - - if (req->cryptlen == 0) - return 0; - -- ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); -+ ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); - if (unlikely(ret)) - return ret; - -@@ -1268,15 +1295,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) - qat_alg_set_req_iv(qat_req); - qat_alg_update_iv(qat_req); - -- do { -- ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); -- } while (ret == -EAGAIN && ctr++ < 10); -- -- if (ret == -EAGAIN) { -+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); -+ if (ret == -ENOSPC) - qat_alg_free_bufl(ctx->inst, qat_req); -- return -EBUSY; -- } -- return -EINPROGRESS; -+ -+ return ret; - } - - static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) -diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c -new file mode 100644 -index 0000000000000..ff5b4347f7831 ---- /dev/null -+++ b/drivers/crypto/qat/qat_common/qat_algs_send.c -@@ -0,0 +1,86 @@ -+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) -+/* Copyright(c) 2022 Intel Corporation */ -+#include "adf_transport.h" -+#include "qat_algs_send.h" -+#include "qat_crypto.h" -+ -+#define ADF_MAX_RETRIES 20 -+ -+static int qat_alg_send_message_retry(struct qat_alg_req *req) -+{ -+ int ret = 0, ctr = 0; -+ -+ do { -+ ret = adf_send_message(req->tx_ring, req->fw_req); -+ } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES); -+ -+ if (ret == -EAGAIN) -+ return -ENOSPC; -+ -+ return -EINPROGRESS; -+} -+ -+void qat_alg_send_backlog(struct qat_instance_backlog *backlog) -+{ -+ struct qat_alg_req *req, *tmp; -+ -+ spin_lock_bh(&backlog->lock); -+ list_for_each_entry_safe(req, tmp, &backlog->list, list) { -+ if (adf_send_message(req->tx_ring, req->fw_req)) { -+ /* The HW ring is full. Do nothing. -+ * qat_alg_send_backlog() will be invoked again by -+ * another callback. 
-+ */ -+ break; -+ } -+ list_del(&req->list); -+ req->base->complete(req->base, -EINPROGRESS); -+ } -+ spin_unlock_bh(&backlog->lock); -+} -+ -+static void qat_alg_backlog_req(struct qat_alg_req *req, -+ struct qat_instance_backlog *backlog) -+{ -+ INIT_LIST_HEAD(&req->list); -+ -+ spin_lock_bh(&backlog->lock); -+ list_add_tail(&req->list, &backlog->list); -+ spin_unlock_bh(&backlog->lock); -+} -+ -+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) -+{ -+ struct qat_instance_backlog *backlog = req->backlog; -+ struct adf_etr_ring_data *tx_ring = req->tx_ring; -+ u32 *fw_req = req->fw_req; -+ -+ /* If any request is already backlogged, then add to backlog list */ -+ if (!list_empty(&backlog->list)) -+ goto enqueue; -+ -+ /* If ring is nearly full, then add to backlog list */ -+ if (adf_ring_nearly_full(tx_ring)) -+ goto enqueue; -+ -+ /* If adding request to HW ring fails, then add to backlog list */ -+ if (adf_send_message(tx_ring, fw_req)) -+ goto enqueue; -+ -+ return -EINPROGRESS; -+ -+enqueue: -+ qat_alg_backlog_req(req, backlog); -+ -+ return -EBUSY; -+} -+ -+int qat_alg_send_message(struct qat_alg_req *req) -+{ -+ u32 flags = req->base->flags; -+ -+ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG) -+ return qat_alg_send_message_maybacklog(req); -+ else -+ return qat_alg_send_message_retry(req); -+} -diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h -new file mode 100644 -index 0000000000000..5ce9f4f69d8ff ---- /dev/null -+++ b/drivers/crypto/qat/qat_common/qat_algs_send.h -@@ -0,0 +1,11 @@ -+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ -+/* Copyright(c) 2022 Intel Corporation */ -+#ifndef QAT_ALGS_SEND_H -+#define QAT_ALGS_SEND_H -+ -+#include "qat_crypto.h" -+ -+int qat_alg_send_message(struct qat_alg_req *req); -+void qat_alg_send_backlog(struct qat_instance_backlog *backlog); -+ -+#endif -diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c -index b0b78445418bb..4128200a90329 100644 ---- a/drivers/crypto/qat/qat_common/qat_asym_algs.c -+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c -@@ -12,6 +12,7 @@ - #include - #include "icp_qat_fw_pke.h" - #include "adf_accel_devices.h" -+#include "qat_algs_send.h" - #include "adf_transport.h" - #include "adf_common_drv.h" - #include "qat_crypto.h" -@@ -135,8 +136,23 @@ struct qat_asym_request { - } areq; - int err; - void (*cb)(struct icp_qat_fw_pke_resp *resp); -+ struct qat_alg_req alg_req; - } __aligned(64); - -+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req, -+ struct qat_crypto_instance *inst, -+ struct crypto_async_request *base) -+{ -+ struct qat_alg_req *alg_req = &qat_req->alg_req; -+ -+ alg_req->fw_req = (u32 *)&qat_req->req; -+ alg_req->tx_ring = inst->pke_tx; -+ alg_req->base = base; -+ alg_req->backlog = &inst->backlog; -+ -+ return qat_alg_send_message(alg_req); -+} -+ - static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) - { - struct qat_asym_request *req = (void *)(__force long)resp->opaque; -@@ -148,24 +164,18 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) - err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 
0 : -EINVAL; - - if (areq->src) { -- if (req->src_align) -- dma_free_coherent(dev, req->ctx.dh->p_size, -- req->src_align, req->in.dh.in.b); -- else -- dma_unmap_single(dev, req->in.dh.in.b, -- req->ctx.dh->p_size, DMA_TO_DEVICE); -+ dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size, -+ DMA_TO_DEVICE); -+ kfree_sensitive(req->src_align); - } - - areq->dst_len = req->ctx.dh->p_size; -+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, -+ DMA_FROM_DEVICE); - if (req->dst_align) { - scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, - areq->dst_len, 1); -- -- dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align, -- req->out.dh.r); -- } else { -- dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, -- DMA_FROM_DEVICE); -+ kfree_sensitive(req->dst_align); - } - - dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), -@@ -213,8 +223,10 @@ static int qat_dh_compute_value(struct kpp_request *req) - struct qat_asym_request *qat_req = - PTR_ALIGN(kpp_request_ctx(req), 64); - struct icp_qat_fw_pke_request *msg = &qat_req->req; -- int ret, ctr = 0; -+ gfp_t flags = qat_algs_alloc_flags(&req->base); - int n_input_params = 0; -+ u8 *vaddr; -+ int ret; - - if (unlikely(!ctx->xa)) - return -EINVAL; -@@ -223,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req) - req->dst_len = ctx->p_size; - return -EOVERFLOW; - } -+ -+ if (req->src_len > ctx->p_size) -+ return -EINVAL; -+ - memset(msg, '\0', sizeof(*msg)); - ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, - ICP_QAT_FW_COMN_REQ_FLAG_SET); -@@ -271,27 +287,24 @@ static int qat_dh_compute_value(struct kpp_request *req) - */ - if (sg_is_last(req->src) && req->src_len == ctx->p_size) { - qat_req->src_align = NULL; -- qat_req->in.dh.in.b = dma_map_single(dev, -- sg_virt(req->src), -- req->src_len, -- DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(dev, -- qat_req->in.dh.in.b))) -- return ret; -- -+ vaddr = sg_virt(req->src); - } else { - int shift = ctx->p_size - req->src_len; - -- qat_req->src_align = dma_alloc_coherent(dev, -- ctx->p_size, -- &qat_req->in.dh.in.b, -- GFP_KERNEL); -+ qat_req->src_align = kzalloc(ctx->p_size, flags); - if (unlikely(!qat_req->src_align)) - return ret; - - scatterwalk_map_and_copy(qat_req->src_align + shift, - req->src, 0, req->src_len, 0); -+ -+ vaddr = qat_req->src_align; - } -+ -+ qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b))) -+ goto unmap_src; - } - /* - * dst can be of any size in valid range, but HW expects it to be the -@@ -302,32 +315,30 @@ static int qat_dh_compute_value(struct kpp_request *req) - */ - if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) { - qat_req->dst_align = NULL; -- qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst), -- req->dst_len, -- DMA_FROM_DEVICE); -- -- if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) -- goto unmap_src; -- -+ vaddr = sg_virt(req->dst); - } else { -- qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, -- &qat_req->out.dh.r, -- GFP_KERNEL); -+ qat_req->dst_align = kzalloc(ctx->p_size, flags); - if (unlikely(!qat_req->dst_align)) - goto unmap_src; -+ -+ vaddr = qat_req->dst_align; - } -+ qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) -+ goto unmap_dst; - - qat_req->in.dh.in_tab[n_input_params] = 0; - qat_req->out.dh.out_tab[1] = 0; - /* Mapping in.in.b or in.in_g2.xa is the same */ -- qat_req->phy_in 
= dma_map_single(dev, &qat_req->in.dh.in.b, -- sizeof(qat_req->in.dh.in.b), -+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh, -+ sizeof(struct qat_dh_input_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) - goto unmap_dst; - -- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r, -- sizeof(qat_req->out.dh.r), -+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh, -+ sizeof(struct qat_dh_output_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) - goto unmap_in_params; -@@ -338,13 +349,13 @@ static int qat_dh_compute_value(struct kpp_request *req) - msg->input_param_count = n_input_params; - msg->output_param_count = 1; - -- do { -- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); -- } while (ret == -EBUSY && ctr++ < 100); -+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); -+ if (ret == -ENOSPC) -+ goto unmap_all; - -- if (!ret) -- return -EINPROGRESS; -+ return ret; - -+unmap_all: - if (!dma_mapping_error(dev, qat_req->phy_out)) - dma_unmap_single(dev, qat_req->phy_out, - sizeof(struct qat_dh_output_params), -@@ -355,23 +366,17 @@ unmap_in_params: - sizeof(struct qat_dh_input_params), - DMA_TO_DEVICE); - unmap_dst: -- if (qat_req->dst_align) -- dma_free_coherent(dev, ctx->p_size, qat_req->dst_align, -- qat_req->out.dh.r); -- else -- if (!dma_mapping_error(dev, qat_req->out.dh.r)) -- dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, -- DMA_FROM_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->out.dh.r)) -+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, -+ DMA_FROM_DEVICE); -+ kfree_sensitive(qat_req->dst_align); - unmap_src: - if (req->src) { -- if (qat_req->src_align) -- dma_free_coherent(dev, ctx->p_size, qat_req->src_align, -- qat_req->in.dh.in.b); -- else -- if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) -- dma_unmap_single(dev, qat_req->in.dh.in.b, -- ctx->p_size, -- DMA_TO_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) -+ dma_unmap_single(dev, qat_req->in.dh.in.b, -+ ctx->p_size, -+ DMA_TO_DEVICE); -+ kfree_sensitive(qat_req->src_align); - } - return ret; - } -@@ -420,14 +425,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) - static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx) - { - if (ctx->g) { -+ memset(ctx->g, 0, ctx->p_size); - dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g); - ctx->g = NULL; - } - if (ctx->xa) { -+ memset(ctx->xa, 0, ctx->p_size); - dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa); - ctx->xa = NULL; - } - if (ctx->p) { -+ memset(ctx->p, 0, ctx->p_size); - dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); - ctx->p = NULL; - } -@@ -480,11 +488,13 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) - { - struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); - struct qat_crypto_instance *inst = -- qat_crypto_get_instance_node(get_current_node()); -+ qat_crypto_get_instance_node(numa_node_id()); - - if (!inst) - return -EINVAL; - -+ kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); -+ - ctx->p_size = 0; - ctx->g2 = false; - ctx->inst = inst; -@@ -510,23 +520,19 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) - - err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 
0 : -EINVAL; - -- if (req->src_align) -- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align, -- req->in.rsa.enc.m); -- else -- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, -- DMA_TO_DEVICE); -+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, -+ DMA_TO_DEVICE); -+ -+ kfree_sensitive(req->src_align); - - areq->dst_len = req->ctx.rsa->key_sz; -+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, -+ DMA_FROM_DEVICE); - if (req->dst_align) { - scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, - areq->dst_len, 1); - -- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align, -- req->out.rsa.enc.c); -- } else { -- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, -- DMA_FROM_DEVICE); -+ kfree_sensitive(req->dst_align); - } - - dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), -@@ -542,8 +548,11 @@ void qat_alg_asym_callback(void *_resp) - { - struct icp_qat_fw_pke_resp *resp = _resp; - struct qat_asym_request *areq = (void *)(__force long)resp->opaque; -+ struct qat_instance_backlog *backlog = areq->alg_req.backlog; - - areq->cb(resp); -+ -+ qat_alg_send_backlog(backlog); - } - - #define PKE_RSA_EP_512 0x1c161b21 -@@ -642,7 +651,9 @@ static int qat_rsa_enc(struct akcipher_request *req) - struct qat_asym_request *qat_req = - PTR_ALIGN(akcipher_request_ctx(req), 64); - struct icp_qat_fw_pke_request *msg = &qat_req->req; -- int ret, ctr = 0; -+ gfp_t flags = qat_algs_alloc_flags(&req->base); -+ u8 *vaddr; -+ int ret; - - if (unlikely(!ctx->n || !ctx->e)) - return -EINVAL; -@@ -651,6 +662,10 @@ static int qat_rsa_enc(struct akcipher_request *req) - req->dst_len = ctx->key_sz; - return -EOVERFLOW; - } -+ -+ if (req->src_len > ctx->key_sz) -+ return -EINVAL; -+ - memset(msg, '\0', sizeof(*msg)); - ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, - ICP_QAT_FW_COMN_REQ_FLAG_SET); -@@ -679,50 +694,49 @@ static int qat_rsa_enc(struct akcipher_request *req) - */ - if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { - qat_req->src_align = NULL; -- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src), -- req->src_len, DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) -- return ret; -- -+ vaddr = sg_virt(req->src); - } else { - int shift = ctx->key_sz - req->src_len; - -- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, -- &qat_req->in.rsa.enc.m, -- GFP_KERNEL); -+ qat_req->src_align = kzalloc(ctx->key_sz, flags); - if (unlikely(!qat_req->src_align)) - return ret; - - scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, - 0, req->src_len, 0); -+ vaddr = qat_req->src_align; - } -- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { -- qat_req->dst_align = NULL; -- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst), -- req->dst_len, -- DMA_FROM_DEVICE); - -- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) -- goto unmap_src; -+ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) -+ goto unmap_src; - -+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { -+ qat_req->dst_align = NULL; -+ vaddr = sg_virt(req->dst); - } else { -- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, -- &qat_req->out.rsa.enc.c, -- GFP_KERNEL); -+ qat_req->dst_align = kzalloc(ctx->key_sz, flags); - if (unlikely(!qat_req->dst_align)) - goto unmap_src; -- -+ vaddr = qat_req->dst_align; - } -+ -+ qat_req->out.rsa.enc.c = dma_map_single(dev, 
vaddr, ctx->key_sz, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) -+ goto unmap_dst; -+ - qat_req->in.rsa.in_tab[3] = 0; - qat_req->out.rsa.out_tab[1] = 0; -- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, -- sizeof(qat_req->in.rsa.enc.m), -+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, -+ sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) - goto unmap_dst; - -- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, -- sizeof(qat_req->out.rsa.enc.c), -+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, -+ sizeof(struct qat_rsa_output_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) - goto unmap_in_params; -@@ -732,13 +746,14 @@ static int qat_rsa_enc(struct akcipher_request *req) - msg->pke_mid.opaque = (u64)(__force long)qat_req; - msg->input_param_count = 3; - msg->output_param_count = 1; -- do { -- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); -- } while (ret == -EBUSY && ctr++ < 100); - -- if (!ret) -- return -EINPROGRESS; -+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); -+ if (ret == -ENOSPC) -+ goto unmap_all; -+ -+ return ret; - -+unmap_all: - if (!dma_mapping_error(dev, qat_req->phy_out)) - dma_unmap_single(dev, qat_req->phy_out, - sizeof(struct qat_rsa_output_params), -@@ -749,21 +764,15 @@ unmap_in_params: - sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - unmap_dst: -- if (qat_req->dst_align) -- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, -- qat_req->out.rsa.enc.c); -- else -- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) -- dma_unmap_single(dev, qat_req->out.rsa.enc.c, -- ctx->key_sz, DMA_FROM_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) -+ dma_unmap_single(dev, qat_req->out.rsa.enc.c, -+ ctx->key_sz, DMA_FROM_DEVICE); -+ kfree_sensitive(qat_req->dst_align); - unmap_src: -- if (qat_req->src_align) -- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, -- qat_req->in.rsa.enc.m); -- else -- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) -- dma_unmap_single(dev, qat_req->in.rsa.enc.m, -- ctx->key_sz, DMA_TO_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) -+ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz, -+ DMA_TO_DEVICE); -+ kfree_sensitive(qat_req->src_align); - return ret; - } - -@@ -776,7 +785,9 @@ static int qat_rsa_dec(struct akcipher_request *req) - struct qat_asym_request *qat_req = - PTR_ALIGN(akcipher_request_ctx(req), 64); - struct icp_qat_fw_pke_request *msg = &qat_req->req; -- int ret, ctr = 0; -+ gfp_t flags = qat_algs_alloc_flags(&req->base); -+ u8 *vaddr; -+ int ret; - - if (unlikely(!ctx->n || !ctx->d)) - return -EINVAL; -@@ -785,6 +796,10 @@ static int qat_rsa_dec(struct akcipher_request *req) - req->dst_len = ctx->key_sz; - return -EOVERFLOW; - } -+ -+ if (req->src_len > ctx->key_sz) -+ return -EINVAL; -+ - memset(msg, '\0', sizeof(*msg)); - ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, - ICP_QAT_FW_COMN_REQ_FLAG_SET); -@@ -823,54 +838,51 @@ static int qat_rsa_dec(struct akcipher_request *req) - */ - if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { - qat_req->src_align = NULL; -- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), -- req->dst_len, DMA_TO_DEVICE); -- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) -- return ret; -- -+ vaddr = sg_virt(req->src); - } else { - int shift = ctx->key_sz - req->src_len; - -- qat_req->src_align = 
dma_alloc_coherent(dev, ctx->key_sz, -- &qat_req->in.rsa.dec.c, -- GFP_KERNEL); -+ qat_req->src_align = kzalloc(ctx->key_sz, flags); - if (unlikely(!qat_req->src_align)) - return ret; - - scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, - 0, req->src_len, 0); -+ vaddr = qat_req->src_align; - } -- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { -- qat_req->dst_align = NULL; -- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), -- req->dst_len, -- DMA_FROM_DEVICE); - -- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) -- goto unmap_src; -+ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) -+ goto unmap_src; - -+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { -+ qat_req->dst_align = NULL; -+ vaddr = sg_virt(req->dst); - } else { -- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, -- &qat_req->out.rsa.dec.m, -- GFP_KERNEL); -+ qat_req->dst_align = kzalloc(ctx->key_sz, flags); - if (unlikely(!qat_req->dst_align)) - goto unmap_src; -- -+ vaddr = qat_req->dst_align; - } -+ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) -+ goto unmap_dst; - - if (ctx->crt_mode) - qat_req->in.rsa.in_tab[6] = 0; - else - qat_req->in.rsa.in_tab[3] = 0; - qat_req->out.rsa.out_tab[1] = 0; -- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, -- sizeof(qat_req->in.rsa.dec.c), -+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, -+ sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) - goto unmap_dst; - -- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, -- sizeof(qat_req->out.rsa.dec.m), -+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, -+ sizeof(struct qat_rsa_output_params), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) - goto unmap_in_params; -@@ -884,13 +896,14 @@ static int qat_rsa_dec(struct akcipher_request *req) - msg->input_param_count = 3; - - msg->output_param_count = 1; -- do { -- ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); -- } while (ret == -EBUSY && ctr++ < 100); - -- if (!ret) -- return -EINPROGRESS; -+ ret = qat_alg_send_asym_message(qat_req, inst, &req->base); -+ if (ret == -ENOSPC) -+ goto unmap_all; - -+ return ret; -+ -+unmap_all: - if (!dma_mapping_error(dev, qat_req->phy_out)) - dma_unmap_single(dev, qat_req->phy_out, - sizeof(struct qat_rsa_output_params), -@@ -901,21 +914,15 @@ unmap_in_params: - sizeof(struct qat_rsa_input_params), - DMA_TO_DEVICE); - unmap_dst: -- if (qat_req->dst_align) -- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, -- qat_req->out.rsa.dec.m); -- else -- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) -- dma_unmap_single(dev, qat_req->out.rsa.dec.m, -- ctx->key_sz, DMA_FROM_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) -+ dma_unmap_single(dev, qat_req->out.rsa.dec.m, -+ ctx->key_sz, DMA_FROM_DEVICE); -+ kfree_sensitive(qat_req->dst_align); - unmap_src: -- if (qat_req->src_align) -- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, -- qat_req->in.rsa.dec.c); -- else -- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) -- dma_unmap_single(dev, qat_req->in.rsa.dec.c, -- ctx->key_sz, DMA_TO_DEVICE); -+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) -+ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz, -+ 
DMA_TO_DEVICE); -+ kfree_sensitive(qat_req->src_align); - return ret; - } - -@@ -1218,11 +1225,13 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) - { - struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); - struct qat_crypto_instance *inst = -- qat_crypto_get_instance_node(get_current_node()); -+ qat_crypto_get_instance_node(numa_node_id()); - - if (!inst) - return -EINVAL; - -+ akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); -+ - ctx->key_sz = 0; - ctx->inst = inst; - return 0; -@@ -1233,18 +1242,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) - struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); - struct device *dev = &GET_DEV(ctx->inst->accel_dev); - -- if (ctx->n) -- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); -- if (ctx->e) -- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); -- if (ctx->d) { -- memset(ctx->d, '\0', ctx->key_sz); -- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); -- } -+ qat_rsa_clear_ctx(dev, ctx); - qat_crypto_put_instance(ctx->inst); -- ctx->n = NULL; -- ctx->e = NULL; -- ctx->d = NULL; - } - - static struct akcipher_alg rsa = { -@@ -1255,7 +1254,6 @@ static struct akcipher_alg rsa = { - .max_size = qat_rsa_max_size, - .init = qat_rsa_init_tfm, - .exit = qat_rsa_exit_tfm, -- .reqsize = sizeof(struct qat_asym_request) + 64, - .base = { - .cra_name = "rsa", - .cra_driver_name = "qat-rsa", -@@ -1272,7 +1270,6 @@ static struct kpp_alg dh = { - .max_size = qat_dh_max_size, - .init = qat_dh_init_tfm, - .exit = qat_dh_exit_tfm, -- .reqsize = sizeof(struct qat_asym_request) + 64, - .base = { - .cra_name = "dh", - .cra_driver_name = "qat-dh", -diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c -index ece6776fbd53d..994e43fab0a4d 100644 ---- a/drivers/crypto/qat/qat_common/qat_crypto.c -+++ b/drivers/crypto/qat/qat_common/qat_crypto.c -@@ -321,6 +321,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) - &inst->pke_rx); - if (ret) - goto err; -+ -+ INIT_LIST_HEAD(&inst->backlog.list); -+ spin_lock_init(&inst->backlog.lock); - } - return 0; - err: -diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h -index b6a4c95ae003f..df3c738ce323a 100644 ---- a/drivers/crypto/qat/qat_common/qat_crypto.h -+++ b/drivers/crypto/qat/qat_common/qat_crypto.h -@@ -9,6 +9,19 @@ - #include "adf_accel_devices.h" - #include "icp_qat_fw_la.h" - -+struct qat_instance_backlog { -+ struct list_head list; -+ spinlock_t lock; /* protects backlog list */ -+}; -+ -+struct qat_alg_req { -+ u32 *fw_req; -+ struct adf_etr_ring_data *tx_ring; -+ struct crypto_async_request *base; -+ struct list_head list; -+ struct qat_instance_backlog *backlog; -+}; -+ - struct qat_crypto_instance { - struct adf_etr_ring_data *sym_tx; - struct adf_etr_ring_data *sym_rx; -@@ -19,8 +32,29 @@ struct qat_crypto_instance { - unsigned long state; - int id; - atomic_t refctr; -+ struct qat_instance_backlog backlog; - }; - -+#define QAT_MAX_BUFF_DESC 4 -+ -+struct qat_alg_buf { -+ u32 len; -+ u32 resrvd; -+ u64 addr; -+} __packed; -+ -+struct qat_alg_buf_list { -+ u64 resrvd; -+ u32 num_bufs; -+ u32 num_mapped_bufs; -+ struct qat_alg_buf bufers[]; -+} __packed; -+ -+struct qat_alg_fixed_buf_list { -+ struct qat_alg_buf_list sgl_hdr; -+ struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; -+} __packed __aligned(64); -+ - struct qat_crypto_request_buffs { - struct qat_alg_buf_list *bl; - dma_addr_t blp; -@@ -28,6 +62,10 @@ struct 
qat_crypto_request_buffs { - dma_addr_t bloutp; - size_t sz; - size_t sz_out; -+ bool sgl_src_valid; -+ bool sgl_dst_valid; -+ struct qat_alg_fixed_buf_list sgl_src; -+ struct qat_alg_fixed_buf_list sgl_dst; - }; - - struct qat_crypto_request; -@@ -53,6 +91,7 @@ struct qat_crypto_request { - u8 iv[AES_BLOCK_SIZE]; - }; - bool encryption; -+ struct qat_alg_req alg_req; - }; - - static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) -@@ -70,4 +109,9 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) - return true; - } - -+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) -+{ -+ return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; -+} -+ - #endif -diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c -index 0a9ce365a544e..c2c73ee279b29 100644 ---- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c -+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c -@@ -86,17 +86,26 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) - - capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | - ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | -- ICP_ACCEL_CAPABILITIES_AUTHENTICATION; -+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION | -+ ICP_ACCEL_CAPABILITIES_CIPHER | -+ ICP_ACCEL_CAPABILITIES_COMPRESSION; - - /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses); - -- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) -+ /* A set bit in legfuses means the feature is OFF in this SKU */ -+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) { - capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; -+ } - if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) - capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; -- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) -+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) { - capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; -+ } -+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) -+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; - - return capabilities; - } -diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c -index 290e2446a2f35..97a530171f07a 100644 ---- a/drivers/crypto/qce/aead.c -+++ b/drivers/crypto/qce/aead.c -@@ -802,8 +802,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi - - ret = crypto_register_aead(alg); - if (ret) { -- kfree(tmpl); - dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); -+ kfree(tmpl); - return ret; - } - -diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c -index 8e6fcf2c21cc0..59159f5e64e52 100644 ---- a/drivers/crypto/qce/sha.c -+++ b/drivers/crypto/qce/sha.c -@@ -498,8 +498,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, - - ret = crypto_register_ahash(alg); - if (ret) { -- kfree(tmpl); - dev_err(qce->dev, "%s registration failed\n", base->cra_name); -+ kfree(tmpl); - return ret; - } - -diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c -index 8ff10928f581d..3d27cd5210ef5 100644 ---- a/drivers/crypto/qce/skcipher.c -+++ b/drivers/crypto/qce/skcipher.c -@@ -484,8 +484,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, - - ret = crypto_register_skcipher(alg); - if (ret) { -- kfree(tmpl); - dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); -+ kfree(tmpl); - return ret; - 
} - -diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c -index 99ba8d51d1020..031b5f701a0a3 100644 ---- a/drivers/crypto/qcom-rng.c -+++ b/drivers/crypto/qcom-rng.c -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) - { - unsigned int currsize = 0; - u32 val; -+ int ret; - - /* read random data from hardware */ - do { -- val = readl_relaxed(rng->base + PRNG_STATUS); -- if (!(val & PRNG_STATUS_DATA_AVAIL)) -- break; -+ ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, -+ val & PRNG_STATUS_DATA_AVAIL, -+ 200, 10000); -+ if (ret) -+ return ret; - - val = readl_relaxed(rng->base + PRNG_DATA_OUT); - if (!val) -- break; -+ return -EINVAL; - - if ((max - currsize) >= WORD_SZ) { - memcpy(data, &val, WORD_SZ); -@@ -65,7 +69,7 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) - } - } while (currsize < max); - -- return currsize; -+ return 0; - } - - static int qcom_rng_generate(struct crypto_rng *tfm, -@@ -87,7 +91,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, - mutex_unlock(&rng->lock); - clk_disable_unprepare(rng->clk); - -- return 0; -+ return ret; - } - - static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed, -diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c -index 35d73061d1569..14a0aef18ab13 100644 ---- a/drivers/crypto/rockchip/rk3288_crypto.c -+++ b/drivers/crypto/rockchip/rk3288_crypto.c -@@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) - clk_disable_unprepare(dev->sclk); - } - --static int check_alignment(struct scatterlist *sg_src, -- struct scatterlist *sg_dst, -- int align_mask) --{ -- int in, out, align; -- -- in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && -- IS_ALIGNED((uint32_t)sg_src->length, align_mask); -- if (!sg_dst) -- return in; -- out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && -- IS_ALIGNED((uint32_t)sg_dst->length, align_mask); -- align = in && out; -- -- return (align && (sg_src->length == sg_dst->length)); --} -- --static int rk_load_data(struct rk_crypto_info *dev, -- struct scatterlist *sg_src, -- struct scatterlist *sg_dst) --{ -- unsigned int count; -- -- dev->aligned = dev->aligned ? -- check_alignment(sg_src, sg_dst, dev->align_size) : -- dev->aligned; -- if (dev->aligned) { -- count = min(dev->left_bytes, sg_src->length); -- dev->left_bytes -= count; -- -- if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { -- dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", -- __func__, __LINE__); -- return -EINVAL; -- } -- dev->addr_in = sg_dma_address(sg_src); -- -- if (sg_dst) { -- if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { -- dev_err(dev->dev, -- "[%s:%d] dma_map_sg(dst) error\n", -- __func__, __LINE__); -- dma_unmap_sg(dev->dev, sg_src, 1, -- DMA_TO_DEVICE); -- return -EINVAL; -- } -- dev->addr_out = sg_dma_address(sg_dst); -- } -- } else { -- count = (dev->left_bytes > PAGE_SIZE) ? 
-- PAGE_SIZE : dev->left_bytes; -- -- if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, -- dev->addr_vir, count, -- dev->total - dev->left_bytes)) { -- dev_err(dev->dev, "[%s:%d] pcopy err\n", -- __func__, __LINE__); -- return -EINVAL; -- } -- dev->left_bytes -= count; -- sg_init_one(&dev->sg_tmp, dev->addr_vir, count); -- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { -- dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", -- __func__, __LINE__); -- return -ENOMEM; -- } -- dev->addr_in = sg_dma_address(&dev->sg_tmp); -- -- if (sg_dst) { -- if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, -- DMA_FROM_DEVICE)) { -- dev_err(dev->dev, -- "[%s:%d] dma_map_sg(sg_tmp) error\n", -- __func__, __LINE__); -- dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, -- DMA_TO_DEVICE); -- return -ENOMEM; -- } -- dev->addr_out = sg_dma_address(&dev->sg_tmp); -- } -- } -- dev->count = count; -- return 0; --} -- --static void rk_unload_data(struct rk_crypto_info *dev) --{ -- struct scatterlist *sg_in, *sg_out; -- -- sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; -- dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); -- -- if (dev->sg_dst) { -- sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; -- dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); -- } --} -- - static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) - { - struct rk_crypto_info *dev = platform_get_drvdata(dev_id); - u32 interrupt_status; - -- spin_lock(&dev->lock); - interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); - CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); - -+ dev->status = 1; - if (interrupt_status & 0x0a) { - dev_warn(dev->dev, "DMA Error\n"); -- dev->err = -EFAULT; -+ dev->status = 0; - } -- tasklet_schedule(&dev->done_task); -+ complete(&dev->complete); - -- spin_unlock(&dev->lock); - return IRQ_HANDLED; - } - --static int rk_crypto_enqueue(struct rk_crypto_info *dev, -- struct crypto_async_request *async_req) --{ -- unsigned long flags; -- int ret; -- -- spin_lock_irqsave(&dev->lock, flags); -- ret = crypto_enqueue_request(&dev->queue, async_req); -- if (dev->busy) { -- spin_unlock_irqrestore(&dev->lock, flags); -- return ret; -- } -- dev->busy = true; -- spin_unlock_irqrestore(&dev->lock, flags); -- tasklet_schedule(&dev->queue_task); -- -- return ret; --} -- --static void rk_crypto_queue_task_cb(unsigned long data) --{ -- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; -- struct crypto_async_request *async_req, *backlog; -- unsigned long flags; -- int err = 0; -- -- dev->err = 0; -- spin_lock_irqsave(&dev->lock, flags); -- backlog = crypto_get_backlog(&dev->queue); -- async_req = crypto_dequeue_request(&dev->queue); -- -- if (!async_req) { -- dev->busy = false; -- spin_unlock_irqrestore(&dev->lock, flags); -- return; -- } -- spin_unlock_irqrestore(&dev->lock, flags); -- -- if (backlog) { -- backlog->complete(backlog, -EINPROGRESS); -- backlog = NULL; -- } -- -- dev->async_req = async_req; -- err = dev->start(dev); -- if (err) -- dev->complete(dev->async_req, err); --} -- --static void rk_crypto_done_task_cb(unsigned long data) --{ -- struct rk_crypto_info *dev = (struct rk_crypto_info *)data; -- -- if (dev->err) { -- dev->complete(dev->async_req, dev->err); -- return; -- } -- -- dev->err = dev->update(dev); -- if (dev->err) -- dev->complete(dev->async_req, dev->err); --} -- - static struct rk_crypto_tmp *rk_cipher_algs[] = { - &rk_ecb_aes_alg, - &rk_cbc_aes_alg, -@@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev) - if (err) - goto err_crypto; - -- 
spin_lock_init(&crypto_info->lock); -- - crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(crypto_info->reg)) { - err = PTR_ERR(crypto_info->reg); -@@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev) - crypto_info->dev = &pdev->dev; - platform_set_drvdata(pdev, crypto_info); - -- tasklet_init(&crypto_info->queue_task, -- rk_crypto_queue_task_cb, (unsigned long)crypto_info); -- tasklet_init(&crypto_info->done_task, -- rk_crypto_done_task_cb, (unsigned long)crypto_info); -- crypto_init_queue(&crypto_info->queue, 50); -+ crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); -+ crypto_engine_start(crypto_info->engine); -+ init_completion(&crypto_info->complete); - -- crypto_info->enable_clk = rk_crypto_enable_clk; -- crypto_info->disable_clk = rk_crypto_disable_clk; -- crypto_info->load_data = rk_load_data; -- crypto_info->unload_data = rk_unload_data; -- crypto_info->enqueue = rk_crypto_enqueue; -- crypto_info->busy = false; -+ rk_crypto_enable_clk(crypto_info); - - err = rk_crypto_register(crypto_info); - if (err) { -@@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev) - return 0; - - err_register_alg: -- tasklet_kill(&crypto_info->queue_task); -- tasklet_kill(&crypto_info->done_task); -+ crypto_engine_exit(crypto_info->engine); - err_crypto: -+ dev_err(dev, "Crypto Accelerator not successfully registered\n"); - return err; - } - -@@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev) - struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); - - rk_crypto_unregister(); -- tasklet_kill(&crypto_tmp->done_task); -- tasklet_kill(&crypto_tmp->queue_task); -+ rk_crypto_disable_clk(crypto_tmp); -+ crypto_engine_exit(crypto_tmp->engine); - return 0; - } - -diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h -index 97278c2574ff9..045e811b4af84 100644 ---- a/drivers/crypto/rockchip/rk3288_crypto.h -+++ b/drivers/crypto/rockchip/rk3288_crypto.h -@@ -5,9 +5,11 @@ - #include - #include - #include -+#include - #include - #include - #include -+#include - #include - #include - -@@ -193,45 +195,15 @@ struct rk_crypto_info { - struct reset_control *rst; - void __iomem *reg; - int irq; -- struct crypto_queue queue; -- struct tasklet_struct queue_task; -- struct tasklet_struct done_task; -- struct crypto_async_request *async_req; -- int err; -- /* device lock */ -- spinlock_t lock; -- -- /* the public variable */ -- struct scatterlist *sg_src; -- struct scatterlist *sg_dst; -- struct scatterlist sg_tmp; -- struct scatterlist *first; -- unsigned int left_bytes; -- void *addr_vir; -- int aligned; -- int align_size; -- size_t src_nents; -- size_t dst_nents; -- unsigned int total; -- unsigned int count; -- dma_addr_t addr_in; -- dma_addr_t addr_out; -- bool busy; -- int (*start)(struct rk_crypto_info *dev); -- int (*update)(struct rk_crypto_info *dev); -- void (*complete)(struct crypto_async_request *base, int err); -- int (*enable_clk)(struct rk_crypto_info *dev); -- void (*disable_clk)(struct rk_crypto_info *dev); -- int (*load_data)(struct rk_crypto_info *dev, -- struct scatterlist *sg_src, -- struct scatterlist *sg_dst); -- void (*unload_data)(struct rk_crypto_info *dev); -- int (*enqueue)(struct rk_crypto_info *dev, -- struct crypto_async_request *async_req); -+ -+ struct crypto_engine *engine; -+ struct completion complete; -+ int status; - }; - - /* the private variable of hash */ - struct rk_ahash_ctx { -+ struct crypto_engine_ctx enginectx; - struct 
rk_crypto_info *dev; - /* for fallback */ - struct crypto_ahash *fallback_tfm; -@@ -241,14 +213,23 @@ struct rk_ahash_ctx { - struct rk_ahash_rctx { - struct ahash_request fallback_req; - u32 mode; -+ int nrsg; - }; - - /* the private variable of cipher */ - struct rk_cipher_ctx { -+ struct crypto_engine_ctx enginectx; - struct rk_crypto_info *dev; - unsigned int keylen; -- u32 mode; -+ u8 key[AES_MAX_KEY_SIZE]; - u8 iv[AES_BLOCK_SIZE]; -+ struct crypto_skcipher *fallback_tfm; -+}; -+ -+struct rk_cipher_rctx { -+ u8 backup_iv[AES_BLOCK_SIZE]; -+ u32 mode; -+ struct skcipher_request fallback_req; // keep at the end - }; - - enum alg_type { -diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c -index ed03058497bc2..edd40e16a3f0a 100644 ---- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c -+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c -@@ -9,6 +9,7 @@ - * Some ideas are from marvell/cesa.c and s5p-sss.c driver. - */ - #include -+#include - #include "rk3288_crypto.h" - - /* -@@ -16,6 +17,40 @@ - * so we put the fixed hash out when met zero message. - */ - -+static bool rk_ahash_need_fallback(struct ahash_request *req) -+{ -+ struct scatterlist *sg; -+ -+ sg = req->src; -+ while (sg) { -+ if (!IS_ALIGNED(sg->offset, sizeof(u32))) { -+ return true; -+ } -+ if (sg->length % 4) { -+ return true; -+ } -+ sg = sg_next(sg); -+ } -+ return false; -+} -+ -+static int rk_ahash_digest_fb(struct ahash_request *areq) -+{ -+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); -+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); -+ struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); -+ -+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); -+ rctx->fallback_req.base.flags = areq->base.flags & -+ CRYPTO_TFM_REQ_MAY_SLEEP; -+ -+ rctx->fallback_req.nbytes = areq->nbytes; -+ rctx->fallback_req.src = areq->src; -+ rctx->fallback_req.result = areq->result; -+ -+ return crypto_ahash_digest(&rctx->fallback_req); -+} -+ - static int zero_message_process(struct ahash_request *req) - { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); -@@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req) - return 0; - } - --static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) --{ -- if (base->complete) -- base->complete(base, err); --} -- --static void rk_ahash_reg_init(struct rk_crypto_info *dev) -+static void rk_ahash_reg_init(struct ahash_request *req) - { -- struct ahash_request *req = ahash_request_cast(dev->async_req); - struct rk_ahash_rctx *rctx = ahash_request_ctx(req); -+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); -+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); -+ struct rk_crypto_info *dev = tctx->dev; - int reg_status; - - reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | -@@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) - RK_CRYPTO_BYTESWAP_BRFIFO | - RK_CRYPTO_BYTESWAP_BTFIFO); - -- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); -+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); - } - - static int rk_ahash_init(struct ahash_request *req) -@@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req) - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct rk_crypto_info *dev = tctx->dev; - -+ if (rk_ahash_need_fallback(req)) -+ return rk_ahash_digest_fb(req); -+ - if (!req->nbytes) - return zero_message_process(req); -- else -- return dev->enqueue(dev, &req->base); -+ -+ return 
crypto_transfer_hash_request_to_engine(dev->engine, req); - } - --static void crypto_ahash_dma_start(struct rk_crypto_info *dev) -+static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) - { -- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); -- CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); -+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); -+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); - CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | - (RK_CRYPTO_HASH_START << 16)); - } - --static int rk_ahash_set_data_start(struct rk_crypto_info *dev) -+static int rk_hash_prepare(struct crypto_engine *engine, void *breq) -+{ -+ struct ahash_request *areq = container_of(breq, struct ahash_request, base); -+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); -+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); -+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); -+ int ret; -+ -+ ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); -+ if (ret <= 0) -+ return -EINVAL; -+ -+ rctx->nrsg = ret; -+ -+ return 0; -+} -+ -+static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) - { -- int err; -+ struct ahash_request *areq = container_of(breq, struct ahash_request, base); -+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); -+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); -+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); - -- err = dev->load_data(dev, dev->sg_src, NULL); -- if (!err) -- crypto_ahash_dma_start(dev); -- return err; -+ dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); -+ return 0; - } - --static int rk_ahash_start(struct rk_crypto_info *dev) -+static int rk_hash_run(struct crypto_engine *engine, void *breq) - { -- struct ahash_request *req = ahash_request_cast(dev->async_req); -- struct crypto_ahash *tfm; -- struct rk_ahash_rctx *rctx; -- -- dev->total = req->nbytes; -- dev->left_bytes = req->nbytes; -- dev->aligned = 0; -- dev->align_size = 4; -- dev->sg_dst = NULL; -- dev->sg_src = req->src; -- dev->first = req->src; -- dev->src_nents = sg_nents(req->src); -- rctx = ahash_request_ctx(req); -+ struct ahash_request *areq = container_of(breq, struct ahash_request, base); -+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); -+ struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); -+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); -+ struct scatterlist *sg = areq->src; -+ int err = 0; -+ int i; -+ u32 v; -+ - rctx->mode = 0; - -- tfm = crypto_ahash_reqtfm(req); - switch (crypto_ahash_digestsize(tfm)) { - case SHA1_DIGEST_SIZE: - rctx->mode = RK_CRYPTO_HASH_SHA1; -@@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev) - rctx->mode = RK_CRYPTO_HASH_MD5; - break; - default: -- return -EINVAL; -+ err = -EINVAL; -+ goto theend; - } - -- rk_ahash_reg_init(dev); -- return rk_ahash_set_data_start(dev); --} -- --static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) --{ -- int err = 0; -- struct ahash_request *req = ahash_request_cast(dev->async_req); -- struct crypto_ahash *tfm; -- -- dev->unload_data(dev); -- if (dev->left_bytes) { -- if (dev->aligned) { -- if (sg_is_last(dev->sg_src)) { -- dev_warn(dev->dev, "[%s:%d], Lack of data\n", -- __func__, __LINE__); -- err = -ENOMEM; -- goto out_rx; -- } -- dev->sg_src = sg_next(dev->sg_src); -+ rk_ahash_reg_init(areq); -+ -+ while (sg) { -+ reinit_completion(&tctx->dev->complete); -+ tctx->dev->status = 0; -+ crypto_ahash_dma_start(tctx->dev, sg); -+ 
wait_for_completion_interruptible_timeout(&tctx->dev->complete, -+ msecs_to_jiffies(2000)); -+ if (!tctx->dev->status) { -+ dev_err(tctx->dev->dev, "DMA timeout\n"); -+ err = -EFAULT; -+ goto theend; - } -- err = rk_ahash_set_data_start(dev); -- } else { -+ sg = sg_next(sg); -+ } -+ - /* - * it will take some time to process date after last dma - * transmission. -@@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) - * efficiency, and make it response quickly when dma - * complete. - */ -- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) -- udelay(10); -- -- tfm = crypto_ahash_reqtfm(req); -- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, -- crypto_ahash_digestsize(tfm)); -- dev->complete(dev->async_req, 0); -- tasklet_schedule(&dev->queue_task); -+ while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) -+ udelay(10); -+ -+ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { -+ v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); -+ put_unaligned_le32(v, areq->result + i * 4); - } - --out_rx: -- return err; -+theend: -+ local_bh_disable(); -+ crypto_finalize_hash_request(engine, breq, err); -+ local_bh_enable(); -+ -+ return 0; - } - - static int rk_cra_hash_init(struct crypto_tfm *tfm) -@@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) - algt = container_of(alg, struct rk_crypto_tmp, alg.hash); - - tctx->dev = algt->dev; -- tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); -- if (!tctx->dev->addr_vir) { -- dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); -- return -ENOMEM; -- } -- tctx->dev->start = rk_ahash_start; -- tctx->dev->update = rk_ahash_crypto_rx; -- tctx->dev->complete = rk_ahash_crypto_complete; - - /* for fallback */ - tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, -@@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) - dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); - return PTR_ERR(tctx->fallback_tfm); - } -+ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct rk_ahash_rctx) + - crypto_ahash_reqsize(tctx->fallback_tfm)); - -- return tctx->dev->enable_clk(tctx->dev); -+ tctx->enginectx.op.do_one_request = rk_hash_run; -+ tctx->enginectx.op.prepare_request = rk_hash_prepare; -+ tctx->enginectx.op.unprepare_request = rk_hash_unprepare; -+ -+ return 0; - } - - static void rk_cra_hash_exit(struct crypto_tfm *tfm) - { - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - -- free_page((unsigned long)tctx->dev->addr_vir); -- return tctx->dev->disable_clk(tctx->dev); -+ crypto_free_ahash(tctx->fallback_tfm); - } - - struct rk_crypto_tmp rk_ahash_sha1 = { -diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c -index 1cece1a7d3f00..67a7e05d5ae31 100644 ---- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c -+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c -@@ -9,23 +9,77 @@ - * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
- */ - #include -+#include - #include "rk3288_crypto.h" - - #define RK_CRYPTO_DEC BIT(0) - --static void rk_crypto_complete(struct crypto_async_request *base, int err) -+static int rk_cipher_need_fallback(struct skcipher_request *req) - { -- if (base->complete) -- base->complete(base, err); -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ unsigned int bs = crypto_skcipher_blocksize(tfm); -+ struct scatterlist *sgs, *sgd; -+ unsigned int stodo, dtodo, len; -+ -+ if (!req->cryptlen) -+ return true; -+ -+ len = req->cryptlen; -+ sgs = req->src; -+ sgd = req->dst; -+ while (sgs && sgd) { -+ if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { -+ return true; -+ } -+ if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { -+ return true; -+ } -+ stodo = min(len, sgs->length); -+ if (stodo % bs) { -+ return true; -+ } -+ dtodo = min(len, sgd->length); -+ if (dtodo % bs) { -+ return true; -+ } -+ if (stodo != dtodo) { -+ return true; -+ } -+ len -= stodo; -+ sgs = sg_next(sgs); -+ sgd = sg_next(sgd); -+ } -+ return false; -+} -+ -+static int rk_cipher_fallback(struct skcipher_request *areq) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); -+ struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); -+ int err; -+ -+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); -+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, -+ areq->base.complete, areq->base.data); -+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, -+ areq->cryptlen, areq->iv); -+ if (rctx->mode & RK_CRYPTO_DEC) -+ err = crypto_skcipher_decrypt(&rctx->fallback_req); -+ else -+ err = crypto_skcipher_encrypt(&rctx->fallback_req); -+ return err; - } - - static int rk_handle_req(struct rk_crypto_info *dev, - struct skcipher_request *req) - { -- if (!IS_ALIGNED(req->cryptlen, dev->align_size)) -- return -EINVAL; -- else -- return dev->enqueue(dev, &req->base); -+ struct crypto_engine *engine = dev->engine; -+ -+ if (rk_cipher_need_fallback(req)) -+ return rk_cipher_fallback(req); -+ -+ return crypto_transfer_skcipher_request_to_engine(engine, req); - } - - static int rk_aes_setkey(struct crypto_skcipher *cipher, -@@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, - keylen != AES_KEYSIZE_256) - return -EINVAL; - ctx->keylen = keylen; -- memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); -- return 0; -+ memcpy(ctx->key, key, keylen); -+ -+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); - } - - static int rk_des_setkey(struct crypto_skcipher *cipher, -@@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, - return err; - - ctx->keylen = keylen; -- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); -- return 0; -+ memcpy(ctx->key, key, keylen); -+ -+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); - } - - static int rk_tdes_setkey(struct crypto_skcipher *cipher, -@@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, - return err; - - ctx->keylen = keylen; -- memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); -- return 0; -+ memcpy(ctx->key, key, keylen); -+ -+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); - } - - static int rk_aes_ecb_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct 
rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_AES_ECB_MODE; -+ rctx->mode = RK_CRYPTO_AES_ECB_MODE; - return rk_handle_req(dev, req); - } - -@@ -86,9 +144,10 @@ static int rk_aes_ecb_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; -+ rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - -@@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_AES_CBC_MODE; -+ rctx->mode = RK_CRYPTO_AES_CBC_MODE; - return rk_handle_req(dev, req); - } - -@@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; -+ rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - -@@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = 0; -+ rctx->mode = 0; - return rk_handle_req(dev, req); - } - -@@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_DEC; -+ rctx->mode = RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - -@@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; -+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; - return rk_handle_req(dev, req); - } - -@@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; -+ rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - -@@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_SELECT; -+ rctx->mode = 
RK_CRYPTO_TDES_SELECT; - return rk_handle_req(dev, req); - } - -@@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; -+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - -@@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; -+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; - return rk_handle_req(dev, req); - } - -@@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_crypto_info *dev = ctx->dev; - -- ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | -+ rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | - RK_CRYPTO_DEC; - return rk_handle_req(dev, req); - } - --static void rk_ablk_hw_init(struct rk_crypto_info *dev) -+static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) - { -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); -- u32 ivsize, block, conf_reg = 0; -+ u32 block, conf_reg = 0; - - block = crypto_tfm_alg_blocksize(tfm); -- ivsize = crypto_skcipher_ivsize(cipher); - - if (block == DES_BLOCK_SIZE) { -- ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | -+ rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | - RK_CRYPTO_TDES_BYTESWAP_KEY | - RK_CRYPTO_TDES_BYTESWAP_IV; -- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); -- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); -+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); -+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); - conf_reg = RK_CRYPTO_DESSEL; - } else { -- ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | -+ rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | - RK_CRYPTO_AES_KEY_CHANGE | - RK_CRYPTO_AES_BYTESWAP_KEY | - RK_CRYPTO_AES_BYTESWAP_IV; - if (ctx->keylen == AES_KEYSIZE_192) -- ctx->mode |= RK_CRYPTO_AES_192BIT_key; -+ rctx->mode |= RK_CRYPTO_AES_192BIT_key; - else if (ctx->keylen == AES_KEYSIZE_256) -- ctx->mode |= RK_CRYPTO_AES_256BIT_key; -- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); -- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); -+ rctx->mode |= RK_CRYPTO_AES_256BIT_key; -+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); -+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); - } - conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | - RK_CRYPTO_BYTESWAP_BRFIFO; -@@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) - RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); - } - --static void crypto_dma_start(struct 
rk_crypto_info *dev) -+static void crypto_dma_start(struct rk_crypto_info *dev, -+ struct scatterlist *sgs, -+ struct scatterlist *sgd, unsigned int todo) - { -- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); -- CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); -- CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); -+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); -+ CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); -+ CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd)); - CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | - _SBF(RK_CRYPTO_BLOCK_START, 16)); - } - --static int rk_set_data_start(struct rk_crypto_info *dev) -+static int rk_cipher_run(struct crypto_engine *engine, void *async_req) - { -- int err; -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); -- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -- u32 ivsize = crypto_skcipher_ivsize(tfm); -- u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + -- dev->sg_src->offset + dev->sg_src->length - ivsize; -- -- /* Store the iv that need to be updated in chain mode. -- * And update the IV buffer to contain the next IV for decryption mode. -- */ -- if (ctx->mode & RK_CRYPTO_DEC) { -- memcpy(ctx->iv, src_last_blk, ivsize); -- sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, -- ivsize, dev->total - ivsize); -- } -- -- err = dev->load_data(dev, dev->sg_src, dev->sg_dst); -- if (!err) -- crypto_dma_start(dev); -- return err; --} -- --static int rk_ablk_start(struct rk_crypto_info *dev) --{ -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); -- unsigned long flags; -+ struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); -+ struct scatterlist *sgs, *sgd; - int err = 0; -+ int ivsize = crypto_skcipher_ivsize(tfm); -+ int offset; -+ u8 iv[AES_BLOCK_SIZE]; -+ u8 biv[AES_BLOCK_SIZE]; -+ u8 *ivtouse = areq->iv; -+ unsigned int len = areq->cryptlen; -+ unsigned int todo; -+ -+ ivsize = crypto_skcipher_ivsize(tfm); -+ if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { -+ if (rctx->mode & RK_CRYPTO_DEC) { -+ offset = areq->cryptlen - ivsize; -+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src, -+ offset, ivsize, 0); -+ } -+ } - -- dev->left_bytes = req->cryptlen; -- dev->total = req->cryptlen; -- dev->sg_src = req->src; -- dev->first = req->src; -- dev->src_nents = sg_nents(req->src); -- dev->sg_dst = req->dst; -- dev->dst_nents = sg_nents(req->dst); -- dev->aligned = 1; -- -- spin_lock_irqsave(&dev->lock, flags); -- rk_ablk_hw_init(dev); -- err = rk_set_data_start(dev); -- spin_unlock_irqrestore(&dev->lock, flags); -- return err; --} -- --static void rk_iv_copyback(struct rk_crypto_info *dev) --{ -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); -- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -- u32 ivsize = crypto_skcipher_ivsize(tfm); -+ sgs = areq->src; -+ sgd = areq->dst; - -- /* Update the IV buffer to contain the next IV for encryption mode. 
*/ -- if (!(ctx->mode & RK_CRYPTO_DEC)) { -- if (dev->aligned) { -- memcpy(req->iv, sg_virt(dev->sg_dst) + -- dev->sg_dst->length - ivsize, ivsize); -+ while (sgs && sgd && len) { -+ if (!sgs->length) { -+ sgs = sg_next(sgs); -+ sgd = sg_next(sgd); -+ continue; -+ } -+ if (rctx->mode & RK_CRYPTO_DEC) { -+ /* we backup last block of source to be used as IV at next step */ -+ offset = sgs->length - ivsize; -+ scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); -+ } -+ if (sgs == sgd) { -+ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); -+ if (err <= 0) { -+ err = -EINVAL; -+ goto theend_iv; -+ } - } else { -- memcpy(req->iv, dev->addr_vir + -- dev->count - ivsize, ivsize); -+ err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); -+ if (err <= 0) { -+ err = -EINVAL; -+ goto theend_iv; -+ } -+ err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); -+ if (err <= 0) { -+ err = -EINVAL; -+ goto theend_sgs; -+ } -+ } -+ err = 0; -+ rk_ablk_hw_init(ctx->dev, areq); -+ if (ivsize) { -+ if (ivsize == DES_BLOCK_SIZE) -+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); -+ else -+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); -+ } -+ reinit_completion(&ctx->dev->complete); -+ ctx->dev->status = 0; -+ -+ todo = min(sg_dma_len(sgs), len); -+ len -= todo; -+ crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); -+ wait_for_completion_interruptible_timeout(&ctx->dev->complete, -+ msecs_to_jiffies(2000)); -+ if (!ctx->dev->status) { -+ dev_err(ctx->dev->dev, "DMA timeout\n"); -+ err = -EFAULT; -+ goto theend; - } -+ if (sgs == sgd) { -+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); -+ } else { -+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); -+ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); -+ } -+ if (rctx->mode & RK_CRYPTO_DEC) { -+ memcpy(iv, biv, ivsize); -+ ivtouse = iv; -+ } else { -+ offset = sgd->length - ivsize; -+ scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); -+ ivtouse = iv; -+ } -+ sgs = sg_next(sgs); -+ sgd = sg_next(sgd); - } --} -- --static void rk_update_iv(struct rk_crypto_info *dev) --{ -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); -- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -- struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); -- u32 ivsize = crypto_skcipher_ivsize(tfm); -- u8 *new_iv = NULL; - -- if (ctx->mode & RK_CRYPTO_DEC) { -- new_iv = ctx->iv; -- } else { -- new_iv = page_address(sg_page(dev->sg_dst)) + -- dev->sg_dst->offset + dev->sg_dst->length - ivsize; -+ if (areq->iv && ivsize > 0) { -+ offset = areq->cryptlen - ivsize; -+ if (rctx->mode & RK_CRYPTO_DEC) { -+ memcpy(areq->iv, rctx->backup_iv, ivsize); -+ memzero_explicit(rctx->backup_iv, ivsize); -+ } else { -+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset, -+ ivsize, 0); -+ } - } - -- if (ivsize == DES_BLOCK_SIZE) -- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); -- else if (ivsize == AES_BLOCK_SIZE) -- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); --} -+theend: -+ local_bh_disable(); -+ crypto_finalize_skcipher_request(engine, areq, err); -+ local_bh_enable(); -+ return 0; - --/* return: -- * true some err was occurred -- * fault no err, continue -- */ --static int rk_ablk_rx(struct rk_crypto_info *dev) --{ -- int err = 0; -- struct skcipher_request *req = -- skcipher_request_cast(dev->async_req); -- -- dev->unload_data(dev); -- if (!dev->aligned) { -- if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, -- dev->addr_vir, dev->count, -- dev->total - 
dev->left_bytes - -- dev->count)) { -- err = -EINVAL; -- goto out_rx; -- } -- } -- if (dev->left_bytes) { -- rk_update_iv(dev); -- if (dev->aligned) { -- if (sg_is_last(dev->sg_src)) { -- dev_err(dev->dev, "[%s:%d] Lack of data\n", -- __func__, __LINE__); -- err = -ENOMEM; -- goto out_rx; -- } -- dev->sg_src = sg_next(dev->sg_src); -- dev->sg_dst = sg_next(dev->sg_dst); -- } -- err = rk_set_data_start(dev); -+theend_sgs: -+ if (sgs == sgd) { -+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); - } else { -- rk_iv_copyback(dev); -- /* here show the calculation is over without any err */ -- dev->complete(dev->async_req, 0); -- tasklet_schedule(&dev->queue_task); -+ dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); -+ dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); - } --out_rx: -+theend_iv: - return err; - } - -@@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) - { - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); -+ const char *name = crypto_tfm_alg_name(&tfm->base); - struct rk_crypto_tmp *algt; - - algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); - - ctx->dev = algt->dev; -- ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; -- ctx->dev->start = rk_ablk_start; -- ctx->dev->update = rk_ablk_rx; -- ctx->dev->complete = rk_crypto_complete; -- ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); - -- return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; -+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); -+ if (IS_ERR(ctx->fallback_tfm)) { -+ dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", -+ name, PTR_ERR(ctx->fallback_tfm)); -+ return PTR_ERR(ctx->fallback_tfm); -+ } -+ -+ tfm->reqsize = sizeof(struct rk_cipher_rctx) + -+ crypto_skcipher_reqsize(ctx->fallback_tfm); -+ -+ ctx->enginectx.op.do_one_request = rk_cipher_run; -+ -+ return 0; - } - - static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) - { - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - -- free_page((unsigned long)ctx->dev->addr_vir); -- ctx->dev->disable_clk(ctx->dev); -+ memzero_explicit(ctx->key, ctx->keylen); -+ crypto_free_skcipher(ctx->fallback_tfm); - } - - struct rk_crypto_tmp rk_ecb_aes_alg = { -@@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x0f, -@@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "cbc-aes-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x0f, -@@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "ecb-des-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x07, -@@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { - 
.base.cra_name = "cbc(des)", - .base.cra_driver_name = "cbc-des-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x07, -@@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3-ede-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x07, -@@ -506,7 +573,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { - .exit = rk_ablk_exit_tfm, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, -- .ivsize = DES_BLOCK_SIZE, - .setkey = rk_tdes_setkey, - .encrypt = rk_des3_ede_ecb_encrypt, - .decrypt = rk_des3_ede_ecb_decrypt, -@@ -519,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "cbc-des3-ede-rk", - .base.cra_priority = 300, -- .base.cra_flags = CRYPTO_ALG_ASYNC, -+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), - .base.cra_alignmask = 0x07, -diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c -index 55aa3a71169b0..7717e9e5977bb 100644 ---- a/drivers/crypto/s5p-sss.c -+++ b/drivers/crypto/s5p-sss.c -@@ -2171,6 +2171,8 @@ static int s5p_aes_probe(struct platform_device *pdev) - - variant = find_s5p_sss_version(pdev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!res) -+ return -EINVAL; - - /* - * Note: HASH and PRNG uses the same registers in secss, avoid -diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c -index 457084b344c17..b07ae4ba165e7 100644 ---- a/drivers/crypto/sahara.c -+++ b/drivers/crypto/sahara.c -@@ -26,10 +26,10 @@ - #include - #include - #include --#include - #include - #include - #include -+#include - - #define SHA_BUFFER_LEN PAGE_SIZE - #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE -@@ -196,7 +196,7 @@ struct sahara_dev { - void __iomem *regs_base; - struct clk *clk_ipg; - struct clk *clk_ahb; -- struct mutex queue_mutex; -+ spinlock_t queue_spinlock; - struct task_struct *kthread; - struct completion dma_completion; - -@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) - - rctx->mode = mode; - -- mutex_lock(&dev->queue_mutex); -+ spin_lock_bh(&dev->queue_spinlock); - err = crypto_enqueue_request(&dev->queue, &req->base); -- mutex_unlock(&dev->queue_mutex); -+ spin_unlock_bh(&dev->queue_spinlock); - - wake_up_process(dev->kthread); - -@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data) - do { - __set_current_state(TASK_INTERRUPTIBLE); - -- mutex_lock(&dev->queue_mutex); -+ spin_lock_bh(&dev->queue_spinlock); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); -- mutex_unlock(&dev->queue_mutex); -+ spin_unlock_bh(&dev->queue_spinlock); - - if (backlog) - backlog->complete(backlog, -EINPROGRESS); -@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) - rctx->first = 1; - } - -- mutex_lock(&dev->queue_mutex); -+ spin_lock_bh(&dev->queue_spinlock); - ret = crypto_enqueue_request(&dev->queue, &req->base); -- 
mutex_unlock(&dev->queue_mutex); -+ spin_unlock_bh(&dev->queue_spinlock); - - wake_up_process(dev->kthread); - -@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev) - - crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - -- mutex_init(&dev->queue_mutex); -+ spin_lock_init(&dev->queue_spinlock); - - dev_ptr = dev; - -diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c -index 75867c0b00172..90a920e7f6642 100644 ---- a/drivers/crypto/stm32/stm32-crc32.c -+++ b/drivers/crypto/stm32/stm32-crc32.c -@@ -279,7 +279,7 @@ static struct shash_alg algs[] = { - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32", -- .cra_driver_name = DRIVER_NAME, -+ .cra_driver_name = "stm32-crc32-crc32", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, -@@ -301,7 +301,7 @@ static struct shash_alg algs[] = { - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32c", -- .cra_driver_name = DRIVER_NAME, -+ .cra_driver_name = "stm32-crc32-crc32c", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, -@@ -384,8 +384,10 @@ static int stm32_crc_remove(struct platform_device *pdev) - struct stm32_crc *crc = platform_get_drvdata(pdev); - int ret = pm_runtime_get_sync(crc->dev); - -- if (ret < 0) -+ if (ret < 0) { -+ pm_runtime_put_noidle(crc->dev); - return ret; -+ } - - spin_lock(&crc_list.lock); - list_del(&crc->list); -diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c -index 7389a0536ff02..81eb136b6c11d 100644 ---- a/drivers/crypto/stm32/stm32-cryp.c -+++ b/drivers/crypto/stm32/stm32-cryp.c -@@ -37,7 +37,6 @@ - /* Mode mask = bits [15..0] */ - #define FLG_MODE_MASK GENMASK(15, 0) - /* Bit [31..16] status */ --#define FLG_CCM_PADDED_WA BIT(16) - - /* Registers */ - #define CRYP_CR 0x00000000 -@@ -105,8 +104,6 @@ - /* Misc */ - #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) - #define GCM_CTR_INIT 2 --#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) --#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) - #define CRYP_AUTOSUSPEND_DELAY 50 - - struct stm32_cryp_caps { -@@ -144,26 +141,16 @@ struct stm32_cryp { - size_t authsize; - size_t hw_blocksize; - -- size_t total_in; -- size_t total_in_save; -- size_t total_out; -- size_t total_out_save; -+ size_t payload_in; -+ size_t header_in; -+ size_t payload_out; - -- struct scatterlist *in_sg; - struct scatterlist *out_sg; -- struct scatterlist *out_sg_save; -- -- struct scatterlist in_sgl; -- struct scatterlist out_sgl; -- bool sgs_copied; -- -- int in_sg_len; -- int out_sg_len; - - struct scatter_walk in_walk; - struct scatter_walk out_walk; - -- u32 last_ctr[4]; -+ __be32 last_ctr[4]; - u32 gcm_ctr; - }; - -@@ -262,6 +249,7 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) - } - - static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); -+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); - - static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) - { -@@ -283,103 +271,6 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) - return cryp; - } - --static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, -- size_t align) --{ -- int len = 0; -- -- if (!total) -- return 0; -- -- if (!IS_ALIGNED(total, align)) -- return -EINVAL; -- -- while (sg) { -- if (!IS_ALIGNED(sg->offset, sizeof(u32))) -- return -EINVAL; -- -- if 
(!IS_ALIGNED(sg->length, align)) -- return -EINVAL; -- -- len += sg->length; -- sg = sg_next(sg); -- } -- -- if (len != total) -- return -EINVAL; -- -- return 0; --} -- --static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) --{ -- int ret; -- -- ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, -- cryp->hw_blocksize); -- if (ret) -- return ret; -- -- ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, -- cryp->hw_blocksize); -- -- return ret; --} -- --static void sg_copy_buf(void *buf, struct scatterlist *sg, -- unsigned int start, unsigned int nbytes, int out) --{ -- struct scatter_walk walk; -- -- if (!nbytes) -- return; -- -- scatterwalk_start(&walk, sg); -- scatterwalk_advance(&walk, start); -- scatterwalk_copychunks(buf, &walk, nbytes, out); -- scatterwalk_done(&walk, out, 0); --} -- --static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) --{ -- void *buf_in, *buf_out; -- int pages, total_in, total_out; -- -- if (!stm32_cryp_check_io_aligned(cryp)) { -- cryp->sgs_copied = 0; -- return 0; -- } -- -- total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); -- pages = total_in ? get_order(total_in) : 1; -- buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); -- -- total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); -- pages = total_out ? get_order(total_out) : 1; -- buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); -- -- if (!buf_in || !buf_out) { -- dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); -- cryp->sgs_copied = 0; -- return -EFAULT; -- } -- -- sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); -- -- sg_init_one(&cryp->in_sgl, buf_in, total_in); -- cryp->in_sg = &cryp->in_sgl; -- cryp->in_sg_len = 1; -- -- sg_init_one(&cryp->out_sgl, buf_out, total_out); -- cryp->out_sg_save = cryp->out_sg; -- cryp->out_sg = &cryp->out_sgl; -- cryp->out_sg_len = 1; -- -- cryp->sgs_copied = 1; -- -- return 0; --} -- - static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) - { - if (!iv) -@@ -481,16 +372,99 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) - - /* Wait for end of processing */ - ret = stm32_cryp_wait_enable(cryp); -- if (ret) -+ if (ret) { - dev_err(cryp->dev, "Timeout (gcm init)\n"); -+ return ret; -+ } - -- return ret; -+ /* Prepare next phase */ -+ if (cryp->areq->assoclen) { -+ cfg |= CR_PH_HEADER; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ } else if (stm32_cryp_get_input_text_len(cryp)) { -+ cfg |= CR_PH_PAYLOAD; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ } -+ -+ return 0; -+} -+ -+static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) -+{ -+ u32 cfg; -+ int err; -+ -+ /* Check if whole header written */ -+ if (!cryp->header_in) { -+ /* Wait for completion */ -+ err = stm32_cryp_wait_busy(cryp); -+ if (err) { -+ dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); -+ stm32_cryp_write(cryp, CRYP_IMSCR, 0); -+ stm32_cryp_finish_req(cryp, err); -+ return; -+ } -+ -+ if (stm32_cryp_get_input_text_len(cryp)) { -+ /* Phase 3 : payload */ -+ cfg = stm32_cryp_read(cryp, CRYP_CR); -+ cfg &= ~CR_CRYPEN; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ -+ cfg &= ~CR_PH_MASK; -+ cfg |= CR_PH_PAYLOAD | CR_CRYPEN; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ } else { -+ /* -+ * Phase 4 : tag. 
-+ * Nothing to read, nothing to write, caller have to -+ * end request -+ */ -+ } -+ } -+} -+ -+static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) -+{ -+ unsigned int i; -+ size_t written; -+ size_t len; -+ u32 alen = cryp->areq->assoclen; -+ u32 block[AES_BLOCK_32] = {0}; -+ u8 *b8 = (u8 *)block; -+ -+ if (alen <= 65280) { -+ /* Write first u32 of B1 */ -+ b8[0] = (alen >> 8) & 0xFF; -+ b8[1] = alen & 0xFF; -+ len = 2; -+ } else { -+ /* Build the two first u32 of B1 */ -+ b8[0] = 0xFF; -+ b8[1] = 0xFE; -+ b8[2] = (alen & 0xFF000000) >> 24; -+ b8[3] = (alen & 0x00FF0000) >> 16; -+ b8[4] = (alen & 0x0000FF00) >> 8; -+ b8[5] = alen & 0x000000FF; -+ len = 6; -+ } -+ -+ written = min_t(size_t, AES_BLOCK_SIZE - len, alen); -+ -+ scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); -+ for (i = 0; i < AES_BLOCK_32; i++) -+ stm32_cryp_write(cryp, CRYP_DIN, block[i]); -+ -+ cryp->header_in -= written; -+ -+ stm32_crypt_gcmccm_end_header(cryp); - } - - static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) - { - int ret; -- u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; -+ u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32]; -+ u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32; - __be32 *bd; - u32 *d; - unsigned int i, textlen; -@@ -531,10 +505,24 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) - - /* Wait for end of processing */ - ret = stm32_cryp_wait_enable(cryp); -- if (ret) -+ if (ret) { - dev_err(cryp->dev, "Timeout (ccm init)\n"); -+ return ret; -+ } - -- return ret; -+ /* Prepare next phase */ -+ if (cryp->areq->assoclen) { -+ cfg |= CR_PH_HEADER | CR_CRYPEN; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ -+ /* Write first (special) block (may move to next phase [payload]) */ -+ stm32_cryp_write_ccm_first_header(cryp); -+ } else if (stm32_cryp_get_input_text_len(cryp)) { -+ cfg |= CR_PH_PAYLOAD; -+ stm32_cryp_write(cryp, CRYP_CR, cfg); -+ } -+ -+ return 0; - } - - static int stm32_cryp_hw_init(struct stm32_cryp *cryp) -@@ -542,7 +530,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) - int ret; - u32 cfg, hw_mode; - -- pm_runtime_resume_and_get(cryp->dev); -+ pm_runtime_get_sync(cryp->dev); - - /* Disable interrupt */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); -@@ -605,16 +593,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) - if (ret) - return ret; - -- /* Phase 2 : header (authenticated data) */ -- if (cryp->areq->assoclen) { -- cfg |= CR_PH_HEADER; -- } else if (stm32_cryp_get_input_text_len(cryp)) { -- cfg |= CR_PH_PAYLOAD; -- stm32_cryp_write(cryp, CRYP_CR, cfg); -- } else { -- cfg |= CR_PH_INIT; -- } -- - break; - - case CR_DES_CBC: -@@ -633,8 +611,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) - - stm32_cryp_write(cryp, CRYP_CR, cfg); - -- cryp->flags &= ~FLG_CCM_PADDED_WA; -- - return 0; - } - -@@ -644,28 +620,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) - /* Phase 4 : output tag */ - err = stm32_cryp_read_auth_tag(cryp); - -- if (!err && (!(is_gcm(cryp) || is_ccm(cryp)))) -+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp)))) - stm32_cryp_get_iv(cryp); - -- if (cryp->sgs_copied) { -- void *buf_in, *buf_out; -- int pages, len; -- -- buf_in = sg_virt(&cryp->in_sgl); -- buf_out = sg_virt(&cryp->out_sgl); -- -- sg_copy_buf(buf_out, cryp->out_sg_save, 0, -- cryp->total_out_save, 1); -- -- len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); -- pages = len ? 
get_order(len) : 1; -- free_pages((unsigned long)buf_in, pages); -- -- len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); -- pages = len ? get_order(len) : 1; -- free_pages((unsigned long)buf_out, pages); -- } -- - pm_runtime_mark_last_busy(cryp->dev); - pm_runtime_put_autosuspend(cryp->dev); - -@@ -674,8 +631,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) - else - crypto_finalize_skcipher_request(cryp->engine, cryp->req, - err); -- -- memset(cryp->ctx->key, 0, cryp->ctx->keylen); - } - - static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) -@@ -801,7 +756,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, - static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) - { -- return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL; -+ switch (authsize) { -+ case 4: -+ case 8: -+ case 12: -+ case 13: -+ case 14: -+ case 15: -+ case 16: -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ return 0; - } - - static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, -@@ -825,31 +793,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, - - static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % AES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); - } - - static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % AES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); - } - - static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % AES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); - } - - static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % AES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); - } - - static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); - } - - static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); - } - -@@ -863,53 +861,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req) - return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM); - } - -+static inline int crypto_ccm_check_iv(const u8 *iv) -+{ -+ /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ -+ if (iv[0] < 1 || iv[0] > 7) -+ return -EINVAL; -+ -+ return 0; -+} -+ - static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req) - { -+ int err; -+ -+ err = crypto_ccm_check_iv(req->iv); -+ if (err) -+ return err; -+ - return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT); - } - - static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req) - { -+ int err; -+ -+ err = crypto_ccm_check_iv(req->iv); -+ if (err) -+ return err; -+ - return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM); - } - - static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); - } - - static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); - } - - static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); - } - - static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); - } - - static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); - } - - static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); - } - - static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); - } - - static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) - { -+ if (req->cryptlen % DES_BLOCK_SIZE) -+ return -EINVAL; -+ -+ if (req->cryptlen == 0) -+ return 0; -+ - return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); - } - -@@ -919,6 +986,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, - struct stm32_cryp_ctx *ctx; - struct stm32_cryp *cryp; - struct stm32_cryp_reqctx *rctx; -+ struct scatterlist *in_sg; - int ret; - - if (!req && !areq) -@@ -944,76 +1012,55 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, - if (req) { - cryp->req = req; - cryp->areq = NULL; -- cryp->total_in = req->cryptlen; -- cryp->total_out = cryp->total_in; -+ cryp->header_in = 0; -+ cryp->payload_in = req->cryptlen; -+ cryp->payload_out = req->cryptlen; -+ cryp->authsize = 0; - } else { - /* - * Length of input and output data: - * Encryption case: -- * INPUT = AssocData || PlainText -+ * INPUT = AssocData || PlainText - * <- assoclen -> <- cryptlen -> -- * <------- total_in -----------> - * -- * OUTPUT = AssocData || CipherText || AuthTag -- * <- assoclen -> <- cryptlen -> <- authsize -> -- * <---------------- total_out -----------------> -+ * OUTPUT = AssocData || CipherText || AuthTag -+ * <- assoclen -> <-- cryptlen --> <- authsize -> - * - * Decryption case: -- * INPUT = 
AssocData || CipherText || AuthTag -- * <- assoclen -> <--------- cryptlen ---------> -- * <- authsize -> -- * <---------------- total_in ------------------> -+ * INPUT = AssocData || CipherTex || AuthTag -+ * <- assoclen ---> <---------- cryptlen ----------> - * -- * OUTPUT = AssocData || PlainText -- * <- assoclen -> <- crypten - authsize -> -- * <---------- total_out -----------------> -+ * OUTPUT = AssocData || PlainText -+ * <- assoclen -> <- cryptlen - authsize -> - */ - cryp->areq = areq; - cryp->req = NULL; - cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); -- cryp->total_in = areq->assoclen + areq->cryptlen; -- if (is_encrypt(cryp)) -- /* Append auth tag to output */ -- cryp->total_out = cryp->total_in + cryp->authsize; -- else -- /* No auth tag in output */ -- cryp->total_out = cryp->total_in - cryp->authsize; -+ if (is_encrypt(cryp)) { -+ cryp->payload_in = areq->cryptlen; -+ cryp->header_in = areq->assoclen; -+ cryp->payload_out = areq->cryptlen; -+ } else { -+ cryp->payload_in = areq->cryptlen - cryp->authsize; -+ cryp->header_in = areq->assoclen; -+ cryp->payload_out = cryp->payload_in; -+ } - } - -- cryp->total_in_save = cryp->total_in; -- cryp->total_out_save = cryp->total_out; -+ in_sg = req ? req->src : areq->src; -+ scatterwalk_start(&cryp->in_walk, in_sg); - -- cryp->in_sg = req ? req->src : areq->src; - cryp->out_sg = req ? req->dst : areq->dst; -- cryp->out_sg_save = cryp->out_sg; -- -- cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); -- if (cryp->in_sg_len < 0) { -- dev_err(cryp->dev, "Cannot get in_sg_len\n"); -- ret = cryp->in_sg_len; -- return ret; -- } -- -- cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); -- if (cryp->out_sg_len < 0) { -- dev_err(cryp->dev, "Cannot get out_sg_len\n"); -- ret = cryp->out_sg_len; -- return ret; -- } -- -- ret = stm32_cryp_copy_sgs(cryp); -- if (ret) -- return ret; -- -- scatterwalk_start(&cryp->in_walk, cryp->in_sg); - scatterwalk_start(&cryp->out_walk, cryp->out_sg); - - if (is_gcm(cryp) || is_ccm(cryp)) { - /* In output, jump after assoc data */ -- scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen); -- cryp->total_out -= cryp->areq->assoclen; -+ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); - } - -+ if (is_ctr(cryp)) -+ memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr)); -+ - ret = stm32_cryp_hw_init(cryp); - return ret; - } -@@ -1061,8 +1108,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) - if (!cryp) - return -ENODEV; - -- if (unlikely(!cryp->areq->assoclen && -- !stm32_cryp_get_input_text_len(cryp))) { -+ if (unlikely(!cryp->payload_in && !cryp->header_in)) { - /* No input data to process: get tag and finish */ - stm32_cryp_finish_req(cryp, 0); - return 0; -@@ -1071,43 +1117,10 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) - return stm32_cryp_cpu_start(cryp); - } - --static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, -- unsigned int n) --{ -- scatterwalk_advance(&cryp->out_walk, n); -- -- if (unlikely(cryp->out_sg->length == _walked_out)) { -- cryp->out_sg = sg_next(cryp->out_sg); -- if (cryp->out_sg) { -- scatterwalk_start(&cryp->out_walk, cryp->out_sg); -- return (sg_virt(cryp->out_sg) + _walked_out); -- } -- } -- -- return (u32 *)((u8 *)dst + n); --} -- --static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, -- unsigned int n) --{ -- scatterwalk_advance(&cryp->in_walk, n); -- -- if (unlikely(cryp->in_sg->length == _walked_in)) { -- cryp->in_sg = 
sg_next(cryp->in_sg); -- if (cryp->in_sg) { -- scatterwalk_start(&cryp->in_walk, cryp->in_sg); -- return (sg_virt(cryp->in_sg) + _walked_in); -- } -- } -- -- return (u32 *)((u8 *)src + n); --} -- - static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) - { -- u32 cfg, size_bit, *dst, d32; -- u8 *d8; -- unsigned int i, j; -+ u32 cfg, size_bit; -+ unsigned int i; - int ret = 0; - - /* Update Config */ -@@ -1130,7 +1143,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) - stm32_cryp_write(cryp, CRYP_DIN, size_bit); - - size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen : -- cryp->areq->cryptlen - AES_BLOCK_SIZE; -+ cryp->areq->cryptlen - cryp->authsize; - size_bit *= 8; - if (cryp->caps->swap_final) - size_bit = (__force u32)cpu_to_be32(size_bit); -@@ -1139,11 +1152,9 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) - stm32_cryp_write(cryp, CRYP_DIN, size_bit); - } else { - /* CCM: write CTR0 */ -- u8 iv[AES_BLOCK_SIZE]; -- u32 *iv32 = (u32 *)iv; -- __be32 *biv; -- -- biv = (void *)iv; -+ u32 iv32[AES_BLOCK_32]; -+ u8 *iv = (u8 *)iv32; -+ __be32 *biv = (__be32 *)iv32; - - memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); - memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); -@@ -1165,39 +1176,18 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) - } - - if (is_encrypt(cryp)) { -+ u32 out_tag[AES_BLOCK_32]; -+ - /* Get and write tag */ -- dst = sg_virt(cryp->out_sg) + _walked_out; -+ for (i = 0; i < AES_BLOCK_32; i++) -+ out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); - -- for (i = 0; i < AES_BLOCK_32; i++) { -- if (cryp->total_out >= sizeof(u32)) { -- /* Read a full u32 */ -- *dst = stm32_cryp_read(cryp, CRYP_DOUT); -- -- dst = stm32_cryp_next_out(cryp, dst, -- sizeof(u32)); -- cryp->total_out -= sizeof(u32); -- } else if (!cryp->total_out) { -- /* Empty fifo out (data from input padding) */ -- stm32_cryp_read(cryp, CRYP_DOUT); -- } else { -- /* Read less than an u32 */ -- d32 = stm32_cryp_read(cryp, CRYP_DOUT); -- d8 = (u8 *)&d32; -- -- for (j = 0; j < cryp->total_out; j++) { -- *((u8 *)dst) = *(d8++); -- dst = stm32_cryp_next_out(cryp, dst, 1); -- } -- cryp->total_out = 0; -- } -- } -+ scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); - } else { - /* Get and check tag */ - u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - -- scatterwalk_map_and_copy(in_tag, cryp->in_sg, -- cryp->total_in_save - cryp->authsize, -- cryp->authsize, 0); -+ scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); - - for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); -@@ -1217,115 +1207,59 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) - { - u32 cr; - -- if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { -- cryp->last_ctr[3] = 0; -- cryp->last_ctr[2]++; -- if (!cryp->last_ctr[2]) { -- cryp->last_ctr[1]++; -- if (!cryp->last_ctr[1]) -- cryp->last_ctr[0]++; -- } -+ if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) { -+ /* -+ * In this case, we need to increment manually the ctr counter, -+ * as HW doesn't handle the U32 carry. 
-+ */ -+ crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); - - cr = stm32_cryp_read(cryp, CRYP_CR); - stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); - -- stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->last_ctr); -+ stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); - - stm32_cryp_write(cryp, CRYP_CR, cr); - } - -- cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); -- cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); -- cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); -- cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); -+ /* The IV registers are BE */ -+ cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR)); -+ cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR)); -+ cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR)); -+ cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR)); - } - --static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) -+static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) - { -- unsigned int i, j; -- u32 d32, *dst; -- u8 *d8; -- size_t tag_size; -- -- /* Do no read tag now (if any) */ -- if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) -- tag_size = cryp->authsize; -- else -- tag_size = 0; -- -- dst = sg_virt(cryp->out_sg) + _walked_out; -+ unsigned int i; -+ u32 block[AES_BLOCK_32]; - -- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { -- if (likely(cryp->total_out - tag_size >= sizeof(u32))) { -- /* Read a full u32 */ -- *dst = stm32_cryp_read(cryp, CRYP_DOUT); -+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) -+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - -- dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); -- cryp->total_out -= sizeof(u32); -- } else if (cryp->total_out == tag_size) { -- /* Empty fifo out (data from input padding) */ -- d32 = stm32_cryp_read(cryp, CRYP_DOUT); -- } else { -- /* Read less than an u32 */ -- d32 = stm32_cryp_read(cryp, CRYP_DOUT); -- d8 = (u8 *)&d32; -- -- for (j = 0; j < cryp->total_out - tag_size; j++) { -- *((u8 *)dst) = *(d8++); -- dst = stm32_cryp_next_out(cryp, dst, 1); -- } -- cryp->total_out = tag_size; -- } -- } -- -- return !(cryp->total_out - tag_size) || !cryp->total_in; -+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_out), 1); -+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_out); - } - - static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) - { -- unsigned int i, j; -- u32 *src; -- u8 d8[4]; -- size_t tag_size; -- -- /* Do no write tag (if any) */ -- if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) -- tag_size = cryp->authsize; -- else -- tag_size = 0; -- -- src = sg_virt(cryp->in_sg) + _walked_in; -+ unsigned int i; -+ u32 block[AES_BLOCK_32] = {0}; - -- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { -- if (likely(cryp->total_in - tag_size >= sizeof(u32))) { -- /* Write a full u32 */ -- stm32_cryp_write(cryp, CRYP_DIN, *src); -+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_in), 0); -+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) -+ stm32_cryp_write(cryp, CRYP_DIN, block[i]); - -- src = stm32_cryp_next_in(cryp, src, sizeof(u32)); -- cryp->total_in -= sizeof(u32); -- } else if (cryp->total_in == tag_size) { -- /* Write padding data */ -- stm32_cryp_write(cryp, CRYP_DIN, 0); -- } else { -- /* Write less than an u32 */ -- memset(d8, 0, sizeof(u32)); -- for (j = 0; j < cryp->total_in - tag_size; j++) { -- d8[j] = *((u8 *)src); -- src = 
stm32_cryp_next_in(cryp, src, 1); -- } -- -- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); -- cryp->total_in = tag_size; -- } -- } -+ cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); - } - - static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) - { - int err; -- u32 cfg, tmp[AES_BLOCK_32]; -- size_t total_in_ori = cryp->total_in; -- struct scatterlist *out_sg_ori = cryp->out_sg; -+ u32 cfg, block[AES_BLOCK_32] = {0}; - unsigned int i; - - /* 'Special workaround' procedure described in the datasheet */ -@@ -1350,18 +1284,25 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) - - /* b) pad and write the last block */ - stm32_cryp_irq_write_block(cryp); -- cryp->total_in = total_in_ori; -+ /* wait end of process */ - err = stm32_cryp_wait_output(cryp); - if (err) { -- dev_err(cryp->dev, "Timeout (write gcm header)\n"); -+ dev_err(cryp->dev, "Timeout (write gcm last data)\n"); - return stm32_cryp_finish_req(cryp, err); - } - - /* c) get and store encrypted data */ -- stm32_cryp_irq_read_data(cryp); -- scatterwalk_map_and_copy(tmp, out_sg_ori, -- cryp->total_in_save - total_in_ori, -- total_in_ori, 0); -+ /* -+ * Same code as stm32_cryp_irq_read_data(), but we want to store -+ * block value -+ */ -+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) -+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT); -+ -+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_out), 1); -+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_out); - - /* d) change mode back to AES GCM */ - cfg &= ~CR_ALGO_MASK; -@@ -1374,19 +1315,13 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) - stm32_cryp_write(cryp, CRYP_CR, cfg); - - /* f) write padded data */ -- for (i = 0; i < AES_BLOCK_32; i++) { -- if (cryp->total_in) -- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); -- else -- stm32_cryp_write(cryp, CRYP_DIN, 0); -- -- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); -- } -+ for (i = 0; i < AES_BLOCK_32; i++) -+ stm32_cryp_write(cryp, CRYP_DIN, block[i]); - - /* g) Empty fifo out */ - err = stm32_cryp_wait_output(cryp); - if (err) { -- dev_err(cryp->dev, "Timeout (write gcm header)\n"); -+ dev_err(cryp->dev, "Timeout (write gcm padded data)\n"); - return stm32_cryp_finish_req(cryp, err); - } - -@@ -1399,16 +1334,14 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) - - static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp) - { -- u32 cfg, payload_bytes; -+ u32 cfg; - - /* disable ip, set NPBLB and reneable ip */ - cfg = stm32_cryp_read(cryp, CRYP_CR); - cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - -- payload_bytes = is_decrypt(cryp) ? 
cryp->total_in - cryp->authsize : -- cryp->total_in; -- cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT; -+ cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT; - cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } -@@ -1417,13 +1350,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) - { - int err = 0; - u32 cfg, iv1tmp; -- u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32]; -- size_t last_total_out, total_in_ori = cryp->total_in; -- struct scatterlist *out_sg_ori = cryp->out_sg; -+ u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32]; -+ u32 block[AES_BLOCK_32] = {0}; - unsigned int i; - - /* 'Special workaround' procedure described in the datasheet */ -- cryp->flags |= FLG_CCM_PADDED_WA; - - /* a) disable ip */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); -@@ -1453,7 +1384,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) - - /* b) pad and write the last block */ - stm32_cryp_irq_write_block(cryp); -- cryp->total_in = total_in_ori; -+ /* wait end of process */ - err = stm32_cryp_wait_output(cryp); - if (err) { - dev_err(cryp->dev, "Timeout (wite ccm padded data)\n"); -@@ -1461,13 +1392,16 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) - } - - /* c) get and store decrypted data */ -- last_total_out = cryp->total_out; -- stm32_cryp_irq_read_data(cryp); -+ /* -+ * Same code as stm32_cryp_irq_read_data(), but we want to store -+ * block value -+ */ -+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) -+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - -- memset(tmp, 0, sizeof(tmp)); -- scatterwalk_map_and_copy(tmp, out_sg_ori, -- cryp->total_out_save - last_total_out, -- last_total_out, 0); -+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, -+ cryp->payload_out), 1); -+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); - - /* d) Load again CRYP_CSGCMCCMxR */ - for (i = 0; i < ARRAY_SIZE(cstmp2); i++) -@@ -1484,10 +1418,10 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) - stm32_cryp_write(cryp, CRYP_CR, cfg); - - /* g) XOR and write padded data */ -- for (i = 0; i < ARRAY_SIZE(tmp); i++) { -- tmp[i] ^= cstmp1[i]; -- tmp[i] ^= cstmp2[i]; -- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); -+ for (i = 0; i < ARRAY_SIZE(block); i++) { -+ block[i] ^= cstmp1[i]; -+ block[i] ^= cstmp2[i]; -+ stm32_cryp_write(cryp, CRYP_DIN, block[i]); - } - - /* h) wait for completion */ -@@ -1501,30 +1435,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) - - static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) - { -- if (unlikely(!cryp->total_in)) { -+ if (unlikely(!cryp->payload_in)) { - dev_warn(cryp->dev, "No more data to process\n"); - return; - } - -- if (unlikely(cryp->total_in < AES_BLOCK_SIZE && -+ if (unlikely(cryp->payload_in < AES_BLOCK_SIZE && - (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) && - is_encrypt(cryp))) { - /* Padding for AES GCM encryption */ -- if (cryp->caps->padding_wa) -+ if (cryp->caps->padding_wa) { - /* Special case 1 */ -- return stm32_cryp_irq_write_gcm_padded_data(cryp); -+ stm32_cryp_irq_write_gcm_padded_data(cryp); -+ return; -+ } - - /* Setting padding bytes (NBBLB) */ - stm32_cryp_irq_set_npblb(cryp); - } - -- if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) && -+ if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) && - (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) && - is_decrypt(cryp))) { - /* Padding for AES CCM 
decryption */ -- if (cryp->caps->padding_wa) -+ if (cryp->caps->padding_wa) { - /* Special case 2 */ -- return stm32_cryp_irq_write_ccm_padded_data(cryp); -+ stm32_cryp_irq_write_ccm_padded_data(cryp); -+ return; -+ } - - /* Setting padding bytes (NBBLB) */ - stm32_cryp_irq_set_npblb(cryp); -@@ -1536,192 +1474,60 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) - stm32_cryp_irq_write_block(cryp); - } - --static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp) -+static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) - { -- int err; -- unsigned int i, j; -- u32 cfg, *src; -- -- src = sg_virt(cryp->in_sg) + _walked_in; -- -- for (i = 0; i < AES_BLOCK_32; i++) { -- stm32_cryp_write(cryp, CRYP_DIN, *src); -- -- src = stm32_cryp_next_in(cryp, src, sizeof(u32)); -- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); -- -- /* Check if whole header written */ -- if ((cryp->total_in_save - cryp->total_in) == -- cryp->areq->assoclen) { -- /* Write padding if needed */ -- for (j = i + 1; j < AES_BLOCK_32; j++) -- stm32_cryp_write(cryp, CRYP_DIN, 0); -- -- /* Wait for completion */ -- err = stm32_cryp_wait_busy(cryp); -- if (err) { -- dev_err(cryp->dev, "Timeout (gcm header)\n"); -- return stm32_cryp_finish_req(cryp, err); -- } -- -- if (stm32_cryp_get_input_text_len(cryp)) { -- /* Phase 3 : payload */ -- cfg = stm32_cryp_read(cryp, CRYP_CR); -- cfg &= ~CR_CRYPEN; -- stm32_cryp_write(cryp, CRYP_CR, cfg); -- -- cfg &= ~CR_PH_MASK; -- cfg |= CR_PH_PAYLOAD; -- cfg |= CR_CRYPEN; -- stm32_cryp_write(cryp, CRYP_CR, cfg); -- } else { -- /* Phase 4 : tag */ -- stm32_cryp_write(cryp, CRYP_IMSCR, 0); -- stm32_cryp_finish_req(cryp, 0); -- } -- -- break; -- } -- -- if (!cryp->total_in) -- break; -- } --} -+ unsigned int i; -+ u32 block[AES_BLOCK_32] = {0}; -+ size_t written; - --static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp) --{ -- int err; -- unsigned int i = 0, j, k; -- u32 alen, cfg, *src; -- u8 d8[4]; -- -- src = sg_virt(cryp->in_sg) + _walked_in; -- alen = cryp->areq->assoclen; -- -- if (!_walked_in) { -- if (cryp->areq->assoclen <= 65280) { -- /* Write first u32 of B1 */ -- d8[0] = (alen >> 8) & 0xFF; -- d8[1] = alen & 0xFF; -- d8[2] = *((u8 *)src); -- src = stm32_cryp_next_in(cryp, src, 1); -- d8[3] = *((u8 *)src); -- src = stm32_cryp_next_in(cryp, src, 1); -- -- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); -- i++; -- -- cryp->total_in -= min_t(size_t, 2, cryp->total_in); -- } else { -- /* Build the two first u32 of B1 */ -- d8[0] = 0xFF; -- d8[1] = 0xFE; -- d8[2] = alen & 0xFF000000; -- d8[3] = alen & 0x00FF0000; -- -- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); -- i++; -- -- d8[0] = alen & 0x0000FF00; -- d8[1] = alen & 0x000000FF; -- d8[2] = *((u8 *)src); -- src = stm32_cryp_next_in(cryp, src, 1); -- d8[3] = *((u8 *)src); -- src = stm32_cryp_next_in(cryp, src, 1); -- -- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); -- i++; -- -- cryp->total_in -= min_t(size_t, 2, cryp->total_in); -- } -- } -+ written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); - -- /* Write next u32 */ -- for (; i < AES_BLOCK_32; i++) { -- /* Build an u32 */ -- memset(d8, 0, sizeof(u32)); -- for (k = 0; k < sizeof(u32); k++) { -- d8[k] = *((u8 *)src); -- src = stm32_cryp_next_in(cryp, src, 1); -- -- cryp->total_in -= min_t(size_t, 1, cryp->total_in); -- if ((cryp->total_in_save - cryp->total_in) == alen) -- break; -- } -+ scatterwalk_copychunks(block, &cryp->in_walk, written, 0); -+ for (i = 0; i < AES_BLOCK_32; i++) -+ stm32_cryp_write(cryp, CRYP_DIN, 
block[i]); - -- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); -- -- if ((cryp->total_in_save - cryp->total_in) == alen) { -- /* Write padding if needed */ -- for (j = i + 1; j < AES_BLOCK_32; j++) -- stm32_cryp_write(cryp, CRYP_DIN, 0); -- -- /* Wait for completion */ -- err = stm32_cryp_wait_busy(cryp); -- if (err) { -- dev_err(cryp->dev, "Timeout (ccm header)\n"); -- return stm32_cryp_finish_req(cryp, err); -- } -- -- if (stm32_cryp_get_input_text_len(cryp)) { -- /* Phase 3 : payload */ -- cfg = stm32_cryp_read(cryp, CRYP_CR); -- cfg &= ~CR_CRYPEN; -- stm32_cryp_write(cryp, CRYP_CR, cfg); -- -- cfg &= ~CR_PH_MASK; -- cfg |= CR_PH_PAYLOAD; -- cfg |= CR_CRYPEN; -- stm32_cryp_write(cryp, CRYP_CR, cfg); -- } else { -- /* Phase 4 : tag */ -- stm32_cryp_write(cryp, CRYP_IMSCR, 0); -- stm32_cryp_finish_req(cryp, 0); -- } -+ cryp->header_in -= written; - -- break; -- } -- } -+ stm32_crypt_gcmccm_end_header(cryp); - } - - static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) - { - struct stm32_cryp *cryp = arg; - u32 ph; -+ u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR); - - if (cryp->irq_status & MISR_OUT) - /* Output FIFO IRQ: read data */ -- if (unlikely(stm32_cryp_irq_read_data(cryp))) { -- /* All bytes processed, finish */ -- stm32_cryp_write(cryp, CRYP_IMSCR, 0); -- stm32_cryp_finish_req(cryp, 0); -- return IRQ_HANDLED; -- } -+ stm32_cryp_irq_read_data(cryp); - - if (cryp->irq_status & MISR_IN) { -- if (is_gcm(cryp)) { -+ if (is_gcm(cryp) || is_ccm(cryp)) { - ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; - if (unlikely(ph == CR_PH_HEADER)) - /* Write Header */ -- stm32_cryp_irq_write_gcm_header(cryp); -- else -- /* Input FIFO IRQ: write data */ -- stm32_cryp_irq_write_data(cryp); -- cryp->gcm_ctr++; -- } else if (is_ccm(cryp)) { -- ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; -- if (unlikely(ph == CR_PH_HEADER)) -- /* Write Header */ -- stm32_cryp_irq_write_ccm_header(cryp); -+ stm32_cryp_irq_write_gcmccm_header(cryp); - else - /* Input FIFO IRQ: write data */ - stm32_cryp_irq_write_data(cryp); -+ if (is_gcm(cryp)) -+ cryp->gcm_ctr++; - } else { - /* Input FIFO IRQ: write data */ - stm32_cryp_irq_write_data(cryp); - } - } - -+ /* Mask useless interrupts */ -+ if (!cryp->payload_in && !cryp->header_in) -+ it_mask &= ~IMSCR_IN; -+ if (!cryp->payload_out) -+ it_mask &= ~IMSCR_OUT; -+ stm32_cryp_write(cryp, CRYP_IMSCR, it_mask); -+ -+ if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) -+ stm32_cryp_finish_req(cryp, 0); -+ - return IRQ_HANDLED; - } - -@@ -1742,7 +1548,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1759,7 +1565,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1777,7 +1583,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1795,7 +1601,7 @@ static struct skcipher_alg crypto_algs[] = { - 
.base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1812,7 +1618,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1830,7 +1636,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1847,7 +1653,7 @@ static struct skcipher_alg crypto_algs[] = { - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .base.cra_alignmask = 0xf, -+ .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, - - .init = stm32_cryp_init_tfm, -@@ -1877,7 +1683,7 @@ static struct aead_alg aead_algs[] = { - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .cra_alignmask = 0xf, -+ .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - }, -@@ -1897,7 +1703,7 @@ static struct aead_alg aead_algs[] = { - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct stm32_cryp_ctx), -- .cra_alignmask = 0xf, -+ .cra_alignmask = 0, - .cra_module = THIS_MODULE, - }, - }, -@@ -2025,8 +1831,6 @@ err_engine1: - list_del(&cryp->list); - spin_unlock(&cryp_list.lock); - -- pm_runtime_disable(dev); -- pm_runtime_put_noidle(dev); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev); - -diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c -index 389de9e3302d5..4df5330afaa1d 100644 ---- a/drivers/crypto/stm32/stm32-hash.c -+++ b/drivers/crypto/stm32/stm32-hash.c -@@ -565,9 +565,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) - } - - for_each_sg(rctx->sg, tsg, rctx->nents, i) { -+ sg[0] = *tsg; - len = sg->length; - -- sg[0] = *tsg; - if (sg_is_last(sg)) { - if (hdev->dma_mode == 1) { - len = (ALIGN(sg->length, 16) - 16); -@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) - static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, - struct stm32_hash_request_ctx *rctx) - { -- pm_runtime_resume_and_get(hdev->dev); -+ pm_runtime_get_sync(hdev->dev); - - if (!(HASH_FLAGS_INIT & hdev->flags)) { - stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); -@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) - u32 *preg; - unsigned int i; - -- pm_runtime_resume_and_get(hdev->dev); -+ pm_runtime_get_sync(hdev->dev); - - while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) - cpu_relax(); -@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) - - preg = rctx->hw_context; - -- pm_runtime_resume_and_get(hdev->dev); -+ pm_runtime_get_sync(hdev->dev); - - stm32_hash_write(hdev, HASH_IMR, *preg++); - stm32_hash_write(hdev, HASH_STR, *preg++); -@@ -1566,9 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev) - if (!hdev) - return -ENODEV; - -- ret = pm_runtime_resume_and_get(hdev->dev); -- if (ret < 0) -- return 
ret; -+ ret = pm_runtime_get_sync(hdev->dev); - - stm32_hash_unregister_algs(hdev); - -@@ -1584,7 +1582,8 @@ static int stm32_hash_remove(struct platform_device *pdev) - pm_runtime_disable(hdev->dev); - pm_runtime_put_noidle(hdev->dev); - -- clk_disable_unprepare(hdev->clk); -+ if (ret >= 0) -+ clk_disable_unprepare(hdev->clk); - - return 0; - } -diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig -index c85fab7ef0bdd..b2c28b87f14b3 100644 ---- a/drivers/crypto/vmx/Kconfig -+++ b/drivers/crypto/vmx/Kconfig -@@ -2,7 +2,11 @@ - config CRYPTO_DEV_VMX_ENCRYPT - tristate "Encryption acceleration support on P8 CPU" - depends on CRYPTO_DEV_VMX -+ select CRYPTO_AES -+ select CRYPTO_CBC -+ select CRYPTO_CTR - select CRYPTO_GHASH -+ select CRYPTO_XTS - default m - help - Support for VMX cryptographic acceleration instructions on Power8 CPU. -diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c -index 267d8042bec22..0987a6423ee06 100644 ---- a/drivers/cxl/core/bus.c -+++ b/drivers/cxl/core/bus.c -@@ -182,6 +182,7 @@ static void cxl_decoder_release(struct device *dev) - - ida_free(&port->decoder_ida, cxld->id); - kfree(cxld); -+ put_device(&port->dev); - } - - static const struct device_type cxl_decoder_switch_type = { -@@ -481,6 +482,9 @@ cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, - if (rc < 0) - goto err; - -+ /* need parent to stick around to release the id */ -+ get_device(&port->dev); -+ - *cxld = (struct cxl_decoder) { - .id = rc, - .range = { -diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c -index 41de4a136ecd7..2e7027a3fef3b 100644 ---- a/drivers/cxl/core/regs.c -+++ b/drivers/cxl/core/regs.c -@@ -35,7 +35,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, - struct cxl_component_reg_map *map) - { - int cap, cap_count; -- u64 cap_array; -+ u32 cap_array; - - *map = (struct cxl_component_reg_map) { 0 }; - -@@ -45,11 +45,11 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, - */ - base += CXL_CM_OFFSET; - -- cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); -+ cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET); - - if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, -- "Couldn't locate the CXL.cache and CXL.mem capability array header./n"); -+ "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); - return; - } - -diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c -index 8e45aa07d662f..5444b5a7fd3c4 100644 ---- a/drivers/cxl/pci.c -+++ b/drivers/cxl/pci.c -@@ -972,7 +972,7 @@ static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, - if (pci_resource_len(pdev, bar) < offset) { - dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, - &pdev->resource[bar], (unsigned long long)offset); -- return IOMEM_ERR_PTR(-ENXIO); -+ return NULL; - } - - addr = pci_iomap(pdev, bar, 0); -diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c -index 9652c3ee41e7f..2bb2f9a0499f7 100644 ---- a/drivers/cxl/pmem.c -+++ b/drivers/cxl/pmem.c -@@ -149,14 +149,24 @@ static void cxl_nvb_update_state(struct work_struct *work) - put_device(&cxl_nvb->dev); - } - -+static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb) -+{ -+ /* -+ * Take a reference that the workqueue will drop if new work -+ * gets queued. 
-+ */ -+ get_device(&cxl_nvb->dev); -+ if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work)) -+ put_device(&cxl_nvb->dev); -+} -+ - static void cxl_nvdimm_bridge_remove(struct device *dev) - { - struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - - if (cxl_nvb->state == CXL_NVB_ONLINE) - cxl_nvb->state = CXL_NVB_OFFLINE; -- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work)) -- get_device(&cxl_nvb->dev); -+ cxl_nvdimm_bridge_state_work(cxl_nvb); - } - - static int cxl_nvdimm_bridge_probe(struct device *dev) -@@ -177,8 +187,7 @@ static int cxl_nvdimm_bridge_probe(struct device *dev) - } - - cxl_nvb->state = CXL_NVB_ONLINE; -- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work)) -- get_device(&cxl_nvb->dev); -+ cxl_nvdimm_bridge_state_work(cxl_nvb); - - return 0; - } -diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c -index 6cc4da4c713d9..7ad61c707687f 100644 ---- a/drivers/dax/bus.c -+++ b/drivers/dax/bus.c -@@ -397,23 +397,39 @@ static void unregister_dev_dax(void *dev) - dev_dbg(dev, "%s\n", __func__); - - kill_dev_dax(dev_dax); -- free_dev_dax_ranges(dev_dax); - device_del(dev); -+ free_dev_dax_ranges(dev_dax); - put_device(dev); - } - -+static void dax_region_free(struct kref *kref) -+{ -+ struct dax_region *dax_region; -+ -+ dax_region = container_of(kref, struct dax_region, kref); -+ kfree(dax_region); -+} -+ -+void dax_region_put(struct dax_region *dax_region) -+{ -+ kref_put(&dax_region->kref, dax_region_free); -+} -+EXPORT_SYMBOL_GPL(dax_region_put); -+ - /* a return value >= 0 indicates this invocation invalidated the id */ - static int __free_dev_dax_id(struct dev_dax *dev_dax) - { -- struct dax_region *dax_region = dev_dax->region; - struct device *dev = &dev_dax->dev; -+ struct dax_region *dax_region; - int rc = dev_dax->id; - - device_lock_assert(dev); - -- if (is_static(dax_region) || dev_dax->id < 0) -+ if (!dev_dax->dyn_id || dev_dax->id < 0) - return -1; -+ dax_region = dev_dax->region; - ida_free(&dax_region->ida, dev_dax->id); -+ dax_region_put(dax_region); - dev_dax->id = -1; - return rc; - } -@@ -429,6 +445,20 @@ static int free_dev_dax_id(struct dev_dax *dev_dax) - return rc; - } - -+static int alloc_dev_dax_id(struct dev_dax *dev_dax) -+{ -+ struct dax_region *dax_region = dev_dax->region; -+ int id; -+ -+ id = ida_alloc(&dax_region->ida, GFP_KERNEL); -+ if (id < 0) -+ return id; -+ kref_get(&dax_region->kref); -+ dev_dax->dyn_id = true; -+ dev_dax->id = id; -+ return id; -+} -+ - static ssize_t delete_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) - { -@@ -516,20 +546,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = { - NULL, - }; - --static void dax_region_free(struct kref *kref) --{ -- struct dax_region *dax_region; -- -- dax_region = container_of(kref, struct dax_region, kref); -- kfree(dax_region); --} -- --void dax_region_put(struct dax_region *dax_region) --{ -- kref_put(&dax_region->kref, dax_region_free); --} --EXPORT_SYMBOL_GPL(dax_region_put); -- - static void dax_region_unregister(void *region) - { - struct dax_region *dax_region = region; -@@ -591,10 +607,12 @@ EXPORT_SYMBOL_GPL(alloc_dax_region); - static void dax_mapping_release(struct device *dev) - { - struct dax_mapping *mapping = to_dax_mapping(dev); -- struct dev_dax *dev_dax = to_dev_dax(dev->parent); -+ struct device *parent = dev->parent; -+ struct dev_dax *dev_dax = to_dev_dax(parent); - - ida_free(&dev_dax->ida, mapping->id); - kfree(mapping); -+ put_device(parent); - } - - static void unregister_dax_mapping(void 
*data) -@@ -734,6 +752,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id) - dev = &mapping->dev; - device_initialize(dev); - dev->parent = &dev_dax->dev; -+ get_device(dev->parent); - dev->type = &dax_mapping_type; - dev_set_name(dev, "mapping%d", mapping->id); - rc = device_add(dev); -@@ -1251,12 +1270,10 @@ static const struct attribute_group *dax_attribute_groups[] = { - static void dev_dax_release(struct device *dev) - { - struct dev_dax *dev_dax = to_dev_dax(dev); -- struct dax_region *dax_region = dev_dax->region; - struct dax_device *dax_dev = dev_dax->dax_dev; - - put_dax(dax_dev); - free_dev_dax_id(dev_dax); -- dax_region_put(dax_region); - kfree(dev_dax->pgmap); - kfree(dev_dax); - } -@@ -1280,6 +1297,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) - if (!dev_dax) - return ERR_PTR(-ENOMEM); - -+ dev_dax->region = dax_region; - if (is_static(dax_region)) { - if (dev_WARN_ONCE(parent, data->id < 0, - "dynamic id specified to static region\n")) { -@@ -1295,13 +1313,11 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) - goto err_id; - } - -- rc = ida_alloc(&dax_region->ida, GFP_KERNEL); -+ rc = alloc_dev_dax_id(dev_dax); - if (rc < 0) - goto err_id; -- dev_dax->id = rc; - } - -- dev_dax->region = dax_region; - dev = &dev_dax->dev; - device_initialize(dev); - dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); -@@ -1339,7 +1355,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) - dev_dax->target_node = dax_region->target_node; - dev_dax->align = dax_region->align; - ida_init(&dev_dax->ida); -- kref_get(&dax_region->kref); - - inode = dax_inode(dax_dev); - dev->devt = inode->i_rdev; -diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h -index 1c974b7caae6e..afcada6fd2eda 100644 ---- a/drivers/dax/dax-private.h -+++ b/drivers/dax/dax-private.h -@@ -52,7 +52,8 @@ struct dax_mapping { - * @region - parent region - * @dax_dev - core dax functionality - * @target_node: effective numa node if dev_dax memory range is onlined -- * @id: ida allocated id -+ * @dyn_id: is this a dynamic or statically created instance -+ * @id: ida allocated id when the dax_region is not static - * @ida: mapping id allocator - * @dev - device core - * @pgmap - pgmap for memmap setup / lifetime (driver owned) -@@ -64,6 +65,7 @@ struct dev_dax { - struct dax_device *dax_dev; - unsigned int align; - int target_node; -+ bool dyn_id; - int id; - struct ida ida; - struct device dev; -diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c -index cb6401c9e9a4f..acf31cc1dbcca 100644 ---- a/drivers/dax/hmem/device.c -+++ b/drivers/dax/hmem/device.c -@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r) - .start = r->start, - .end = r->end, - .flags = IORESOURCE_MEM, -+ .desc = IORES_DESC_SOFT_RESERVED, - }; - struct platform_device *pdev; - struct memregion_info info; -diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c -index a37622060fffa..97723ee15bc68 100644 ---- a/drivers/dax/kmem.c -+++ b/drivers/dax/kmem.c -@@ -88,7 +88,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) - if (!data->res_name) - goto err_res_name; - -- rc = memory_group_register_static(numa_node, total_len); -+ rc = memory_group_register_static(numa_node, PFN_UP(total_len)); - if (rc < 0) - goto err_reg_mgid; - data->mgid = rc; -@@ -135,7 +135,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) - if (rc) { - dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n", - i, range.start, 
range.end); -- release_resource(res); -+ remove_resource(res); - kfree(res); - data->res[i] = NULL; - if (mapped) -@@ -181,7 +181,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax) - - rc = remove_memory(range.start, range_len(&range)); - if (rc == 0) { -- release_resource(data->res[i]); -+ remove_resource(data->res[i]); - kfree(data->res[i]); - data->res[i] = NULL; - success++; -diff --git a/drivers/dax/super.c b/drivers/dax/super.c -index fc89e91beea7c..7610e4a9ac4e2 100644 ---- a/drivers/dax/super.c -+++ b/drivers/dax/super.c -@@ -678,6 +678,7 @@ static int dax_fs_init(void) - static void dax_fs_exit(void) - { - kern_unmount(dax_mnt); -+ rcu_barrier(); - kmem_cache_destroy(dax_cache); - } - -diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c -index 85faa7a5c7d12..29a14b0ffe334 100644 ---- a/drivers/devfreq/devfreq.c -+++ b/drivers/devfreq/devfreq.c -@@ -762,6 +762,7 @@ static void devfreq_dev_release(struct device *dev) - dev_pm_opp_put_opp_table(devfreq->opp_table); - - mutex_destroy(&devfreq->lock); -+ srcu_cleanup_notifier_head(&devfreq->transition_notifier_list); - kfree(devfreq); - } - -@@ -775,8 +776,7 @@ static void remove_sysfs_files(struct devfreq *devfreq, - * @dev: the device to add devfreq feature. - * @profile: device-specific profile to run devfreq. - * @governor_name: name of the policy to choose frequency. -- * @data: private data for the governor. The devfreq framework does not -- * touch this value. -+ * @data: devfreq driver pass to governors, governor should not change it. - */ - struct devfreq *devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, -@@ -1003,8 +1003,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res) - * @dev: the device to add devfreq feature. - * @profile: device-specific profile to run devfreq. - * @governor_name: name of the policy to choose frequency. -- * @data: private data for the governor. The devfreq framework does not -- * touch this value. -+ * @data: devfreq driver pass to governors, governor should not change it. 
- * - * This function manages automatically the memory of devfreq device using device - * resource management and simplify the free operation for memory of devfreq -diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c -index 17ed980d90998..d6da9c3e31067 100644 ---- a/drivers/devfreq/event/exynos-ppmu.c -+++ b/drivers/devfreq/event/exynos-ppmu.c -@@ -514,15 +514,19 @@ static int of_get_devfreq_events(struct device_node *np, - - count = of_get_child_count(events_np); - desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL); -- if (!desc) -+ if (!desc) { -+ of_node_put(events_np); - return -ENOMEM; -+ } - info->num_events = count; - - of_id = of_match_device(exynos_ppmu_id_match, dev); - if (of_id) - info->ppmu_type = (enum exynos_ppmu_type)of_id->data; -- else -+ else { -+ of_node_put(events_np); - return -EINVAL; -+ } - - j = 0; - for_each_child_of_node(events_np, node) { -diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c -index ab9db7adb3ade..d69672ccacc49 100644 ---- a/drivers/devfreq/governor_userspace.c -+++ b/drivers/devfreq/governor_userspace.c -@@ -21,7 +21,7 @@ struct userspace_data { - - static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) - { -- struct userspace_data *data = df->data; -+ struct userspace_data *data = df->governor_data; - - if (data->valid) - *freq = data->user_frequency; -@@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, - int err = 0; - - mutex_lock(&devfreq->lock); -- data = devfreq->data; -+ data = devfreq->governor_data; - - sscanf(buf, "%lu", &wanted); - data->user_frequency = wanted; -@@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev, - int err = 0; - - mutex_lock(&devfreq->lock); -- data = devfreq->data; -+ data = devfreq->governor_data; - - if (data->valid) - err = sprintf(buf, "%lu\n", data->user_frequency); -@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq) - goto out; - } - data->valid = false; -- devfreq->data = data; -+ devfreq->governor_data = data; - - err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); - out: -@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq) - if (devfreq->dev.kobj.sd) - sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); - -- kfree(devfreq->data); -- devfreq->data = NULL; -+ kfree(devfreq->governor_data); -+ devfreq->governor_data = NULL; - } - - static int devfreq_userspace_handler(struct devfreq *devfreq, -diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c -index 293857ebfd75d..538e8dc74f40a 100644 ---- a/drivers/devfreq/rk3399_dmc.c -+++ b/drivers/devfreq/rk3399_dmc.c -@@ -477,6 +477,8 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev) - { - struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev); - -+ devfreq_event_disable_edev(dmcfreq->edev); -+ - /* - * Before remove the opp table we need to unregister the opp notifier. - */ -diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c -index 4c06c93c93d32..c7f7134adc21d 100644 ---- a/drivers/dio/dio.c -+++ b/drivers/dio/dio.c -@@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 }; - - #endif /* CONFIG_DIO_CONSTANTS */ - -+static void dio_dev_release(struct device *dev) -+{ -+ struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); -+ kfree(ddev); -+} -+ - int __init dio_find(int deviceid) - { - /* Called to find a DIO device before the full bus scan has run. 
-@@ -224,6 +230,7 @@ static int __init dio_init(void) - dev->bus = &dio_bus; - dev->dev.parent = &dio_bus.dev; - dev->dev.bus = &dio_bus_type; -+ dev->dev.release = dio_dev_release; - dev->scode = scode; - dev->resource.start = pa; - dev->resource.end = pa + DIO_SIZE(scode, va); -@@ -251,6 +258,7 @@ static int __init dio_init(void) - if (error) { - pr_err("DIO: Error registering device %s\n", - dev->name); -+ put_device(&dev->dev); - continue; - } - error = dio_create_sysfs_dev_files(dev); -diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c -index 63d32261b63ff..968c3df2810e6 100644 ---- a/drivers/dma-buf/dma-buf.c -+++ b/drivers/dma-buf/dma-buf.c -@@ -67,14 +67,11 @@ static void dma_buf_release(struct dentry *dentry) - BUG_ON(dmabuf->vmapping_counter); - - /* -- * Any fences that a dma-buf poll can wait on should be signaled -- * before releasing dma-buf. This is the responsibility of each -- * driver that uses the reservation objects. -- * -- * If you hit this BUG() it means someone dropped their ref to the -- * dma-buf while still having pending operation to the buffer. -+ * If you hit this BUG() it could mean: -+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else -+ * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback - */ -- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); -+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active); - - dma_buf_stats_teardown(dmabuf); - dmabuf->ops->release(dmabuf); -@@ -82,6 +79,7 @@ static void dma_buf_release(struct dentry *dentry) - if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) - dma_resv_fini(dmabuf->resv); - -+ WARN_ON(!list_empty(&dmabuf->attachments)); - module_put(dmabuf->owner); - kfree(dmabuf->name); - kfree(dmabuf); -@@ -199,22 +197,64 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) - static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) - { - struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; -+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll); - unsigned long flags; - - spin_lock_irqsave(&dcb->poll->lock, flags); - wake_up_locked_poll(dcb->poll, dcb->active); - dcb->active = 0; - spin_unlock_irqrestore(&dcb->poll->lock, flags); -+ dma_fence_put(fence); -+ /* Paired with get_file in dma_buf_poll */ -+ fput(dmabuf->file); -+} -+ -+static bool dma_buf_poll_shared(struct dma_resv *resv, -+ struct dma_buf_poll_cb_t *dcb) -+{ -+ struct dma_resv_list *fobj = dma_resv_shared_list(resv); -+ struct dma_fence *fence; -+ int i, r; -+ -+ if (!fobj) -+ return false; -+ -+ for (i = 0; i < fobj->shared_count; ++i) { -+ fence = rcu_dereference_protected(fobj->shared[i], -+ dma_resv_held(resv)); -+ dma_fence_get(fence); -+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); -+ if (!r) -+ return true; -+ dma_fence_put(fence); -+ } -+ -+ return false; -+} -+ -+static bool dma_buf_poll_excl(struct dma_resv *resv, -+ struct dma_buf_poll_cb_t *dcb) -+{ -+ struct dma_fence *fence = dma_resv_excl_fence(resv); -+ int r; -+ -+ if (!fence) -+ return false; -+ -+ dma_fence_get(fence); -+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); -+ if (!r) -+ return true; -+ dma_fence_put(fence); -+ -+ return false; - } - - static __poll_t dma_buf_poll(struct file *file, poll_table *poll) - { - struct dma_buf *dmabuf; - struct dma_resv *resv; -- struct dma_resv_list *fobj; -- struct dma_fence *fence_excl; - __poll_t events; -- unsigned shared_count, seq; - - dmabuf = 
file->private_data; - if (!dmabuf || !dmabuf->resv) -@@ -228,101 +268,57 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) - if (!events) - return 0; - --retry: -- seq = read_seqcount_begin(&resv->seq); -- rcu_read_lock(); -- -- fobj = rcu_dereference(resv->fence); -- if (fobj) -- shared_count = fobj->shared_count; -- else -- shared_count = 0; -- fence_excl = dma_resv_excl_fence(resv); -- if (read_seqcount_retry(&resv->seq, seq)) { -- rcu_read_unlock(); -- goto retry; -- } -- -- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { -- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; -- __poll_t pevents = EPOLLIN; -+ dma_resv_lock(resv, NULL); - -- if (shared_count == 0) -- pevents |= EPOLLOUT; -+ if (events & EPOLLOUT) { -+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out; - -+ /* Check that callback isn't busy */ - spin_lock_irq(&dmabuf->poll.lock); -- if (dcb->active) { -- dcb->active |= pevents; -- events &= ~pevents; -- } else -- dcb->active = pevents; -+ if (dcb->active) -+ events &= ~EPOLLOUT; -+ else -+ dcb->active = EPOLLOUT; - spin_unlock_irq(&dmabuf->poll.lock); - -- if (events & pevents) { -- if (!dma_fence_get_rcu(fence_excl)) { -- /* force a recheck */ -- events &= ~pevents; -- dma_buf_poll_cb(NULL, &dcb->cb); -- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, -- dma_buf_poll_cb)) { -- events &= ~pevents; -- dma_fence_put(fence_excl); -- } else { -- /* -- * No callback queued, wake up any additional -- * waiters. -- */ -- dma_fence_put(fence_excl); -+ if (events & EPOLLOUT) { -+ /* Paired with fput in dma_buf_poll_cb */ -+ get_file(dmabuf->file); -+ -+ if (!dma_buf_poll_shared(resv, dcb) && -+ !dma_buf_poll_excl(resv, dcb)) -+ -+ /* No callback queued, wake up any other waiters */ - dma_buf_poll_cb(NULL, &dcb->cb); -- } -+ else -+ events &= ~EPOLLOUT; - } - } - -- if ((events & EPOLLOUT) && shared_count > 0) { -- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; -- int i; -+ if (events & EPOLLIN) { -+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in; - -- /* Only queue a new callback if no event has fired yet */ -+ /* Check that callback isn't busy */ - spin_lock_irq(&dmabuf->poll.lock); - if (dcb->active) -- events &= ~EPOLLOUT; -+ events &= ~EPOLLIN; - else -- dcb->active = EPOLLOUT; -+ dcb->active = EPOLLIN; - spin_unlock_irq(&dmabuf->poll.lock); - -- if (!(events & EPOLLOUT)) -- goto out; -- -- for (i = 0; i < shared_count; ++i) { -- struct dma_fence *fence = rcu_dereference(fobj->shared[i]); -+ if (events & EPOLLIN) { -+ /* Paired with fput in dma_buf_poll_cb */ -+ get_file(dmabuf->file); - -- if (!dma_fence_get_rcu(fence)) { -- /* -- * fence refcount dropped to zero, this means -- * that fobj has been freed -- * -- * call dma_buf_poll_cb and force a recheck! -- */ -- events &= ~EPOLLOUT; -+ if (!dma_buf_poll_excl(resv, dcb)) -+ /* No callback queued, wake up any other waiters */ - dma_buf_poll_cb(NULL, &dcb->cb); -- break; -- } -- if (!dma_fence_add_callback(fence, &dcb->cb, -- dma_buf_poll_cb)) { -- dma_fence_put(fence); -- events &= ~EPOLLOUT; -- break; -- } -- dma_fence_put(fence); -+ else -+ events &= ~EPOLLIN; - } -- -- /* No callback queued, wake up any additional waiters. 
*/ -- if (i == shared_count) -- dma_buf_poll_cb(NULL, &dcb->cb); - } - --out: -- rcu_read_unlock(); -+ dma_resv_unlock(resv); - return events; - } - -@@ -447,6 +443,7 @@ static inline int is_dma_buf_file(struct file *file) - - static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) - { -+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); - struct file *file; - struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); - -@@ -456,6 +453,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) - inode->i_size = dmabuf->size; - inode_set_bytes(inode, dmabuf->size); - -+ /* -+ * The ->i_ino acquired from get_next_ino() is not unique thus -+ * not suitable for using it as dentry name by dmabuf stats. -+ * Override ->i_ino with the unique and dmabuffs specific -+ * value. -+ */ -+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode); - file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", - flags, &dma_buf_fops); - if (IS_ERR(file)) -@@ -565,8 +569,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) - dmabuf->owner = exp_info->owner; - spin_lock_init(&dmabuf->name_lock); - init_waitqueue_head(&dmabuf->poll); -- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; -- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; -+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll; -+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0; - - if (!resv) { - resv = (struct dma_resv *)&dmabuf[1]; -@@ -583,10 +587,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) - file->f_mode |= FMODE_LSEEK; - dmabuf->file = file; - -- ret = dma_buf_stats_setup(dmabuf); -- if (ret) -- goto err_sysfs; -- - mutex_init(&dmabuf->lock); - INIT_LIST_HEAD(&dmabuf->attachments); - -@@ -594,6 +594,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) - list_add(&dmabuf->list_node, &db_list.head); - mutex_unlock(&db_list.lock); - -+ ret = dma_buf_stats_setup(dmabuf); -+ if (ret) -+ goto err_sysfs; -+ - return dmabuf; - - err_sysfs: -diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c -index d3fbd950be944..3e07f961e2f3d 100644 ---- a/drivers/dma-buf/dma-fence-array.c -+++ b/drivers/dma-buf/dma-fence-array.c -@@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence) - { - struct dma_fence_array *array = to_dma_fence_array(fence); - -- return atomic_read(&array->num_pending) <= 0; -+ if (atomic_read(&array->num_pending) > 0) -+ return false; -+ -+ dma_fence_array_clear_pending_error(array); -+ return true; - } - - static void dma_fence_array_release(struct dma_fence *fence) -diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c -index 56bf5ad01ad54..59d158873f4cb 100644 ---- a/drivers/dma-buf/dma-heap.c -+++ b/drivers/dma-buf/dma-heap.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, - if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) - return -EINVAL; - -+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); - /* Get the kernel ioctl cmd that matches */ - kcmd = dma_heap_ioctl_cmds[nr]; - -@@ -231,18 +233,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) - return ERR_PTR(-EINVAL); - } - -- /* check the name is unique */ -- mutex_lock(&heap_list_lock); -- list_for_each_entry(h, &heap_list, list) { -- if (!strcmp(h->name, exp_info->name)) { -- 
mutex_unlock(&heap_list_lock); -- pr_err("dma_heap: Already registered heap named %s\n", -- exp_info->name); -- return ERR_PTR(-EINVAL); -- } -- } -- mutex_unlock(&heap_list_lock); -- - heap = kzalloc(sizeof(*heap), GFP_KERNEL); - if (!heap) - return ERR_PTR(-ENOMEM); -@@ -281,13 +271,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) - err_ret = ERR_CAST(dev_ret); - goto err2; - } -- /* Add heap to the list */ -+ - mutex_lock(&heap_list_lock); -+ /* check the name is unique */ -+ list_for_each_entry(h, &heap_list, list) { -+ if (!strcmp(h->name, exp_info->name)) { -+ mutex_unlock(&heap_list_lock); -+ pr_err("dma_heap: Already registered heap named %s\n", -+ exp_info->name); -+ err_ret = ERR_PTR(-EINVAL); -+ goto err3; -+ } -+ } -+ -+ /* Add heap to the list */ - list_add(&heap->list, &heap_list); - mutex_unlock(&heap_list_lock); - - return heap; - -+err3: -+ device_destroy(dma_heap_class, heap->heap_devt); - err2: - cdev_del(&heap->heap_cdev); - err1: -diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c -index 0c05b79870f96..83f02bd51dda6 100644 ---- a/drivers/dma-buf/heaps/cma_heap.c -+++ b/drivers/dma-buf/heaps/cma_heap.c -@@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - struct cma_heap_buffer *buffer = dmabuf->priv; - struct dma_heap_attachment *a; - -+ mutex_lock(&buffer->lock); -+ - if (buffer->vmap_cnt) - invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); - -- mutex_lock(&buffer->lock); - list_for_each_entry(a, &buffer->attachments, list) { - if (!a->mapped) - continue; -@@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - struct cma_heap_buffer *buffer = dmabuf->priv; - struct dma_heap_attachment *a; - -+ mutex_lock(&buffer->lock); -+ - if (buffer->vmap_cnt) - flush_kernel_vmap_range(buffer->vaddr, buffer->len); - -- mutex_lock(&buffer->lock); - list_for_each_entry(a, &buffer->attachments, list) { - if (!a->mapped) - continue; -diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c -index 23a7e74ef9666..8660508f3684f 100644 ---- a/drivers/dma-buf/heaps/system_heap.c -+++ b/drivers/dma-buf/heaps/system_heap.c -@@ -289,7 +289,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf) - int i; - - table = &buffer->sg_table; -- for_each_sg(table->sgl, sg, table->nents, i) { -+ for_each_sgtable_sg(table, sg, i) { - struct page *page = sg_page(sg); - - __free_pages(page, compound_order(page)); -diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c -index 348b3a9170fa4..7f5ed1aa7a9f8 100644 ---- a/drivers/dma-buf/sw_sync.c -+++ b/drivers/dma-buf/sw_sync.c -@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = { - */ - static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) - { -+ LIST_HEAD(signalled); - struct sync_pt *pt, *next; - - trace_sync_timeline(obj); -@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) - if (!timeline_fence_signaled(&pt->base)) - break; - -- list_del_init(&pt->link); -+ dma_fence_get(&pt->base); -+ -+ list_move_tail(&pt->link, &signalled); - rb_erase(&pt->node, &obj->pt_tree); - -- /* -- * A signal callback may release the last reference to this -- * fence, causing it to be freed. 
That operation has to be -- * last to avoid a use after free inside this loop, and must -- * be after we remove the fence from the timeline in order to -- * prevent deadlocking on timeline->lock inside -- * timeline_fence_release(). -- */ - dma_fence_signal_locked(&pt->base); - } - - spin_unlock_irq(&obj->lock); -+ -+ list_for_each_entry_safe(pt, next, &signalled, link) { -+ list_del_init(&pt->link); -+ dma_fence_put(&pt->base); -+ } - } - - /** -diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c -index c57a609db75be..bf11d32205f38 100644 ---- a/drivers/dma-buf/udmabuf.c -+++ b/drivers/dma-buf/udmabuf.c -@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) - { - struct vm_area_struct *vma = vmf->vma; - struct udmabuf *ubuf = vma->vm_private_data; -+ pgoff_t pgoff = vmf->pgoff; - -- vmf->page = ubuf->pages[vmf->pgoff]; -+ if (pgoff >= ubuf->pagecount) -+ return VM_FAULT_SIGBUS; -+ vmf->page = ubuf->pages[pgoff]; - get_page(vmf->page); - return 0; - } -@@ -121,17 +124,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf, - { - struct udmabuf *ubuf = buf->priv; - struct device *dev = ubuf->device->this_device; -+ int ret = 0; - - if (!ubuf->sg) { - ubuf->sg = get_sg_table(dev, buf, direction); -- if (IS_ERR(ubuf->sg)) -- return PTR_ERR(ubuf->sg); -+ if (IS_ERR(ubuf->sg)) { -+ ret = PTR_ERR(ubuf->sg); -+ ubuf->sg = NULL; -+ } - } else { - dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, - direction); - } - -- return 0; -+ return ret; - } - - static int end_cpu_udmabuf(struct dma_buf *buf, -@@ -190,6 +196,10 @@ static long udmabuf_create(struct miscdevice *device, - if (ubuf->pagecount > pglimit) - goto err; - } -+ -+ if (!ubuf->pagecount) -+ goto err; -+ - ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages), - GFP_KERNEL); - if (!ubuf->pages) { -@@ -361,7 +371,23 @@ static struct miscdevice udmabuf_misc = { - - static int __init udmabuf_dev_init(void) - { -- return misc_register(&udmabuf_misc); -+ int ret; -+ -+ ret = misc_register(&udmabuf_misc); -+ if (ret < 0) { -+ pr_err("Could not initialize udmabuf device\n"); -+ return ret; -+ } -+ -+ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device, -+ DMA_BIT_MASK(64)); -+ if (ret < 0) { -+ pr_err("Could not setup DMA mask for udmabuf device\n"); -+ misc_deregister(&udmabuf_misc); -+ return ret; -+ } -+ -+ return 0; - } - - static void __exit udmabuf_dev_exit(void) -diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig -index 80c2c03cb0141..e1beddcc8c84a 100644 ---- a/drivers/dma/Kconfig -+++ b/drivers/dma/Kconfig -@@ -202,6 +202,7 @@ config FSL_DMA - config FSL_EDMA - tristate "Freescale eDMA engine support" - depends on OF -+ depends on HAS_IOMEM - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help -@@ -236,7 +237,7 @@ config FSL_RAID - - config HISI_DMA - tristate "HiSilicon DMA Engine support" -- depends on ARM64 || COMPILE_TEST -+ depends on ARCH_HISI || COMPILE_TEST - depends on PCI_MSI - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS -@@ -271,6 +272,7 @@ config IMX_SDMA - - config INTEL_IDMA64 - tristate "Intel integrated DMA 64-bit support" -+ depends on HAS_IOMEM - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - help -diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c -index 30ae36124b1db..4583a8b5e5bd8 100644 ---- a/drivers/dma/at_hdmac.c -+++ b/drivers/dma/at_hdmac.c -@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) - ATC_SPIP_BOUNDARY(first->boundary)); - channel_writel(atchan, DPIP, 
ATC_DPIP_HOLE(first->dst_hole) | - ATC_DPIP_BOUNDARY(first->boundary)); -+ /* Don't allow CPU to reorder channel enable. */ -+ wmb(); - dma_writel(atdma, CHER, atchan->mask); - - vdbg_dump_regs(atchan); -@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) - struct at_desc *desc_first = atc_first_active(atchan); - struct at_desc *desc; - int ret; -- u32 ctrla, dscr, trials; -+ u32 ctrla, dscr; -+ unsigned int i; - - /* - * If the cookie doesn't match to the currently running transfer then -@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) - dscr = channel_readl(atchan, DSCR); - rmb(); /* ensure DSCR is read before CTRLA */ - ctrla = channel_readl(atchan, CTRLA); -- for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { -+ for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { - u32 new_dscr; - - rmb(); /* ensure DSCR is read after CTRLA */ -@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) - rmb(); /* ensure DSCR is read before CTRLA */ - ctrla = channel_readl(atchan, CTRLA); - } -- if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) -+ if (unlikely(i == ATC_MAX_DSCR_TRIALS)) - return -ETIMEDOUT; - - /* for the first descriptor we can be more accurate */ -@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) - if (!atc_chan_is_cyclic(atchan)) - dma_cookie_complete(txd); - -- /* If the transfer was a memset, free our temporary buffer */ -- if (desc->memset_buffer) { -- dma_pool_free(atdma->memset_pool, desc->memset_vaddr, -- desc->memset_paddr); -- desc->memset_buffer = false; -- } -- -- /* move children to free_list */ -- list_splice_init(&desc->tx_list, &atchan->free_list); -- /* move myself to free_list */ -- list_move(&desc->desc_node, &atchan->free_list); -- - spin_unlock_irqrestore(&atchan->lock, flags); - - dma_descriptor_unmap(txd); -@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) - dmaengine_desc_get_callback_invoke(txd, NULL); - - dma_run_dependencies(txd); --} -- --/** -- * atc_complete_all - finish work for all transactions -- * @atchan: channel to complete transactions for -- * -- * Eventually submit queued descriptors if any -- * -- * Assume channel is idle while calling this function -- * Called with atchan->lock held and bh disabled -- */ --static void atc_complete_all(struct at_dma_chan *atchan) --{ -- struct at_desc *desc, *_desc; -- LIST_HEAD(list); -- unsigned long flags; -- -- dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); - - spin_lock_irqsave(&atchan->lock, flags); -- -- /* -- * Submit queued descriptors ASAP, i.e. before we go through -- * the completed ones. 
-- */ -- if (!list_empty(&atchan->queue)) -- atc_dostart(atchan, atc_first_queued(atchan)); -- /* empty active_list now it is completed */ -- list_splice_init(&atchan->active_list, &list); -- /* empty queue list by moving descriptors (if any) to active_list */ -- list_splice_init(&atchan->queue, &atchan->active_list); -- -+ /* move children to free_list */ -+ list_splice_init(&desc->tx_list, &atchan->free_list); -+ /* add myself to free_list */ -+ list_add(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); - -- list_for_each_entry_safe(desc, _desc, &list, desc_node) -- atc_chain_complete(atchan, desc); -+ /* If the transfer was a memset, free our temporary buffer */ -+ if (desc->memset_buffer) { -+ dma_pool_free(atdma->memset_pool, desc->memset_vaddr, -+ desc->memset_paddr); -+ desc->memset_buffer = false; -+ } - } - - /** -@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan) - */ - static void atc_advance_work(struct at_dma_chan *atchan) - { -+ struct at_desc *desc; - unsigned long flags; -- int ret; - - dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); - - spin_lock_irqsave(&atchan->lock, flags); -- ret = atc_chan_is_enabled(atchan); -- spin_unlock_irqrestore(&atchan->lock, flags); -- if (ret) -- return; -- -- if (list_empty(&atchan->active_list) || -- list_is_singular(&atchan->active_list)) -- return atc_complete_all(atchan); -+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) -+ return spin_unlock_irqrestore(&atchan->lock, flags); - -- atc_chain_complete(atchan, atc_first_active(atchan)); -+ desc = atc_first_active(atchan); -+ /* Remove the transfer node from the active list. */ -+ list_del_init(&desc->desc_node); -+ spin_unlock_irqrestore(&atchan->lock, flags); -+ atc_chain_complete(atchan, desc); - - /* advance work */ - spin_lock_irqsave(&atchan->lock, flags); -- atc_dostart(atchan, atc_first_active(atchan)); -+ if (!list_empty(&atchan->active_list)) { -+ desc = atc_first_queued(atchan); -+ list_move_tail(&desc->desc_node, &atchan->active_list); -+ atc_dostart(atchan, desc); -+ } - spin_unlock_irqrestore(&atchan->lock, flags); - } - -@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan) - static void atc_handle_error(struct at_dma_chan *atchan) - { - struct at_desc *bad_desc; -+ struct at_desc *desc; - struct at_desc *child; - unsigned long flags; - -@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan) - bad_desc = atc_first_active(atchan); - list_del_init(&bad_desc->desc_node); - -- /* As we are stopped, take advantage to push queued descriptors -- * in active_list */ -- list_splice_init(&atchan->queue, atchan->active_list.prev); -- - /* Try to restart the controller */ -- if (!list_empty(&atchan->active_list)) -- atc_dostart(atchan, atc_first_active(atchan)); -+ if (!list_empty(&atchan->active_list)) { -+ desc = atc_first_queued(atchan); -+ list_move_tail(&desc->desc_node, &atchan->active_list); -+ atc_dostart(atchan, desc); -+ } - - /* - * KERN_CRITICAL may seem harsh, but since this only happens -@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) - spin_lock_irqsave(&atchan->lock, flags); - cookie = dma_cookie_assign(tx); - -- if (list_empty(&atchan->active_list)) { -- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", -- desc->txd.cookie); -- atc_dostart(atchan, desc); -- list_add_tail(&desc->desc_node, &atchan->active_list); -- } else { -- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 
-- desc->txd.cookie); -- list_add_tail(&desc->desc_node, &atchan->queue); -- } -- -+ list_add_tail(&desc->desc_node, &atchan->queue); - spin_unlock_irqrestore(&atchan->lock, flags); - -+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", -+ desc->txd.cookie); - return cookie; - } - -@@ -1437,11 +1400,8 @@ static int atc_terminate_all(struct dma_chan *chan) - struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct at_dma *atdma = to_at_dma(chan->device); - int chan_id = atchan->chan_common.chan_id; -- struct at_desc *desc, *_desc; - unsigned long flags; - -- LIST_HEAD(list); -- - dev_vdbg(chan2dev(chan), "%s\n", __func__); - - /* -@@ -1460,19 +1420,15 @@ static int atc_terminate_all(struct dma_chan *chan) - cpu_relax(); - - /* active_list entries will end up before queued entries */ -- list_splice_init(&atchan->queue, &list); -- list_splice_init(&atchan->active_list, &list); -- -- spin_unlock_irqrestore(&atchan->lock, flags); -- -- /* Flush all pending and queued descriptors */ -- list_for_each_entry_safe(desc, _desc, &list, desc_node) -- atc_chain_complete(atchan, desc); -+ list_splice_tail_init(&atchan->queue, &atchan->free_list); -+ list_splice_tail_init(&atchan->active_list, &atchan->free_list); - - clear_bit(ATC_IS_PAUSED, &atchan->status); - /* if channel dedicated to cyclic operations, free it */ - clear_bit(ATC_IS_CYCLIC, &atchan->status); - -+ spin_unlock_irqrestore(&atchan->lock, flags); -+ - return 0; - } - -@@ -1527,20 +1483,26 @@ atc_tx_status(struct dma_chan *chan, - } - - /** -- * atc_issue_pending - try to finish work -+ * atc_issue_pending - takes the first transaction descriptor in the pending -+ * queue and starts the transfer. - * @chan: target DMA channel - */ - static void atc_issue_pending(struct dma_chan *chan) - { -- struct at_dma_chan *atchan = to_at_dma_chan(chan); -+ struct at_dma_chan *atchan = to_at_dma_chan(chan); -+ struct at_desc *desc; -+ unsigned long flags; - - dev_vdbg(chan2dev(chan), "issue_pending\n"); - -- /* Not needed for cyclic transfers */ -- if (atc_chan_is_cyclic(atchan)) -- return; -+ spin_lock_irqsave(&atchan->lock, flags); -+ if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) -+ return spin_unlock_irqrestore(&atchan->lock, flags); - -- atc_advance_work(atchan); -+ desc = atc_first_queued(atchan); -+ list_move_tail(&desc->desc_node, &atchan->active_list); -+ atc_dostart(atchan, desc); -+ spin_unlock_irqrestore(&atchan->lock, flags); - } - - /** -@@ -1958,7 +1920,11 @@ static int __init at_dma_probe(struct platform_device *pdev) - dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", - plat_dat->nr_channels); - -- dma_async_device_register(&atdma->dma_common); -+ err = dma_async_device_register(&atdma->dma_common); -+ if (err) { -+ dev_err(&pdev->dev, "Unable to register: %d.\n", err); -+ goto err_dma_async_device_register; -+ } - - /* - * Do not return an error if the dmac node is not present in order to -@@ -1978,6 +1944,7 @@ static int __init at_dma_probe(struct platform_device *pdev) - - err_of_dma_controller_register: - dma_async_device_unregister(&atdma->dma_common); -+err_dma_async_device_register: - dma_pool_destroy(atdma->memset_pool); - err_memset_pool_create: - dma_pool_destroy(atdma->dma_desc_pool); -diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h -index 4d1ebc040031c..d4d382d746078 100644 ---- a/drivers/dma/at_hdmac_regs.h -+++ b/drivers/dma/at_hdmac_regs.h -@@ -186,13 +186,13 @@ - /* LLI == Linked List Item; aka DMA buffer descriptor */ - struct at_lli { - /* values that are not changed by hardware */ -- dma_addr_t saddr; -- dma_addr_t daddr; -+ u32 saddr; -+ u32 daddr; - /* value that may get written back: */ -- u32 ctrla; -+ u32 ctrla; - /* more values that are not changed by hardware */ -- u32 ctrlb; -- dma_addr_t dscr; /* chain to next lli */ -+ u32 ctrlb; -+ u32 dscr; /* chain to next lli */ - }; - - /** -diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c -index ab78e0f6afd70..dd34626df1abc 100644 ---- a/drivers/dma/at_xdmac.c -+++ b/drivers/dma/at_xdmac.c -@@ -99,6 +99,7 @@ - #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ - #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ - #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ -+#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27) - #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ - #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ - #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ -@@ -155,7 +156,7 @@ - #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ - #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) - #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) --#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ -+#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ - #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ - #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ - #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ -@@ -242,6 +243,7 @@ struct at_xdmac { - int irq; - struct clk *clk; - u32 save_gim; -+ u32 save_gs; - struct dma_pool *at_xdmac_desc_pool; - const struct at_xdmac_layout *layout; - struct at_xdmac_chan chan[]; -@@ -252,15 +254,15 @@ struct at_xdmac { - - /* Linked List Descriptor */ - struct at_xdmac_lld { -- dma_addr_t mbr_nda; /* Next Descriptor Member */ -- u32 mbr_ubc; /* Microblock Control Member */ -- dma_addr_t mbr_sa; /* Source Address Member */ -- dma_addr_t mbr_da; /* Destination Address Member */ -- u32 mbr_cfg; /* Configuration Register */ -- u32 mbr_bc; /* Block Control Register */ -- u32 mbr_ds; /* Data Stride Register */ -- u32 mbr_sus; /* Source Microblock Stride Register */ -- u32 mbr_dus; /* Destination Microblock Stride Register */ -+ u32 mbr_nda; /* Next Descriptor Member */ -+ u32 mbr_ubc; /* Microblock Control Member */ -+ u32 mbr_sa; /* Source Address Member */ -+ u32 mbr_da; /* 
Destination Address Member */ -+ u32 mbr_cfg; /* Configuration Register */ -+ u32 mbr_bc; /* Block Control Register */ -+ u32 mbr_ds; /* Data Stride Register */ -+ u32 mbr_sus; /* Source Microblock Stride Register */ -+ u32 mbr_dus; /* Destination Microblock Stride Register */ - }; - - /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ -@@ -385,9 +387,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, - - dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); - -- if (at_xdmac_chan_is_enabled(atchan)) -- return; -- - /* Set transfer as active to not try to start it again. */ - first->active_xfer = true; - -@@ -405,7 +404,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, - */ - if (at_xdmac_chan_is_cyclic(atchan)) - reg = AT_XDMAC_CNDC_NDVIEW_NDV1; -- else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) -+ else if ((first->lld.mbr_ubc & -+ AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3) - reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - else - reg = AT_XDMAC_CNDC_NDVIEW_NDV2; -@@ -476,13 +476,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) - spin_lock_irqsave(&atchan->lock, irqflags); - cookie = dma_cookie_assign(tx); - -+ list_add_tail(&desc->xfer_node, &atchan->xfers_list); -+ spin_unlock_irqrestore(&atchan->lock, irqflags); -+ - dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", - __func__, atchan, desc); -- list_add_tail(&desc->xfer_node, &atchan->xfers_list); -- if (list_is_singular(&atchan->xfers_list)) -- at_xdmac_start_xfer(atchan, desc); - -- spin_unlock_irqrestore(&atchan->lock, irqflags); - return cookie; - } - -@@ -733,7 +732,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - if (!desc) { - dev_err(chan2dev(chan), "can't get descriptor\n"); - if (first) -- list_splice_init(&first->descs_list, &atchan->free_descs_list); -+ list_splice_tail_init(&first->descs_list, -+ &atchan->free_descs_list); - goto spin_unlock; - } - -@@ -821,7 +821,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, - if (!desc) { - dev_err(chan2dev(chan), "can't get descriptor\n"); - if (first) -- list_splice_init(&first->descs_list, &atchan->free_descs_list); -+ list_splice_tail_init(&first->descs_list, -+ &atchan->free_descs_list); - spin_unlock_irqrestore(&atchan->lock, irqflags); - return NULL; - } -@@ -1025,6 +1026,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, - NULL, - src_addr, dst_addr, - xt, xt->sgl); -+ if (!first) -+ return NULL; - - /* Length of the block is (BLEN+1) microblocks. 
*/ - for (i = 0; i < xt->numf - 1; i++) -@@ -1055,8 +1058,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, - src_addr, dst_addr, - xt, chunk); - if (!desc) { -- list_splice_init(&first->descs_list, -- &atchan->free_descs_list); -+ if (first) -+ list_splice_tail_init(&first->descs_list, -+ &atchan->free_descs_list); - return NULL; - } - -@@ -1136,7 +1140,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, - if (!desc) { - dev_err(chan2dev(chan), "can't get descriptor\n"); - if (first) -- list_splice_init(&first->descs_list, &atchan->free_descs_list); -+ list_splice_tail_init(&first->descs_list, -+ &atchan->free_descs_list); - return NULL; - } - -@@ -1312,8 +1317,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, - sg_dma_len(sg), - value); - if (!desc && first) -- list_splice_init(&first->descs_list, -- &atchan->free_descs_list); -+ list_splice_tail_init(&first->descs_list, -+ &atchan->free_descs_list); - - if (!first) - first = desc; -@@ -1452,7 +1457,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - { - struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); - struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); -- struct at_xdmac_desc *desc, *_desc; -+ struct at_xdmac_desc *desc, *_desc, *iter; - struct list_head *descs_list; - enum dma_status ret; - int residue, retry; -@@ -1567,11 +1572,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - * microblock. - */ - descs_list = &desc->descs_list; -- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { -- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); -- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; -- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) -+ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) { -+ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg); -+ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth; -+ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) { -+ desc = iter; - break; -+ } - } - residue += cur_ubc << dwidth; - -@@ -1586,20 +1593,6 @@ spin_unlock: - return ret; - } - --/* Call must be protected by lock. */ --static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, -- struct at_xdmac_desc *desc) --{ -- dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); -- -- /* -- * Remove the transfer from the transfer list then move the transfer -- * descriptors into the free descriptors list. 
-- */ -- list_del(&desc->xfer_node); -- list_splice_init(&desc->descs_list, &atchan->free_descs_list); --} -- - static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) - { - struct at_xdmac_desc *desc; -@@ -1623,14 +1616,17 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) - struct at_xdmac_desc *desc; - struct dma_async_tx_descriptor *txd; - -- if (!list_empty(&atchan->xfers_list)) { -- desc = list_first_entry(&atchan->xfers_list, -- struct at_xdmac_desc, xfer_node); -- txd = &desc->tx_dma_desc; -- -- if (txd->flags & DMA_PREP_INTERRUPT) -- dmaengine_desc_get_callback_invoke(txd, NULL); -+ spin_lock_irq(&atchan->lock); -+ if (list_empty(&atchan->xfers_list)) { -+ spin_unlock_irq(&atchan->lock); -+ return; - } -+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, -+ xfer_node); -+ spin_unlock_irq(&atchan->lock); -+ txd = &desc->tx_dma_desc; -+ if (txd->flags & DMA_PREP_INTERRUPT) -+ dmaengine_desc_get_callback_invoke(txd, NULL); - } - - static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) -@@ -1707,17 +1703,20 @@ static void at_xdmac_tasklet(struct tasklet_struct *t) - } - - txd = &desc->tx_dma_desc; -- -- at_xdmac_remove_xfer(atchan, desc); -+ dma_cookie_complete(txd); -+ /* Remove the transfer from the transfer list. */ -+ list_del(&desc->xfer_node); - spin_unlock_irq(&atchan->lock); - -- dma_cookie_complete(txd); - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); - - dma_run_dependencies(txd); - - spin_lock_irq(&atchan->lock); -+ /* Move the xfer descriptors into the free descriptors list. */ -+ list_splice_tail_init(&desc->descs_list, -+ &atchan->free_descs_list); - at_xdmac_advance_work(atchan); - spin_unlock_irq(&atchan->lock); - } -@@ -1784,11 +1783,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan) - - dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); - -- if (!at_xdmac_chan_is_cyclic(atchan)) { -- spin_lock_irqsave(&atchan->lock, flags); -- at_xdmac_advance_work(atchan); -- spin_unlock_irqrestore(&atchan->lock, flags); -- } -+ spin_lock_irqsave(&atchan->lock, flags); -+ at_xdmac_advance_work(atchan); -+ spin_unlock_irqrestore(&atchan->lock, flags); - - return; - } -@@ -1866,8 +1863,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) - cpu_relax(); - - /* Cancel all pending transfers. 
*/ -- list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) -- at_xdmac_remove_xfer(atchan, desc); -+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) { -+ list_del(&desc->xfer_node); -+ list_splice_tail_init(&desc->descs_list, -+ &atchan->free_descs_list); -+ } - - clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); -@@ -1897,6 +1897,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) - for (i = 0; i < init_nr_desc_per_channel; i++) { - desc = at_xdmac_alloc_desc(chan, GFP_KERNEL); - if (!desc) { -+ if (i == 0) { -+ dev_warn(chan2dev(chan), -+ "can't allocate any descriptors\n"); -+ return -EIO; -+ } - dev_warn(chan2dev(chan), - "only %d descriptors have been allocated\n", i); - break; -@@ -1926,6 +1931,30 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan) - return; - } - -+static void at_xdmac_axi_config(struct platform_device *pdev) -+{ -+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); -+ bool dev_m2m = false; -+ u32 dma_requests; -+ -+ if (!atxdmac->layout->axi_config) -+ return; /* Not supported */ -+ -+ if (!of_property_read_u32(pdev->dev.of_node, "dma-requests", -+ &dma_requests)) { -+ dev_info(&pdev->dev, "controller in mem2mem mode.\n"); -+ dev_m2m = true; -+ } -+ -+ if (dev_m2m) { -+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M); -+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M); -+ } else { -+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M); -+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M); -+ } -+} -+ - #ifdef CONFIG_PM - static int atmel_xdmac_prepare(struct device *dev) - { -@@ -1964,6 +1993,7 @@ static int atmel_xdmac_suspend(struct device *dev) - } - } - atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); -+ atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS); - - at_xdmac_off(atxdmac); - clk_disable_unprepare(atxdmac->clk); -@@ -1975,6 +2005,7 @@ static int atmel_xdmac_resume(struct device *dev) - struct at_xdmac *atxdmac = dev_get_drvdata(dev); - struct at_xdmac_chan *atchan; - struct dma_chan *chan, *_chan; -+ struct platform_device *pdev = container_of(dev, struct platform_device, dev); - int i; - int ret; - -@@ -1982,6 +2013,8 @@ static int atmel_xdmac_resume(struct device *dev) - if (ret) - return ret; - -+ at_xdmac_axi_config(pdev); -+ - /* Clear pending interrupts. 
*/ - for (i = 0; i < atxdmac->dma.chancnt; i++) { - atchan = &atxdmac->chan[i]; -@@ -2000,37 +2033,14 @@ static int atmel_xdmac_resume(struct device *dev) - at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); - at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); - wmb(); -- at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); -+ if (atxdmac->save_gs & atchan->mask) -+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); - } - } - return 0; - } - #endif /* CONFIG_PM_SLEEP */ - --static void at_xdmac_axi_config(struct platform_device *pdev) --{ -- struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); -- bool dev_m2m = false; -- u32 dma_requests; -- -- if (!atxdmac->layout->axi_config) -- return; /* Not supported */ -- -- if (!of_property_read_u32(pdev->dev.of_node, "dma-requests", -- &dma_requests)) { -- dev_info(&pdev->dev, "controller in mem2mem mode.\n"); -- dev_m2m = true; -- } -- -- if (dev_m2m) { -- at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M); -- at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M); -- } else { -- at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M); -- at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M); -- } --} -- - static int at_xdmac_probe(struct platform_device *pdev) - { - struct at_xdmac *atxdmac; -diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c -index 2fd87f83cf90b..e169f18da551f 100644 ---- a/drivers/dma/bestcomm/ata.c -+++ b/drivers/dma/bestcomm/ata.c -@@ -133,7 +133,7 @@ void bcom_ata_reset_bd(struct bcom_task *tsk) - struct bcom_ata_var *var; - - /* Reset all BD */ -- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); -+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - tsk->index = 0; - tsk->outdex = 0; -diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c -index d91cbbe7a48fb..8c42e5ca00a99 100644 ---- a/drivers/dma/bestcomm/bestcomm.c -+++ b/drivers/dma/bestcomm/bestcomm.c -@@ -95,7 +95,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) - tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); - if (!tsk->bd) - goto error; -- memset(tsk->bd, 0x00, bd_count * bd_size); -+ memset_io(tsk->bd, 0x00, bd_count * bd_size); - - tsk->num_bd = bd_count; - tsk->bd_size = bd_size; -@@ -186,16 +186,16 @@ bcom_load_image(int task, u32 *task_image) - inc = bcom_task_inc(task); - - /* Clear & copy */ -- memset(var, 0x00, BCOM_VAR_SIZE); -- memset(inc, 0x00, BCOM_INC_SIZE); -+ memset_io(var, 0x00, BCOM_VAR_SIZE); -+ memset_io(inc, 0x00, BCOM_INC_SIZE); - - desc_src = (u32 *)(hdr + 1); - var_src = desc_src + hdr->desc_size; - inc_src = var_src + hdr->var_size; - -- memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); -- memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); -- memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); -+ memcpy_toio(desc, desc_src, hdr->desc_size * sizeof(u32)); -+ memcpy_toio(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); -+ memcpy_toio(inc, inc_src, hdr->inc_size * sizeof(u32)); - - return 0; - } -@@ -302,13 +302,13 @@ static int bcom_engine_init(void) - return -ENOMEM; - } - -- memset(bcom_eng->tdt, 0x00, tdt_size); -- memset(bcom_eng->ctx, 0x00, ctx_size); -- memset(bcom_eng->var, 0x00, var_size); -- memset(bcom_eng->fdt, 0x00, fdt_size); -+ memset_io(bcom_eng->tdt, 0x00, tdt_size); -+ memset_io(bcom_eng->ctx, 0x00, ctx_size); -+ memset_io(bcom_eng->var, 0x00, var_size); -+ memset_io(bcom_eng->fdt, 0x00, fdt_size); - - /* Copy the FDT for the EU#3 */ -- 
memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); -+ memcpy_toio(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); - - /* Initialize Task base structure */ - for (task=0; taskindex = 0; - tsk->outdex = 0; - -- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); -+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); -@@ -241,7 +241,7 @@ bcom_fec_tx_reset(struct bcom_task *tsk) - tsk->index = 0; - tsk->outdex = 0; - -- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); -+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); -diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c -index 906ddba6a6f5d..8a24a5cbc2633 100644 ---- a/drivers/dma/bestcomm/gen_bd.c -+++ b/drivers/dma/bestcomm/gen_bd.c -@@ -142,7 +142,7 @@ bcom_gen_bd_rx_reset(struct bcom_task *tsk) - tsk->index = 0; - tsk->outdex = 0; - -- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); -+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); -@@ -226,7 +226,7 @@ bcom_gen_bd_tx_reset(struct bcom_task *tsk) - tsk->index = 0; - tsk->outdex = 0; - -- memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); -+ memset_io(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); - - /* Configure some stuff */ - bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); -diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c -index af3ee288bc117..4ec7bb58c195f 100644 ---- a/drivers/dma/dmaengine.c -+++ b/drivers/dma/dmaengine.c -@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan) - /* The channel is already in use, update client count */ - if (chan->client_count) { - __module_get(owner); -- goto out; -+ chan->client_count++; -+ return 0; - } - - if (!try_module_get(owner)) -@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan) - goto err_out; - } - -+ chan->client_count++; -+ - if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) - balance_ref_count(chan); - --out: -- chan->client_count++; - return 0; - - err_out: -diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h -index 1bfbd64b13717..53f16d3f00294 100644 ---- a/drivers/dma/dmaengine.h -+++ b/drivers/dma/dmaengine.h -@@ -176,7 +176,7 @@ dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, - static inline bool - dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) - { -- return (cb->callback) ? 
true : false; -+ return cb->callback || cb->callback_result; - } - - struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); -diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -index 35993ab921547..cfc47efcb5d93 100644 ---- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c -@@ -288,8 +288,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, - len = vd_to_axi_desc(vdesc)->hw_desc[0].len; - completed_length = completed_blocks * len; - bytes = length - completed_length; -- } else { -- bytes = vd_to_axi_desc(vdesc)->length; - } - - spin_unlock_irqrestore(&chan->vc.lock, flags); -@@ -944,6 +942,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan, - static void axi_chan_dump_lli(struct axi_dma_chan *chan, - struct axi_dma_hw_desc *desc) - { -+ if (!desc->lli) { -+ dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n"); -+ return; -+ } -+ - dev_err(dchan2dev(&chan->vc.chan), - "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x", - le64_to_cpu(desc->lli->sar), -@@ -975,6 +978,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) - - /* The bad descriptor currently is in the head of vc list */ - vd = vchan_next_desc(&chan->vc); -+ if (!vd) { -+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n", -+ axi_chan_name(chan)); -+ goto out; -+ } - /* Remove the completed descriptor from issued list */ - list_del(&vd->node); - -@@ -989,6 +997,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) - /* Try to restart the controller */ - axi_chan_start_first_queued(chan); - -+out: - spin_unlock_irqrestore(&chan->vc.lock, flags); - } - -@@ -1011,6 +1020,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) - - /* The completed descriptor currently is in the head of vc list */ - vd = vchan_next_desc(&chan->vc); -+ if (!vd) { -+ dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n", -+ axi_chan_name(chan)); -+ goto out; -+ } - - if (chan->cyclic) { - desc = vd_to_axi_desc(vd); -@@ -1040,6 +1054,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) - axi_chan_start_first_queued(chan); - } - -+out: - spin_unlock_irqrestore(&chan->vc.lock, flags); - } - -diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c -index 53289927dd0d6..799ebbaf35be5 100644 ---- a/drivers/dma/dw-edma/dw-edma-core.c -+++ b/drivers/dma/dw-edma/dw-edma-core.c -@@ -171,7 +171,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc) - dw_edma_free_desc(vd2dw_edma_desc(vdesc)); - } - --static void dw_edma_start_transfer(struct dw_edma_chan *chan) -+static int dw_edma_start_transfer(struct dw_edma_chan *chan) - { - struct dw_edma_chunk *child; - struct dw_edma_desc *desc; -@@ -179,16 +179,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan) - - vd = vchan_next_desc(&chan->vc); - if (!vd) -- return; -+ return 0; - - desc = vd2dw_edma_desc(vd); - if (!desc) -- return; -+ return 0; - - child = list_first_entry_or_null(&desc->chunk->list, - struct dw_edma_chunk, list); - if (!child) -- return; -+ return 0; - - dw_edma_v0_core_start(child, !desc->xfer_sz); - desc->xfer_sz += child->ll_region.sz; -@@ -196,6 +196,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan) - list_del(&child->list); - kfree(child); - desc->chunks_alloc--; -+ -+ return 1; - } - - static int dw_edma_device_config(struct dma_chan *dchan, -@@ -279,9 
+281,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan) - struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); - unsigned long flags; - -+ if (!chan->configured) -+ return; -+ - spin_lock_irqsave(&chan->vc.lock, flags); -- if (chan->configured && chan->request == EDMA_REQ_NONE && -- chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) { -+ if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE && -+ chan->status == EDMA_ST_IDLE) { - chan->status = EDMA_ST_BUSY; - dw_edma_start_transfer(chan); - } -@@ -424,7 +429,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) - chunk->ll_region.sz += burst->sz; - desc->alloc_sz += burst->sz; - -- if (chan->dir == EDMA_DIR_WRITE) { -+ if (dir == DMA_DEV_TO_MEM) { - burst->sar = src_addr; - if (xfer->type == EDMA_XFER_CYCLIC) { - burst->dar = xfer->xfer.cyclic.paddr; -@@ -438,6 +443,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) - * and destination addresses are increased - * by the same portion (data length) - */ -+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) { -+ burst->dar = dst_addr; - } - } else { - burst->dar = dst_addr; -@@ -453,6 +460,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) - * and destination addresses are increased - * by the same portion (data length) - */ -+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) { -+ burst->sar = src_addr; - } - } - -@@ -551,14 +560,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan) - switch (chan->request) { - case EDMA_REQ_NONE: - desc = vd2dw_edma_desc(vd); -- if (desc->chunks_alloc) { -- chan->status = EDMA_ST_BUSY; -- dw_edma_start_transfer(chan); -- } else { -+ if (!desc->chunks_alloc) { - list_del(&vd->node); - vchan_cookie_complete(vd); -- chan->status = EDMA_ST_IDLE; - } -+ -+ /* Continue transferring if there are remaining chunks or issued requests. -+ */ -+ chan->status = dw_edma_start_transfer(chan) ? 
EDMA_ST_BUSY : EDMA_ST_IDLE; - break; - - case EDMA_REQ_STOP: -diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c -index 329fc2e57b703..043a4f3115fa3 100644 ---- a/drivers/dma/dw-edma/dw-edma-v0-core.c -+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c -@@ -192,7 +192,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, - static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, - const void __iomem *addr) - { -- u32 value; -+ u64 value; - - if (dw->mf == EDMA_MF_EDMA_LEGACY) { - u32 viewport_sel; -@@ -415,8 +415,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) - (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); - /* Linked list */ - #ifdef CONFIG_64BIT -- SET_CH_64(dw, chan->dir, chan->id, llp.reg, -- chunk->ll_region.paddr); -+ /* llp is not aligned on 64bit -> keep 32bit accesses */ -+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb, -+ lower_32_bits(chunk->ll_region.paddr)); -+ SET_CH_32(dw, chan->dir, chan->id, llp.msb, -+ upper_32_bits(chunk->ll_region.paddr)); - #else /* CONFIG_64BIT */ - SET_CH_32(dw, chan->dir, chan->id, llp.lsb, - lower_32_bits(chunk->ll_region.paddr)); -diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c -index c855a0e4f9ff4..df6be7ca340cd 100644 ---- a/drivers/dma/hisi_dma.c -+++ b/drivers/dma/hisi_dma.c -@@ -30,7 +30,7 @@ - #define HISI_DMA_MODE 0x217c - #define HISI_DMA_OFFSET 0x100 - --#define HISI_DMA_MSI_NUM 30 -+#define HISI_DMA_MSI_NUM 32 - #define HISI_DMA_CHAN_NUM 30 - #define HISI_DMA_Q_DEPTH_VAL 1024 - -@@ -180,7 +180,8 @@ static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) - hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); - } - --static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) -+static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan, -+ bool disable) - { - struct hisi_dma_dev *hdma_dev = chan->hdma_dev; - u32 index = chan->qp_num, tmp; -@@ -201,8 +202,11 @@ static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) - hisi_dma_do_reset(hdma_dev, index); - hisi_dma_reset_qp_point(hdma_dev, index); - hisi_dma_pause_dma(hdma_dev, index, false); -- hisi_dma_enable_dma(hdma_dev, index, true); -- hisi_dma_unmask_irq(hdma_dev, index); -+ -+ if (!disable) { -+ hisi_dma_enable_dma(hdma_dev, index, true); -+ hisi_dma_unmask_irq(hdma_dev, index); -+ } - - ret = readl_relaxed_poll_timeout(hdma_dev->base + - HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, -@@ -218,7 +222,7 @@ static void hisi_dma_free_chan_resources(struct dma_chan *c) - struct hisi_dma_chan *chan = to_hisi_dma_chan(c); - struct hisi_dma_dev *hdma_dev = chan->hdma_dev; - -- hisi_dma_reset_hw_chan(chan); -+ hisi_dma_reset_or_disable_hw_chan(chan, false); - vchan_free_chan_resources(&chan->vc); - - memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); -@@ -267,7 +271,6 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) - - vd = vchan_next_desc(&chan->vc); - if (!vd) { -- dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); - chan->desc = NULL; - return; - } -@@ -299,7 +302,7 @@ static void hisi_dma_issue_pending(struct dma_chan *c) - - spin_lock_irqsave(&chan->vc.lock, flags); - -- if (vchan_issue_pending(&chan->vc)) -+ if (vchan_issue_pending(&chan->vc) && !chan->desc) - hisi_dma_start_transfer(chan); - - spin_unlock_irqrestore(&chan->vc.lock, flags); -@@ -394,7 +397,7 @@ static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) - - static void 
hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) - { -- hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); -+ hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true); - } - - static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) -@@ -432,18 +435,15 @@ static irqreturn_t hisi_dma_irq(int irq, void *data) - desc = chan->desc; - cqe = chan->cq + chan->cq_head; - if (desc) { -+ chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth; -+ hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, -+ chan->qp_num, chan->cq_head); - if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { -- chan->cq_head = (chan->cq_head + 1) % -- hdma_dev->chan_depth; -- hisi_dma_chan_write(hdma_dev->base, -- HISI_DMA_CQ_HEAD_PTR, chan->qp_num, -- chan->cq_head); - vchan_cookie_complete(&desc->vd); -+ hisi_dma_start_transfer(chan); - } else { - dev_err(&hdma_dev->pdev->dev, "task error!\n"); - } -- -- chan->desc = NULL; - } - - spin_unlock(&chan->vc.lock); -diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c -index b9b2b4a4124ee..033df43db0cec 100644 ---- a/drivers/dma/idxd/cdev.c -+++ b/drivers/dma/idxd/cdev.c -@@ -369,10 +369,16 @@ int idxd_cdev_register(void) - rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, - ictx[i].name); - if (rc) -- return rc; -+ goto err_free_chrdev_region; - } - - return 0; -+ -+err_free_chrdev_region: -+ for (i--; i >= 0; i--) -+ unregister_chrdev_region(ictx[i].devt, MINORMASK); -+ -+ return rc; - } - - void idxd_cdev_remove(void) -diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c -index 83a5ff2ecf2a0..535f021911c55 100644 ---- a/drivers/dma/idxd/device.c -+++ b/drivers/dma/idxd/device.c -@@ -394,8 +394,6 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) - lockdep_assert_held(&wq->wq_lock); - memset(wq->wqcfg, 0, idxd->wqcfg_size); - wq->type = IDXD_WQT_NONE; -- wq->size = 0; -- wq->group = NULL; - wq->threshold = 0; - wq->priority = 0; - wq->ats_dis = 0; -@@ -404,6 +402,14 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) - memset(wq->name, 0, WQ_NAME_SIZE); - } - -+static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq) -+{ -+ lockdep_assert_held(&wq->wq_lock); -+ -+ wq->size = 0; -+ wq->group = NULL; -+} -+ - static void idxd_wq_ref_release(struct percpu_ref *ref) - { - struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active); -@@ -427,7 +433,6 @@ void idxd_wq_quiesce(struct idxd_wq *wq) - { - percpu_ref_kill(&wq->wq_active); - wait_for_completion(&wq->wq_dead); -- percpu_ref_exit(&wq->wq_active); - } - - /* Device control bits */ -@@ -584,6 +589,8 @@ void idxd_device_reset(struct idxd_device *idxd) - spin_lock(&idxd->dev_lock); - idxd_device_clear_state(idxd); - idxd->state = IDXD_DEV_DISABLED; -+ idxd_unmask_error_interrupts(idxd); -+ idxd_msix_perm_setup(idxd); - spin_unlock(&idxd->dev_lock); - } - -@@ -692,11 +699,16 @@ static void idxd_groups_clear_state(struct idxd_device *idxd) - memset(&group->grpcfg, 0, sizeof(group->grpcfg)); - group->num_engines = 0; - group->num_wqs = 0; -- group->use_token_limit = false; -- group->tokens_allowed = 0; -- group->tokens_reserved = 0; -- group->tc_a = -1; -- group->tc_b = -1; -+ group->use_rdbuf_limit = false; -+ group->rdbufs_allowed = 0; -+ group->rdbufs_reserved = 0; -+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { -+ group->tc_a = 1; -+ group->tc_b = 1; -+ } else { -+ group->tc_a = -1; -+ group->tc_b = -1; -+ } - } - } - -@@ -708,15 +720,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) - for (i = 0; 
i < idxd->max_wqs; i++) { - struct idxd_wq *wq = idxd->wqs[i]; - -- if (wq->state == IDXD_WQ_ENABLED) { -- idxd_wq_disable_cleanup(wq); -- wq->state = IDXD_WQ_DISABLED; -- } -+ idxd_wq_disable_cleanup(wq); -+ idxd_wq_device_reset_cleanup(wq); - } - } - - void idxd_device_clear_state(struct idxd_device *idxd) - { -+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) -+ return; -+ - idxd_groups_clear_state(idxd); - idxd_engines_clear_state(idxd); - idxd_device_wqs_clear_state(idxd); -@@ -791,10 +804,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd) - int i; - struct device *dev = &idxd->pdev->dev; - -- /* Setup bandwidth token limit */ -- if (idxd->token_limit) { -+ /* Setup bandwidth rdbuf limit */ -+ if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { - reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); -- reg.token_limit = idxd->token_limit; -+ reg.rdbuf_limit = idxd->rdbuf_limit; - iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); - } - -@@ -935,13 +948,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd) - group->tc_b = group->grpcfg.flags.tc_b = 1; - else - group->grpcfg.flags.tc_b = group->tc_b; -- group->grpcfg.flags.use_token_limit = group->use_token_limit; -- group->grpcfg.flags.tokens_reserved = group->tokens_reserved; -- if (group->tokens_allowed) -- group->grpcfg.flags.tokens_allowed = -- group->tokens_allowed; -+ group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; -+ group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; -+ if (group->rdbufs_allowed) -+ group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; - else -- group->grpcfg.flags.tokens_allowed = idxd->max_tokens; -+ group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs; - } - } - -@@ -1136,7 +1148,7 @@ int idxd_device_load_config(struct idxd_device *idxd) - int i, rc; - - reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); -- idxd->token_limit = reg.token_limit; -+ idxd->rdbuf_limit = reg.rdbuf_limit; - - for (i = 0; i < idxd->max_groups; i++) { - struct idxd_group *group = idxd->groups[i]; -@@ -1236,8 +1248,7 @@ int __drv_enable_wq(struct idxd_wq *wq) - return 0; - - err_map_portal: -- rc = idxd_wq_disable(wq, false); -- if (rc < 0) -+ if (idxd_wq_disable(wq, false)) - dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq))); - err: - return rc; -diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c -index e0f056c1d1f56..29af898f3c242 100644 ---- a/drivers/dma/idxd/dma.c -+++ b/drivers/dma/idxd/dma.c -@@ -77,6 +77,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq, - hw->completion_addr = compl; - } - -+static struct dma_async_tx_descriptor * -+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) -+{ -+ struct idxd_wq *wq = to_idxd_wq(c); -+ u32 desc_flags; -+ struct idxd_desc *desc; -+ -+ if (wq->state != IDXD_WQ_ENABLED) -+ return NULL; -+ -+ op_flag_setup(flags, &desc_flags); -+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); -+ if (IS_ERR(desc)) -+ return NULL; -+ -+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, -+ 0, 0, 0, desc->compl_dma, desc_flags); -+ desc->txd.flags = flags; -+ return &desc->txd; -+} -+ - static struct dma_async_tx_descriptor * - idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -@@ -181,10 +202,12 @@ int idxd_register_dma_device(struct idxd_device *idxd) - INIT_LIST_HEAD(&dma->channels); - dma->dev = dev; - -+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask); - dma_cap_set(DMA_PRIVATE, dma->cap_mask); - 
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); - dma->device_release = idxd_dma_release; - -+ dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; - if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { - dma_cap_set(DMA_MEMCPY, dma->cap_mask); - dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; -@@ -311,6 +334,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) - - err_dma: - idxd_wq_quiesce(wq); -+ percpu_ref_exit(&wq->wq_active); - err_ref: - idxd_wq_free_resources(wq); - err_res_alloc: -@@ -328,9 +352,9 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) - mutex_lock(&wq->wq_lock); - idxd_wq_quiesce(wq); - idxd_unregister_dma_channel(wq); -- __drv_disable_wq(wq); - idxd_wq_free_resources(wq); -- wq->type = IDXD_WQT_NONE; -+ __drv_disable_wq(wq); -+ percpu_ref_exit(&wq->wq_active); - mutex_unlock(&wq->wq_lock); - } - -diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h -index bfcb03329f778..833af18a99ee5 100644 ---- a/drivers/dma/idxd/idxd.h -+++ b/drivers/dma/idxd/idxd.h -@@ -84,9 +84,9 @@ struct idxd_group { - int id; - int num_engines; - int num_wqs; -- bool use_token_limit; -- u8 tokens_allowed; -- u8 tokens_reserved; -+ bool use_rdbuf_limit; -+ u8 rdbufs_allowed; -+ u8 rdbufs_reserved; - int tc_a; - int tc_b; - }; -@@ -278,11 +278,11 @@ struct idxd_device { - u32 max_batch_size; - int max_groups; - int max_engines; -- int max_tokens; -+ int max_rdbufs; - int max_wqs; - int max_wq_size; -- int token_limit; -- int nr_tokens; /* non-reserved tokens */ -+ int rdbuf_limit; -+ int nr_rdbufs; /* non-reserved read buffers */ - unsigned int wqcfg_size; - - union sw_err_reg sw_err; -diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c -index eb09bc591c316..e0e0c7f286b67 100644 ---- a/drivers/dma/idxd/init.c -+++ b/drivers/dma/idxd/init.c -@@ -340,7 +340,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) - } - - idxd->groups[i] = group; -- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) { -+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { - group->tc_a = 1; - group->tc_b = 1; - } else { -@@ -464,9 +464,9 @@ static void idxd_read_caps(struct idxd_device *idxd) - dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); - idxd->max_groups = idxd->hw.group_cap.num_groups; - dev_dbg(dev, "max groups: %u\n", idxd->max_groups); -- idxd->max_tokens = idxd->hw.group_cap.total_tokens; -- dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); -- idxd->nr_tokens = idxd->max_tokens; -+ idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; -+ dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); -+ idxd->nr_rdbufs = idxd->max_rdbufs; - - /* read engine capabilities */ - idxd->hw.engine_cap.bits = -@@ -797,11 +797,19 @@ static void idxd_remove(struct pci_dev *pdev) - int msixcnt = pci_msix_vec_count(pdev); - int i; - -- dev_dbg(&pdev->dev, "%s called\n", __func__); -+ idxd_unregister_devices(idxd); -+ /* -+ * When ->release() is called for the idxd->conf_dev, it frees all the memory related -+ * to the idxd context. The driver still needs those bits in order to do the rest of -+ * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref -+ * on the device here to hold off the freeing while allowing the idxd sub-driver -+ * to unbind. 
-+ */ -+ get_device(idxd_confdev(idxd)); -+ device_unregister(idxd_confdev(idxd)); - idxd_shutdown(pdev); - if (device_pasid_enabled(idxd)) - idxd_disable_system_pasid(idxd); -- idxd_unregister_devices(idxd); - - for (i = 0; i < msixcnt; i++) { - irq_entry = &idxd->irq_entries[i]; -@@ -815,7 +823,7 @@ static void idxd_remove(struct pci_dev *pdev) - pci_disable_device(pdev); - destroy_workqueue(idxd->wq); - perfmon_pmu_remove(idxd); -- device_unregister(idxd_confdev(idxd)); -+ put_device(idxd_confdev(idxd)); - } - - static struct pci_driver idxd_pci_driver = { -diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c -index ca88fa7a328e7..6d6af0dc3c0ec 100644 ---- a/drivers/dma/idxd/irq.c -+++ b/drivers/dma/idxd/irq.c -@@ -63,6 +63,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) - int i; - bool err = false; - -+ if (cause & IDXD_INTC_HALT_STATE) -+ goto halt; -+ - if (cause & IDXD_INTC_ERR) { - spin_lock(&idxd->dev_lock); - for (i = 0; i < 4; i++) -@@ -121,6 +124,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) - if (!err) - return 0; - -+halt: - gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); - if (gensts.state == IDXD_DEVICE_STATE_HALT) { - idxd->state = IDXD_DEV_HALTED; -@@ -133,9 +137,10 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) - INIT_WORK(&idxd->work, idxd_device_reinit); - queue_work(idxd->wq, &idxd->work); - } else { -- spin_lock(&idxd->dev_lock); -+ idxd->state = IDXD_DEV_HALTED; - idxd_wqs_quiesce(idxd); - idxd_wqs_unmap_portal(idxd); -+ spin_lock(&idxd->dev_lock); - idxd_device_clear_state(idxd); - dev_err(&idxd->pdev->dev, - "idxd halted, need %s.\n", -diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h -index ffc7550a77eeb..c0961c1ac161d 100644 ---- a/drivers/dma/idxd/registers.h -+++ b/drivers/dma/idxd/registers.h -@@ -65,9 +65,9 @@ union wq_cap_reg { - union group_cap_reg { - struct { - u64 num_groups:8; -- u64 total_tokens:8; -- u64 token_en:1; -- u64 token_limit:1; -+ u64 total_rdbufs:8; /* formerly total_tokens */ -+ u64 rdbuf_ctrl:1; /* formerly token_en */ -+ u64 rdbuf_limit:1; /* formerly token_limit */ - u64 rsvd:46; - }; - u64 bits; -@@ -111,7 +111,7 @@ union offsets_reg { - #define IDXD_GENCFG_OFFSET 0x80 - union gencfg_reg { - struct { -- u32 token_limit:8; -+ u32 rdbuf_limit:8; - u32 rsvd:4; - u32 user_int_en:1; - u32 rsvd2:19; -@@ -158,6 +158,7 @@ enum idxd_device_reset_type { - #define IDXD_INTC_CMD 0x02 - #define IDXD_INTC_OCCUPY 0x04 - #define IDXD_INTC_PERFMON_OVFL 0x08 -+#define IDXD_INTC_HALT_STATE 0x10 - - #define IDXD_CMD_OFFSET 0xa0 - union idxd_command_reg { -@@ -287,10 +288,10 @@ union group_flags { - u32 tc_a:3; - u32 tc_b:3; - u32 rsvd:1; -- u32 use_token_limit:1; -- u32 tokens_reserved:8; -+ u32 use_rdbuf_limit:1; -+ u32 rdbufs_reserved:8; - u32 rsvd2:4; -- u32 tokens_allowed:8; -+ u32 rdbufs_allowed:8; - u32 rsvd3:4; - }; - u32 bits; -diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c -index de76fb4abac24..83452fbbb168b 100644 ---- a/drivers/dma/idxd/submit.c -+++ b/drivers/dma/idxd/submit.c -@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, - { - struct idxd_desc *d, *t, *found = NULL; - struct llist_node *head; -+ LIST_HEAD(flist); - - desc->completion->status = IDXD_COMP_DESC_ABORT; - /* -@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, - found = desc; - continue; - } -- list_add_tail(&desc->list, 
&ie->work_list); -+ -+ if (d->completion->status) -+ list_add_tail(&d->list, &flist); -+ else -+ list_add_tail(&d->list, &ie->work_list); - } - } - -@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, - - if (found) - complete_desc(found, IDXD_COMPLETE_ABORT); -+ -+ /* -+ * complete_desc() will return desc to allocator and the desc can be -+ * acquired by a different process and the desc->list can be modified. -+ * Delete desc from list so the list trasversing does not get corrupted -+ * by the other process. -+ */ -+ list_for_each_entry_safe(d, t, &flist, list) { -+ list_del_init(&d->list); -+ complete_desc(d, IDXD_COMPLETE_NORMAL); -+ } - } - - int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) -diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c -index a9025be940db2..489a9d8850764 100644 ---- a/drivers/dma/idxd/sysfs.c -+++ b/drivers/dma/idxd/sysfs.c -@@ -99,17 +99,17 @@ struct device_type idxd_engine_device_type = { - - /* Group attributes */ - --static void idxd_set_free_tokens(struct idxd_device *idxd) -+static void idxd_set_free_rdbufs(struct idxd_device *idxd) - { -- int i, tokens; -+ int i, rdbufs; - -- for (i = 0, tokens = 0; i < idxd->max_groups; i++) { -+ for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { - struct idxd_group *g = idxd->groups[i]; - -- tokens += g->tokens_reserved; -+ rdbufs += g->rdbufs_reserved; - } - -- idxd->nr_tokens = idxd->max_tokens - tokens; -+ idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; - } - - static ssize_t group_tokens_reserved_show(struct device *dev, -@@ -118,7 +118,7 @@ static ssize_t group_tokens_reserved_show(struct device *dev, - { - struct idxd_group *group = confdev_to_group(dev); - -- return sysfs_emit(buf, "%u\n", group->tokens_reserved); -+ return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); - } - - static ssize_t group_tokens_reserved_store(struct device *dev, -@@ -143,14 +143,14 @@ static ssize_t group_tokens_reserved_store(struct device *dev, - if (idxd->state == IDXD_DEV_ENABLED) - return -EPERM; - -- if (val > idxd->max_tokens) -+ if (val > idxd->max_rdbufs) - return -EINVAL; - -- if (val > idxd->nr_tokens + group->tokens_reserved) -+ if (val > idxd->nr_rdbufs + group->rdbufs_reserved) - return -EINVAL; - -- group->tokens_reserved = val; -- idxd_set_free_tokens(idxd); -+ group->rdbufs_reserved = val; -+ idxd_set_free_rdbufs(idxd); - return count; - } - -@@ -164,7 +164,7 @@ static ssize_t group_tokens_allowed_show(struct device *dev, - { - struct idxd_group *group = confdev_to_group(dev); - -- return sysfs_emit(buf, "%u\n", group->tokens_allowed); -+ return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); - } - - static ssize_t group_tokens_allowed_store(struct device *dev, -@@ -190,10 +190,10 @@ static ssize_t group_tokens_allowed_store(struct device *dev, - return -EPERM; - - if (val < 4 * group->num_engines || -- val > group->tokens_reserved + idxd->nr_tokens) -+ val > group->rdbufs_reserved + idxd->nr_rdbufs) - return -EINVAL; - -- group->tokens_allowed = val; -+ group->rdbufs_allowed = val; - return count; - } - -@@ -207,7 +207,7 @@ static ssize_t group_use_token_limit_show(struct device *dev, - { - struct idxd_group *group = confdev_to_group(dev); - -- return sysfs_emit(buf, "%u\n", group->use_token_limit); -+ return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); - } - - static ssize_t group_use_token_limit_store(struct device *dev, -@@ -232,10 +232,10 @@ static ssize_t group_use_token_limit_store(struct device *dev, - if (idxd->state == 
IDXD_DEV_ENABLED) - return -EPERM; - -- if (idxd->token_limit == 0) -+ if (idxd->rdbuf_limit == 0) - return -EPERM; - -- group->use_token_limit = !!val; -+ group->use_rdbuf_limit = !!val; - return count; - } - -@@ -327,7 +327,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev, - if (idxd->state == IDXD_DEV_ENABLED) - return -EPERM; - -- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) -+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) - return -EPERM; - - if (val < 0 || val > 7) -@@ -369,7 +369,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev, - if (idxd->state == IDXD_DEV_ENABLED) - return -EPERM; - -- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) -+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) - return -EPERM; - - if (val < 0 || val > 7) -@@ -842,6 +842,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr - u64 xfer_size; - int rc; - -+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) -+ return -EPERM; -+ - if (wq->state != IDXD_WQ_DISABLED) - return -EPERM; - -@@ -876,6 +879,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu - u64 batch_size; - int rc; - -+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) -+ return -EPERM; -+ - if (wq->state != IDXD_WQ_DISABLED) - return -EPERM; - -@@ -1161,7 +1167,7 @@ static ssize_t max_tokens_show(struct device *dev, - { - struct idxd_device *idxd = confdev_to_idxd(dev); - -- return sysfs_emit(buf, "%u\n", idxd->max_tokens); -+ return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); - } - static DEVICE_ATTR_RO(max_tokens); - -@@ -1170,7 +1176,7 @@ static ssize_t token_limit_show(struct device *dev, - { - struct idxd_device *idxd = confdev_to_idxd(dev); - -- return sysfs_emit(buf, "%u\n", idxd->token_limit); -+ return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); - } - - static ssize_t token_limit_store(struct device *dev, -@@ -1191,13 +1197,13 @@ static ssize_t token_limit_store(struct device *dev, - if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) - return -EPERM; - -- if (!idxd->hw.group_cap.token_limit) -+ if (!idxd->hw.group_cap.rdbuf_limit) - return -EPERM; - -- if (val > idxd->hw.group_cap.total_tokens) -+ if (val > idxd->hw.group_cap.total_rdbufs) - return -EINVAL; - -- idxd->token_limit = val; -+ idxd->rdbuf_limit = val; - return count; - } - static DEVICE_ATTR_RW(token_limit); -diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c -index 2ddc31e64db03..da31e73d24d4c 100644 ---- a/drivers/dma/imx-dma.c -+++ b/drivers/dma/imx-dma.c -@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev) - return -ENOMEM; - - imxdma->dev = &pdev->dev; -- imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev); -+ imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - imxdma->base = devm_ioremap_resource(&pdev->dev, res); -diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c -index cacc725ca5459..292f4c9a963dd 100644 ---- a/drivers/dma/imx-sdma.c -+++ b/drivers/dma/imx-sdma.c -@@ -198,12 +198,12 @@ struct sdma_script_start_addrs { - s32 per_2_firi_addr; - s32 mcu_2_firi_addr; - s32 uart_2_per_addr; -- s32 uart_2_mcu_ram_addr; -+ s32 uart_2_mcu_addr; - s32 per_2_app_addr; - s32 mcu_2_app_addr; - s32 per_2_per_addr; - s32 uartsh_2_per_addr; -- s32 uartsh_2_mcu_ram_addr; -+ s32 uartsh_2_mcu_addr; - s32 per_2_shp_addr; - s32 mcu_2_shp_addr; - s32 ata_2_mcu_addr; -@@ -232,8 +232,8 @@ struct 
sdma_script_start_addrs { - s32 mcu_2_ecspi_addr; - s32 mcu_2_sai_addr; - s32 sai_2_mcu_addr; -- s32 uart_2_mcu_addr; -- s32 uartsh_2_mcu_addr; -+ s32 uart_2_mcu_rom_addr; -+ s32 uartsh_2_mcu_rom_addr; - /* End of v3 array */ - s32 mcu_2_zqspi_addr; - /* End of v4 array */ -@@ -1428,10 +1428,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, - sdma_config_ownership(sdmac, false, true, false); - - if (sdma_load_context(sdmac)) -- goto err_desc_out; -+ goto err_bd_out; - - return desc; - -+err_bd_out: -+ sdma_free_bd(desc); - err_desc_out: - kfree(desc); - err_out: -@@ -1780,17 +1782,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma, - saddr_arr[i] = addr_arr[i]; - - /* -- * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because -- * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr -- * to be compatible with legacy freescale/nxp sdma firmware, and they -- * are located in the bottom part of sdma_script_start_addrs which are -- * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1. -+ * For compatibility with NXP internal legacy kernel before 4.19 which -+ * is based on uart ram script and mainline kernel based on uart rom -+ * script, both uart ram/rom scripts are present in newer sdma -+ * firmware. Use the rom versions if they are present (V3 or newer). - */ -- if (addr->uart_2_mcu_addr) -- sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr; -- if (addr->uartsh_2_mcu_addr) -- sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr; -- -+ if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) { -+ if (addr->uart_2_mcu_rom_addr) -+ sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr; -+ if (addr->uartsh_2_mcu_rom_addr) -+ sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr; -+ } - } - - static void sdma_load_firmware(const struct firmware *fw, void *context) -@@ -1869,7 +1871,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) - u32 reg, val, shift, num_map, i; - int ret = 0; - -- if (IS_ERR(np) || IS_ERR(gpr_np)) -+ if (IS_ERR(np) || !gpr_np) - goto out; - - event_remap = of_find_property(np, propname, NULL); -@@ -1917,7 +1919,7 @@ static int sdma_event_remap(struct sdma_engine *sdma) - } - - out: -- if (!IS_ERR(gpr_np)) -+ if (gpr_np) - of_node_put(gpr_np); - - return ret; -@@ -2264,7 +2266,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver"); - #if IS_ENABLED(CONFIG_SOC_IMX6Q) - MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); - #endif --#if IS_ENABLED(CONFIG_SOC_IMX7D) -+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M) - MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); - #endif - MODULE_LICENSE("GPL"); -diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c -index 37ff4ec7db76f..e2070df6cad28 100644 ---- a/drivers/dma/ioat/dma.c -+++ b/drivers/dma/ioat/dma.c -@@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) - if (active - i == 0) { - dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", - __func__); -- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); -+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - } - - /* microsecond delay by sysfs variable per pending descriptor */ -@@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan) - - if (chanerr & - (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) { -- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); -+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - ioat_eh(ioat_chan); - } - } -@@ -879,7 +879,7 @@ 
static void check_active(struct ioatdma_chan *ioat_chan) - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) -- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); -+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - } - - static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan) -diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c -index efe8bd3a0e2aa..1709d159af7e0 100644 ---- a/drivers/dma/lgm/lgm-dma.c -+++ b/drivers/dma/lgm/lgm-dma.c -@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d) - } - } - --static int ldma_cfg_init(struct ldma_dev *d) -+static int ldma_parse_dt(struct ldma_dev *d) - { - struct fwnode_handle *fwnode = dev_fwnode(d->dev); - struct ldma_port *p; -@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev) - d->core_clk = devm_clk_get_optional(dev, NULL); - if (IS_ERR(d->core_clk)) - return PTR_ERR(d->core_clk); -- clk_prepare_enable(d->core_clk); - - d->rst = devm_reset_control_get_optional(dev, NULL); - if (IS_ERR(d->rst)) - return PTR_ERR(d->rst); -+ -+ clk_prepare_enable(d->core_clk); - reset_control_deassert(d->rst); - - ret = devm_add_action_or_reset(dev, ldma_clk_disable, d); -@@ -1660,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev) - p->ldev = d; - } - -- ret = ldma_cfg_init(d); -- if (ret) -- return ret; -- - dma_dev->dev = &pdev->dev; - - ch_mask = (unsigned long)d->channels_mask; -@@ -1674,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev) - ldma_dma_init_v3X(j, d); - } - -+ ret = ldma_parse_dt(d); -+ if (ret) -+ return ret; -+ - dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources; - dma_dev->device_free_chan_resources = ldma_free_chan_resources; - dma_dev->device_terminate_all = ldma_terminate_all; -diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c -index e12b754e6398d..60d3c5f09ad67 100644 ---- a/drivers/dma/mcf-edma.c -+++ b/drivers/dma/mcf-edma.c -@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev) - return -EINVAL; - } - -- chans = pdata->dma_channels; -+ if (!pdata->dma_channels) { -+ dev_info(&pdev->dev, "setting default channel number to 64"); -+ chans = 64; -+ } else { -+ chans = pdata->dma_channels; -+ } -+ - len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans; - mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); - if (!mcf_edma) -@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev) - mcf_edma->drvdata = &mcf_data; - mcf_edma->big_endian = 1; - -- if (!mcf_edma->n_chans) { -- dev_info(&pdev->dev, "setting default channel number to 64"); -- mcf_edma->n_chans = 64; -- } -- - mutex_init(&mcf_edma->fsl_edma_mutex); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c -index 375e7e647df6b..a1517ef1f4a01 100644 ---- a/drivers/dma/mediatek/mtk-uart-apdma.c -+++ b/drivers/dma/mediatek/mtk-uart-apdma.c -@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) - unsigned int status; - int ret; - -- ret = pm_runtime_get_sync(mtkd->ddev.dev); -+ ret = pm_runtime_resume_and_get(mtkd->ddev.dev); - if (ret < 0) { - pm_runtime_put_noidle(chan->device->dev); - return ret; -@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) - ret = readx_poll_timeout(readl, c->base + VFF_EN, - status, !status, 10, 100); - if (ret) -- return ret; -+ goto err_pm; - - ret = request_irq(c->irq, 
mtk_uart_apdma_irq_handler, - IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); - if (ret < 0) { - dev_err(chan->device->dev, "Can't request dma IRQ\n"); -- return -EINVAL; -+ ret = -EINVAL; -+ goto err_pm; - } - - if (mtkd->support_33bits) - mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); - -+err_pm: -+ pm_runtime_put_noidle(mtkd->ddev.dev); - return ret; - } - -diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c -index 89f1814ff27a0..26d11885c50ec 100644 ---- a/drivers/dma/mmp_pdma.c -+++ b/drivers/dma/mmp_pdma.c -@@ -727,12 +727,6 @@ static int mmp_pdma_config_write(struct dma_chan *dchan, - - chan->dir = direction; - chan->dev_addr = addr; -- /* FIXME: drivers should be ported over to use the filter -- * function. Once that's done, the following two lines can -- * be removed. -- */ -- if (cfg->slave_id) -- chan->drcmr = cfg->slave_id; - - return 0; - } -diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c -index 9b0d463f89bbd..9f3e011fbd914 100644 ---- a/drivers/dma/mv_xor_v2.c -+++ b/drivers/dma/mv_xor_v2.c -@@ -756,7 +756,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev) - - xor_dev->clk = devm_clk_get(&pdev->dev, NULL); - if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { -- ret = EPROBE_DEFER; -+ ret = -EPROBE_DEFER; - goto disable_reg_clk; - } - if (!IS_ERR(xor_dev->clk)) { -@@ -899,6 +899,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) - tasklet_kill(&xor_dev->irq_tasklet); - - clk_disable_unprepare(xor_dev->clk); -+ clk_disable_unprepare(xor_dev->reg_clk); - - return 0; - } -diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c -index 994fc4d2aca42..dc147cc2436e9 100644 ---- a/drivers/dma/mxs-dma.c -+++ b/drivers/dma/mxs-dma.c -@@ -670,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, - return mxs_chan->status; - } - --static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) -+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma) - { - int ret; - -@@ -741,7 +741,7 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, - ofdma->of_node); - } - --static int __init mxs_dma_probe(struct platform_device *pdev) -+static int mxs_dma_probe(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; - const struct mxs_dma_type *dma_type; -@@ -839,10 +839,7 @@ static struct platform_driver mxs_dma_driver = { - .name = "mxs-dma", - .of_match_table = mxs_dma_dt_ids, - }, -+ .probe = mxs_dma_probe, - }; - --static int __init mxs_dma_module_init(void) --{ -- return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); --} --subsys_initcall(mxs_dma_module_init); -+builtin_platform_driver(mxs_dma_driver); -diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c -index 110de8a600588..ec8a1565630b6 100644 ---- a/drivers/dma/pl330.c -+++ b/drivers/dma/pl330.c -@@ -403,6 +403,12 @@ enum desc_status { - * of a channel can be BUSY at any time. - */ - BUSY, -+ /* -+ * Pause was called while descriptor was BUSY. Due to hardware -+ * limitations, only termination is possible for descriptors -+ * that have been paused. 
-+ */ -+ PAUSED, - /* - * Sitting on the channel work_list but xfer done - * by PL330 core -@@ -1050,7 +1056,7 @@ static bool _trigger(struct pl330_thread *thrd) - return true; - } - --static bool _start(struct pl330_thread *thrd) -+static bool pl330_start_thread(struct pl330_thread *thrd) - { - switch (_state(thrd)) { - case PL330_STATE_FAULT_COMPLETING: -@@ -1702,7 +1708,7 @@ static int pl330_update(struct pl330_dmac *pl330) - thrd->req_running = -1; - - /* Get going again ASAP */ -- _start(thrd); -+ pl330_start_thread(thrd); - - /* For now, just make a list of callbacks to be done */ - list_add_tail(&descdone->rqd, &pl330->req_done); -@@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) - list_for_each_entry(desc, &pch->work_list, node) { - - /* If already submitted */ -- if (desc->status == BUSY) -+ if (desc->status == BUSY || desc->status == PAUSED) - continue; - - ret = pl330_submit_req(pch->thread, desc); -@@ -2089,7 +2095,7 @@ static void pl330_tasklet(struct tasklet_struct *t) - } else { - /* Make sure the PL330 Channel thread is active */ - spin_lock(&pch->thread->dmac->lock); -- _start(pch->thread); -+ pl330_start_thread(pch->thread); - spin_unlock(&pch->thread->dmac->lock); - } - -@@ -2107,7 +2113,7 @@ static void pl330_tasklet(struct tasklet_struct *t) - if (power_down) { - pch->active = true; - spin_lock(&pch->thread->dmac->lock); -- _start(pch->thread); -+ pl330_start_thread(pch->thread); - spin_unlock(&pch->thread->dmac->lock); - power_down = false; - } -@@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan) - { - struct dma_pl330_chan *pch = to_pchan(chan); - struct pl330_dmac *pl330 = pch->dmac; -+ struct dma_pl330_desc *desc; - unsigned long flags; - - pm_runtime_get_sync(pl330->ddma.dev); -@@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan) - _stop(pch->thread); - spin_unlock(&pl330->lock); - -+ list_for_each_entry(desc, &pch->work_list, node) { -+ if (desc->status == BUSY) -+ desc->status = PAUSED; -+ } - spin_unlock_irqrestore(&pch->lock, flags); - pm_runtime_mark_last_busy(pl330->ddma.dev); - pm_runtime_put_autosuspend(pl330->ddma.dev); -@@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - else if (running && desc == running) - transferred = - pl330_get_current_xferred_count(pch, desc); -- else if (desc->status == BUSY) -+ else if (desc->status == BUSY || desc->status == PAUSED) - /* - * Busy but not running means either just enqueued, - * or finished and not yet marked done -@@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, - case DONE: - ret = DMA_COMPLETE; - break; -+ case PAUSED: -+ ret = DMA_PAUSED; -+ break; - case PREP: - case BUSY: - ret = DMA_IN_PROGRESS; -@@ -2589,7 +2603,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) - - /* If the DMAC pool is empty, alloc new */ - if (!desc) { -- DEFINE_SPINLOCK(lock); -+ static DEFINE_SPINLOCK(lock); - LIST_HEAD(pool); - - if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) -diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c -index 8a6bf291a73fe..bca4063b0dce4 100644 ---- a/drivers/dma/ptdma/ptdma-dev.c -+++ b/drivers/dma/ptdma/ptdma-dev.c -@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd - bool soc = FIELD_GET(DWORD0_SOC, desc->dw0); - u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx]; - u32 tail; -+ unsigned long flags; - - if (soc) { - desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0); - desc->dw0 &= 
~DWORD0_SOC; - } -- mutex_lock(&cmd_q->q_mutex); -+ spin_lock_irqsave(&cmd_q->q_lock, flags); - - /* Copy 32-byte command descriptor to hw queue. */ - memcpy(q_desc, desc, 32); -@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd - - /* Turn the queue back on using our cached control register */ - pt_start_queue(cmd_q); -- mutex_unlock(&cmd_q->q_mutex); -+ spin_unlock_irqrestore(&cmd_q->q_lock, flags); - - return 0; - } -@@ -197,7 +198,7 @@ int pt_core_init(struct pt_device *pt) - - cmd_q->pt = pt; - cmd_q->dma_pool = dma_pool; -- mutex_init(&cmd_q->q_mutex); -+ spin_lock_init(&cmd_q->q_lock); - - /* Page alignment satisfies our needs for N <= 128 */ - cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); -@@ -207,7 +208,7 @@ int pt_core_init(struct pt_device *pt) - if (!cmd_q->qbase) { - dev_err(dev, "unable to allocate command queue\n"); - ret = -ENOMEM; -- goto e_dma_alloc; -+ goto e_destroy_pool; - } - - cmd_q->qidx = 0; -@@ -229,8 +230,10 @@ int pt_core_init(struct pt_device *pt) - - /* Request an irq */ - ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt); -- if (ret) -- goto e_pool; -+ if (ret) { -+ dev_err(dev, "unable to allocate an IRQ\n"); -+ goto e_free_dma; -+ } - - /* Update the device registers with queue information. */ - cmd_q->qcontrol &= ~CMD_Q_SIZE; -@@ -250,21 +253,20 @@ int pt_core_init(struct pt_device *pt) - /* Register the DMA engine support */ - ret = pt_dmaengine_register(pt); - if (ret) -- goto e_dmaengine; -+ goto e_free_irq; - - /* Set up debugfs entries */ - ptdma_debugfs_setup(pt); - - return 0; - --e_dmaengine: -+e_free_irq: - free_irq(pt->pt_irq, pt); - --e_dma_alloc: -+e_free_dma: - dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma); - --e_pool: -- dev_err(dev, "unable to allocate an IRQ\n"); -+e_destroy_pool: - dma_pool_destroy(pt->cmd_q.dma_pool); - - return ret; -diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h -index afbf192c92305..0f0b400a864e4 100644 ---- a/drivers/dma/ptdma/ptdma.h -+++ b/drivers/dma/ptdma/ptdma.h -@@ -196,7 +196,7 @@ struct pt_cmd_queue { - struct ptdma_desc *qbase; - - /* Aligned queue start address (per requirement) */ -- struct mutex q_mutex ____cacheline_aligned; -+ spinlock_t q_lock ____cacheline_aligned; - unsigned int qidx; - - unsigned int qsize; -diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c -index 4a2a796e348c1..e613ace79ea83 100644 ---- a/drivers/dma/pxa_dma.c -+++ b/drivers/dma/pxa_dma.c -@@ -910,13 +910,6 @@ static void pxad_get_config(struct pxad_chan *chan, - *dcmd |= PXA_DCMD_BURST16; - else if (maxburst == 32) - *dcmd |= PXA_DCMD_BURST32; -- -- /* FIXME: drivers should be ported over to use the filter -- * function. Once that's done, the following two lines can -- * be removed. 
-- */ -- if (chan->cfg.slave_id) -- chan->drcmr = chan->cfg.slave_id; - } - - static struct dma_async_tx_descriptor * -@@ -1255,14 +1248,14 @@ static int pxad_init_phys(struct platform_device *op, - return -ENOMEM; - - for (i = 0; i < nb_phy_chans; i++) -- if (platform_get_irq(op, i) > 0) -+ if (platform_get_irq_optional(op, i) > 0) - nr_irq++; - - for (i = 0; i < nb_phy_chans; i++) { - phy = &pdev->phys[i]; - phy->base = pdev->base; - phy->idx = i; -- irq = platform_get_irq(op, i); -+ irq = platform_get_irq_optional(op, i); - if ((nr_irq > 1) && (irq > 0)) - ret = devm_request_irq(&op->dev, irq, - pxad_chan_handler, -diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c -index c8a77b428b528..ca8c862c9747e 100644 ---- a/drivers/dma/qcom/bam_dma.c -+++ b/drivers/dma/qcom/bam_dma.c -@@ -515,14 +515,6 @@ static int bam_alloc_chan(struct dma_chan *chan) - return 0; - } - --static int bam_pm_runtime_get_sync(struct device *dev) --{ -- if (pm_runtime_enabled(dev)) -- return pm_runtime_get_sync(dev); -- -- return 0; --} -- - /** - * bam_free_chan - Frees dma resources associated with specific channel - * @chan: specified channel -@@ -538,7 +530,7 @@ static void bam_free_chan(struct dma_chan *chan) - unsigned long flags; - int ret; - -- ret = bam_pm_runtime_get_sync(bdev->dev); -+ ret = pm_runtime_get_sync(bdev->dev); - if (ret < 0) - return; - -@@ -734,7 +726,7 @@ static int bam_pause(struct dma_chan *chan) - unsigned long flag; - int ret; - -- ret = bam_pm_runtime_get_sync(bdev->dev); -+ ret = pm_runtime_get_sync(bdev->dev); - if (ret < 0) - return ret; - -@@ -760,7 +752,7 @@ static int bam_resume(struct dma_chan *chan) - unsigned long flag; - int ret; - -- ret = bam_pm_runtime_get_sync(bdev->dev); -+ ret = pm_runtime_get_sync(bdev->dev); - if (ret < 0) - return ret; - -@@ -869,7 +861,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) - if (srcs & P_IRQ) - tasklet_schedule(&bdev->task); - -- ret = bam_pm_runtime_get_sync(bdev->dev); -+ ret = pm_runtime_get_sync(bdev->dev); - if (ret < 0) - return IRQ_NONE; - -@@ -987,7 +979,7 @@ static void bam_start_dma(struct bam_chan *bchan) - if (!vd) - return; - -- ret = bam_pm_runtime_get_sync(bdev->dev); -+ ret = pm_runtime_get_sync(bdev->dev); - if (ret < 0) - return; - -@@ -1350,11 +1342,6 @@ static int bam_dma_probe(struct platform_device *pdev) - if (ret) - goto err_unregister_dma; - -- if (!bdev->bamclk) { -- pm_runtime_disable(&pdev->dev); -- return 0; -- } -- - pm_runtime_irq_safe(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&pdev->dev); -@@ -1438,10 +1425,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev) - { - struct bam_device *bdev = dev_get_drvdata(dev); - -- if (bdev->bamclk) { -- pm_runtime_force_suspend(dev); -- clk_unprepare(bdev->bamclk); -- } -+ pm_runtime_force_suspend(dev); -+ clk_unprepare(bdev->bamclk); - - return 0; - } -@@ -1451,13 +1436,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev) - struct bam_device *bdev = dev_get_drvdata(dev); - int ret; - -- if (bdev->bamclk) { -- ret = clk_prepare(bdev->bamclk); -- if (ret) -- return ret; -+ ret = clk_prepare(bdev->bamclk); -+ if (ret) -+ return ret; - -- pm_runtime_force_resume(dev); -- } -+ pm_runtime_force_resume(dev); - - return 0; - } -diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c -index 1a1b7d8458c93..1e87fe6c62af2 100644 ---- a/drivers/dma/qcom/gpi.c -+++ b/drivers/dma/qcom/gpi.c -@@ -1961,7 +1961,6 @@ error_alloc_ev_ring: - error_config_int: 
- gpi_free_ring(&gpii->ev_ring, gpii); - exit_gpi_init: -- mutex_unlock(&gpii->ctrl_lock); - return ret; - } - -diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c -index f12606aeff87c..dcf2b7a4183c1 100644 ---- a/drivers/dma/sf-pdma/sf-pdma.c -+++ b/drivers/dma/sf-pdma/sf-pdma.c -@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd) - static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan) - { - struct sf_pdma_desc *desc; -- unsigned long flags; -- -- spin_lock_irqsave(&chan->lock, flags); -- -- if (chan->desc && !chan->desc->in_use) { -- spin_unlock_irqrestore(&chan->lock, flags); -- return chan->desc; -- } -- -- spin_unlock_irqrestore(&chan->lock, flags); - - desc = kzalloc(sizeof(*desc), GFP_NOWAIT); - if (!desc) -@@ -106,12 +96,10 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, - if (!desc) - return NULL; - -- desc->in_use = true; - desc->dirn = DMA_MEM_TO_MEM; - desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); - - spin_lock_irqsave(&chan->vchan.lock, iflags); -- chan->desc = desc; - sf_pdma_fill_desc(desc, dest, src, len); - spin_unlock_irqrestore(&chan->vchan.lock, iflags); - -@@ -170,11 +158,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, - unsigned long flags; - u64 residue = 0; - struct sf_pdma_desc *desc; -- struct dma_async_tx_descriptor *tx; -+ struct dma_async_tx_descriptor *tx = NULL; - - spin_lock_irqsave(&chan->vchan.lock, flags); - -- tx = &chan->desc->vdesc.tx; -+ list_for_each_entry(vd, &chan->vchan.desc_submitted, node) -+ if (vd->tx.cookie == cookie) -+ tx = &vd->tx; -+ -+ if (!tx) -+ goto out; -+ - if (cookie == tx->chan->completed_cookie) - goto out; - -@@ -241,6 +235,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan) - writel(v, regs->ctrl); - } - -+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan) -+{ -+ struct virt_dma_chan *vchan = &chan->vchan; -+ struct virt_dma_desc *vdesc; -+ -+ if (list_empty(&vchan->desc_issued)) -+ return NULL; -+ -+ vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node); -+ -+ return container_of(vdesc, struct sf_pdma_desc, vdesc); -+} -+ - static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan) - { - struct sf_pdma_desc *desc = chan->desc; -@@ -268,8 +275,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan) - - spin_lock_irqsave(&chan->vchan.lock, flags); - -- if (vchan_issue_pending(&chan->vchan) && chan->desc) -+ if (!chan->desc && vchan_issue_pending(&chan->vchan)) { -+ /* vchan_issue_pending has made a check that desc in not NULL */ -+ chan->desc = sf_pdma_get_first_pending_desc(chan); - sf_pdma_xfer_desc(chan); -+ } - - spin_unlock_irqrestore(&chan->vchan.lock, flags); - } -@@ -279,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc) - struct sf_pdma_desc *desc; - - desc = to_sf_pdma_desc(vdesc); -- desc->in_use = false; -+ kfree(desc); - } - - static void sf_pdma_donebh_tasklet(struct tasklet_struct *t) -@@ -298,6 +308,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t) - spin_lock_irqsave(&chan->vchan.lock, flags); - list_del(&chan->desc->vdesc.node); - vchan_cookie_complete(&chan->desc->vdesc); -+ -+ chan->desc = sf_pdma_get_first_pending_desc(chan); -+ if (chan->desc) -+ sf_pdma_xfer_desc(chan); -+ - spin_unlock_irqrestore(&chan->vchan.lock, flags); - } - -diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h -index 
0c20167b097d0..02a229a3ae225 100644 ---- a/drivers/dma/sf-pdma/sf-pdma.h -+++ b/drivers/dma/sf-pdma/sf-pdma.h -@@ -82,7 +82,6 @@ struct sf_pdma_desc { - u64 src_addr; - struct virt_dma_desc vdesc; - struct sf_pdma_chan *chan; -- bool in_use; - enum dma_transfer_direction dirn; - struct dma_async_tx_descriptor *async_tx; - }; -diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c -index 6885b3dcd7a97..f4c46b3b6d9d7 100644 ---- a/drivers/dma/sh/rcar-dmac.c -+++ b/drivers/dma/sh/rcar-dmac.c -@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) - - dmac->dev = &pdev->dev; - platform_set_drvdata(pdev, dmac); -- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); -- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); -+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); -+ if (ret) -+ return ret; -+ -+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); -+ if (ret) -+ return ret; - - ret = rcar_dmac_parse_of(&pdev->dev, dmac); - if (ret < 0) -diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c -index f9f30cbeccbe7..941a7ef475f4e 100644 ---- a/drivers/dma/sh/rz-dmac.c -+++ b/drivers/dma/sh/rz-dmac.c -@@ -9,6 +9,7 @@ - * Copyright 2012 Javier Martin, Vista Silicon - */ - -+#include - #include - #include - #include -@@ -143,8 +144,8 @@ struct rz_dmac { - #define CHCFG_REQD BIT(3) - #define CHCFG_SEL(bits) ((bits) & 0x07) - #define CHCFG_MEM_COPY (0x80400008) --#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16)) --#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12)) -+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16) -+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12) - #define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22) - #define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6) - #define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5) -@@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan, - if (val == CHCFG_DS_INVALID) - return -EINVAL; - -- channel->chcfg |= CHCFG_FILL_DDS(val); -+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK; -+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val); - - val = rz_dmac_ds_to_val_mapping(config->src_addr_width); - if (val == CHCFG_DS_INVALID) - return -EINVAL; - -- channel->chcfg |= CHCFG_FILL_SDS(val); -+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK; -+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val); - - return 0; - } -diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c -index 4357d2395e6b7..60115d8d40832 100644 ---- a/drivers/dma/sprd-dma.c -+++ b/drivers/dma/sprd-dma.c -@@ -1236,11 +1236,8 @@ static int sprd_dma_remove(struct platform_device *pdev) - { - struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); - struct sprd_dma_chn *c, *cn; -- int ret; - -- ret = pm_runtime_get_sync(&pdev->dev); -- if (ret < 0) -- return ret; -+ pm_runtime_get_sync(&pdev->dev); - - /* explicitly free the irq */ - if (sdev->irq > 0) -diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c -index 962b6e05287b5..d95c421877fb7 100644 ---- a/drivers/dma/st_fdma.c -+++ b/drivers/dma/st_fdma.c -@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2"); - MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver"); - MODULE_AUTHOR("Ludovic.barre "); - MODULE_AUTHOR("Peter Griffin "); --MODULE_ALIAS("platform: " DRIVER_NAME); -+MODULE_ALIAS("platform:" DRIVER_NAME); -diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c -index e1827393143f1..cb6b0e9ed5adc 100644 ---- a/drivers/dma/ste_dma40.c -+++ b/drivers/dma/ste_dma40.c -@@ -3597,6 +3597,10 @@ static int __init d40_probe(struct platform_device *pdev) - 
spin_lock_init(&base->lcla_pool.lock); - - base->irq = platform_get_irq(pdev, 0); -+ if (base->irq < 0) { -+ ret = base->irq; -+ goto destroy_cache; -+ } - - ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); - if (ret) { -diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c -index 9063c727962ed..7dfc743ac4338 100644 ---- a/drivers/dma/stm32-dma.c -+++ b/drivers/dma/stm32-dma.c -@@ -270,7 +270,6 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, - u32 threshold) - { - enum dma_slave_buswidth max_width; -- u64 addr = buf_addr; - - if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL) - max_width = DMA_SLAVE_BUSWIDTH_4_BYTES; -@@ -281,7 +280,7 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, - max_width > DMA_SLAVE_BUSWIDTH_1_BYTE) - max_width = max_width >> 1; - -- if (do_div(addr, max_width)) -+ if (buf_addr & (max_width - 1)) - max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - - return max_width; -@@ -753,8 +752,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, - if (src_bus_width < 0) - return src_bus_width; - -- /* Set memory burst size */ -- src_maxburst = STM32_DMA_MAX_BURST; -+ /* -+ * Set memory burst size - burst not possible if address is not aligned on -+ * the address boundary equal to the size of the transfer -+ */ -+ if (buf_addr & (buf_len - 1)) -+ src_maxburst = 1; -+ else -+ src_maxburst = STM32_DMA_MAX_BURST; - src_best_burst = stm32_dma_get_best_burst(buf_len, - src_maxburst, - fifoth, -@@ -803,8 +808,14 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, - if (dst_bus_width < 0) - return dst_bus_width; - -- /* Set memory burst size */ -- dst_maxburst = STM32_DMA_MAX_BURST; -+ /* -+ * Set memory burst size - burst not possible if address is not aligned on -+ * the address boundary equal to the size of the transfer -+ */ -+ if (buf_addr & (buf_len - 1)) -+ dst_maxburst = 1; -+ else -+ dst_maxburst = STM32_DMA_MAX_BURST; - dst_best_burst = stm32_dma_get_best_burst(buf_len, - dst_maxburst, - fifoth, -diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c -index a42164389ebc2..d5d55732adba1 100644 ---- a/drivers/dma/stm32-dmamux.c -+++ b/drivers/dma/stm32-dmamux.c -@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) - ret = of_dma_router_register(node, stm32_dmamux_route_allocate, - &stm32_dmamux->dmarouter); - if (ret) -- goto err_clk; -+ goto pm_disable; - - return 0; - -+pm_disable: -+ pm_runtime_disable(&pdev->dev); - err_clk: - clk_disable_unprepare(stm32_dmamux->clk); - -diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c -index 18cbd1e43c2e8..21a7bdc88970a 100644 ---- a/drivers/dma/stm32-mdma.c -+++ b/drivers/dma/stm32-mdma.c -@@ -40,7 +40,6 @@ - STM32_MDMA_SHIFT(mask)) - - #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ --#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ - - /* MDMA Channel x interrupt/status register */ - #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ -@@ -184,7 +183,7 @@ - #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) - #define STM32_MDMA_CTBR_DBUS BIT(17) - #define STM32_MDMA_CTBR_SBUS BIT(16) --#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) -+#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0) - #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ - STM32_MDMA_CTBR_TSEL_MASK) - -@@ -196,7 +195,7 @@ - - #define STM32_MDMA_MAX_BUF_LEN 128 - #define STM32_MDMA_MAX_BLOCK_LEN 65536 --#define STM32_MDMA_MAX_CHANNELS 63 -+#define STM32_MDMA_MAX_CHANNELS 32 - #define 
STM32_MDMA_MAX_REQUESTS 256 - #define STM32_MDMA_MAX_BURST 128 - #define STM32_MDMA_VERY_HIGH_PRIORITY 0x3 -@@ -1345,26 +1344,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) - static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) - { - struct stm32_mdma_device *dmadev = devid; -- struct stm32_mdma_chan *chan = devid; -+ struct stm32_mdma_chan *chan; - u32 reg, id, ccr, ien, status; - - /* Find out which channel generates the interrupt */ - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); -- if (status) { -- id = __ffs(status); -- } else { -- status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); -- if (!status) { -- dev_dbg(mdma2dev(dmadev), "spurious it\n"); -- return IRQ_NONE; -- } -- id = __ffs(status); -- /* -- * As GISR0 provides status for channel id from 0 to 31, -- * so GISR1 provides status for channel id from 32 to 62 -- */ -- id += 32; -+ if (!status) { -+ dev_dbg(mdma2dev(dmadev), "spurious it\n"); -+ return IRQ_NONE; - } -+ id = __ffs(status); - - chan = &dmadev->chan[id]; - if (!chan) { -diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c -index b1115a6d1935c..f4f722eacee2b 100644 ---- a/drivers/dma/tegra210-adma.c -+++ b/drivers/dma/tegra210-adma.c -@@ -224,7 +224,7 @@ static int tegra_adma_init(struct tegra_adma *tdma) - int ret; - - /* Clear any interrupts */ -- tdma_write(tdma, tdma->cdata->global_int_clear, 0x1); -+ tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1); - - /* Assert soft reset */ - tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1); -@@ -867,7 +867,7 @@ static int tegra_adma_probe(struct platform_device *pdev) - - pm_runtime_enable(&pdev->dev); - -- ret = pm_runtime_get_sync(&pdev->dev); -+ ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret < 0) - goto rpm_disable; - -diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c -index 71d24fc07c003..f744ddbbbad7f 100644 ---- a/drivers/dma/ti/dma-crossbar.c -+++ b/drivers/dma/ti/dma-crossbar.c -@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, - if (dma_spec->args[0] >= xbar->xbar_requests) { - dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", - dma_spec->args[0]); -+ put_device(&pdev->dev); - return ERR_PTR(-EINVAL); - } - -@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, - dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); - if (!dma_spec->np) { - dev_err(&pdev->dev, "Can't get DMA master\n"); -+ put_device(&pdev->dev); - return ERR_PTR(-EINVAL); - } - - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - of_node_put(dma_spec->np); -+ put_device(&pdev->dev); - return ERR_PTR(-ENOMEM); - } - -@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, - mutex_unlock(&xbar->mutex); - dev_err(&pdev->dev, "Run out of free DMA requests\n"); - kfree(map); -+ of_node_put(dma_spec->np); -+ put_device(&pdev->dev); - return ERR_PTR(-ENOMEM); - } - set_bit(map->xbar_out, xbar->dma_inuse); -diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c -index 4fdd9f06b7235..4f1aeb81e9c7f 100644 ---- a/drivers/dma/ti/k3-udma-glue.c -+++ b/drivers/dma/ti/k3-udma-glue.c -@@ -299,6 +299,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, - ret = device_register(&tx_chn->common.chan_dev); - if (ret) { - dev_err(dev, "Channel Device registration failed %d\n", ret); -+ put_device(&tx_chn->common.chan_dev); - 
tx_chn->common.chan_dev.parent = NULL; - goto err; - } -@@ -917,6 +918,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, - ret = device_register(&rx_chn->common.chan_dev); - if (ret) { - dev_err(dev, "Channel Device registration failed %d\n", ret); -+ put_device(&rx_chn->common.chan_dev); - rx_chn->common.chan_dev.parent = NULL; - goto err; - } -@@ -1048,6 +1050,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, - ret = device_register(&rx_chn->common.chan_dev); - if (ret) { - dev_err(dev, "Channel Device registration failed %d\n", ret); -+ put_device(&rx_chn->common.chan_dev); - rx_chn->common.chan_dev.parent = NULL; - goto err; - } -diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c -index aada84f40723c..3257b2f5157c3 100644 ---- a/drivers/dma/ti/k3-udma-private.c -+++ b/drivers/dma/ti/k3-udma-private.c -@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) - } - - pdev = of_find_device_by_node(udma_node); -+ if (np != udma_node) -+ of_node_put(udma_node); -+ - if (!pdev) { - pr_debug("UDMA device not found\n"); - return ERR_PTR(-EPROBE_DEFER); - } - -- if (np != udma_node) -- of_node_put(udma_node); -- - ud = platform_get_drvdata(pdev); - if (!ud) { - pr_debug("UDMA has not been probed\n"); -diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c -index a35858610780c..d796e50dfe992 100644 ---- a/drivers/dma/ti/k3-udma.c -+++ b/drivers/dma/ti/k3-udma.c -@@ -300,8 +300,6 @@ struct udma_chan { - - struct udma_tx_drain tx_drain; - -- u32 bcnt; /* number of bytes completed since the start of the channel */ -- - /* Channel configuration parameters */ - struct udma_chan_config config; - -@@ -757,6 +755,21 @@ static void udma_reset_rings(struct udma_chan *uc) - } - } - -+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val) -+{ -+ if (uc->desc->dir == DMA_DEV_TO_MEM) { -+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); -+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); -+ if (uc->config.ep_type != PSIL_EP_NATIVE) -+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); -+ } else { -+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); -+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); -+ if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) -+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); -+ } -+} -+ - static void udma_reset_counters(struct udma_chan *uc) - { - u32 val; -@@ -790,8 +803,6 @@ static void udma_reset_counters(struct udma_chan *uc) - val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG); - udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); - } -- -- uc->bcnt = 0; - } - - static int udma_reset_chan(struct udma_chan *uc, bool hard) -@@ -1115,7 +1126,7 @@ static void udma_check_tx_completion(struct work_struct *work) - if (uc->desc) { - struct udma_desc *d = uc->desc; - -- uc->bcnt += d->residue; -+ udma_decrement_byte_counters(uc, d->residue); - udma_start(uc); - vchan_cookie_complete(&d->vd); - break; -@@ -1168,7 +1179,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data) - vchan_cyclic_callback(&d->vd); - } else { - if (udma_is_desc_really_done(uc, d)) { -- uc->bcnt += d->residue; -+ udma_decrement_byte_counters(uc, d->residue); - udma_start(uc); - vchan_cookie_complete(&d->vd); - } else { -@@ -1204,7 +1215,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data) - vchan_cyclic_callback(&d->vd); - } else { - /* TODO: figure out the real amount of data */ -- uc->bcnt += 
d->residue; -+ udma_decrement_byte_counters(uc, d->residue); - udma_start(uc); - vchan_cookie_complete(&d->vd); - } -@@ -1348,6 +1359,7 @@ static int bcdma_get_bchan(struct udma_chan *uc) - { - struct udma_dev *ud = uc->ud; - enum udma_tp_level tpl; -+ int ret; - - if (uc->bchan) { - dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", -@@ -1365,8 +1377,11 @@ static int bcdma_get_bchan(struct udma_chan *uc) - tpl = ud->bchan_tpl.levels - 1; - - uc->bchan = __udma_reserve_bchan(ud, tpl, -1); -- if (IS_ERR(uc->bchan)) -- return PTR_ERR(uc->bchan); -+ if (IS_ERR(uc->bchan)) { -+ ret = PTR_ERR(uc->bchan); -+ uc->bchan = NULL; -+ return ret; -+ } - - uc->tchan = uc->bchan; - -@@ -1376,6 +1391,7 @@ static int bcdma_get_bchan(struct udma_chan *uc) - static int udma_get_tchan(struct udma_chan *uc) - { - struct udma_dev *ud = uc->ud; -+ int ret; - - if (uc->tchan) { - dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", -@@ -1390,8 +1406,11 @@ static int udma_get_tchan(struct udma_chan *uc) - */ - uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, - uc->config.mapped_channel_id); -- if (IS_ERR(uc->tchan)) -- return PTR_ERR(uc->tchan); -+ if (IS_ERR(uc->tchan)) { -+ ret = PTR_ERR(uc->tchan); -+ uc->tchan = NULL; -+ return ret; -+ } - - if (ud->tflow_cnt) { - int tflow_id; -@@ -1421,6 +1440,7 @@ static int udma_get_tchan(struct udma_chan *uc) - static int udma_get_rchan(struct udma_chan *uc) - { - struct udma_dev *ud = uc->ud; -+ int ret; - - if (uc->rchan) { - dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", -@@ -1435,8 +1455,13 @@ static int udma_get_rchan(struct udma_chan *uc) - */ - uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, - uc->config.mapped_channel_id); -+ if (IS_ERR(uc->rchan)) { -+ ret = PTR_ERR(uc->rchan); -+ uc->rchan = NULL; -+ return ret; -+ } - -- return PTR_ERR_OR_ZERO(uc->rchan); -+ return 0; - } - - static int udma_get_chan_pair(struct udma_chan *uc) -@@ -1490,6 +1515,7 @@ static int udma_get_chan_pair(struct udma_chan *uc) - static int udma_get_rflow(struct udma_chan *uc, int flow_id) - { - struct udma_dev *ud = uc->ud; -+ int ret; - - if (!uc->rchan) { - dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); -@@ -1503,8 +1529,13 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id) - } - - uc->rflow = __udma_get_rflow(ud, flow_id); -+ if (IS_ERR(uc->rflow)) { -+ ret = PTR_ERR(uc->rflow); -+ uc->rflow = NULL; -+ return ret; -+ } - -- return PTR_ERR_OR_ZERO(uc->rflow); -+ return 0; - } - - static void bcdma_put_bchan(struct udma_chan *uc) -@@ -3791,7 +3822,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan, - bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG); - } - -- bcnt -= uc->bcnt; - if (bcnt && !(bcnt % uc->desc->residue)) - residue = 0; - else -diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c -index d6b8a202474f4..290836b7e1be2 100644 ---- a/drivers/dma/uniphier-xdmac.c -+++ b/drivers/dma/uniphier-xdmac.c -@@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc) - static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc, - struct uniphier_xdmac_desc *xd) - { -- u32 src_mode, src_addr, src_width; -- u32 dst_mode, dst_addr, dst_width; -+ u32 src_mode, src_width; -+ u32 dst_mode, dst_width; -+ dma_addr_t src_addr, dst_addr; - u32 val, its, tnum; - enum dma_slave_buswidth buswidth; - -diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c -index a4450bc954665..edc2bb8f0523c 100644 ---- 
a/drivers/dma/xilinx/xilinx_dma.c -+++ b/drivers/dma/xilinx/xilinx_dma.c -@@ -3037,9 +3037,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) - - /* Request and map I/O memory */ - xdev->regs = devm_platform_ioremap_resource(pdev, 0); -- if (IS_ERR(xdev->regs)) -- return PTR_ERR(xdev->regs); -- -+ if (IS_ERR(xdev->regs)) { -+ err = PTR_ERR(xdev->regs); -+ goto disable_clks; -+ } - /* Retrieve the DMA engine properties from the device tree */ - xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); - xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; -@@ -3067,7 +3068,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) - if (err < 0) { - dev_err(xdev->dev, - "missing xlnx,num-fstores property\n"); -- return err; -+ goto disable_clks; - } - - err = of_property_read_u32(node, "xlnx,flush-fsync", -@@ -3087,7 +3088,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) - xdev->ext_addr = false; - - /* Set the dma mask bits */ -- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); -+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); -+ if (err < 0) { -+ dev_err(xdev->dev, "DMA mask error %d\n", err); -+ goto disable_clks; -+ } - - /* Initialize the DMA engine */ - xdev->common.dev = &pdev->dev; -@@ -3133,8 +3138,10 @@ static int xilinx_dma_probe(struct platform_device *pdev) - /* Initialize the channels */ - for_each_child_of_node(node, child) { - err = xilinx_dma_child_probe(xdev, child); -- if (err < 0) -- goto disable_clks; -+ if (err < 0) { -+ of_node_put(child); -+ goto error; -+ } - } - - if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { -@@ -3169,12 +3176,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) - - return 0; - --disable_clks: -- xdma_disable_allclks(xdev); - error: - for (i = 0; i < xdev->dma_config->max_channels; i++) - if (xdev->chan[i]) - xilinx_dma_chan_remove(xdev->chan[i]); -+disable_clks: -+ xdma_disable_allclks(xdev); - - return err; - } -diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c -index b280a53e8570a..ce5c66e6897d2 100644 ---- a/drivers/dma/xilinx/xilinx_dpdma.c -+++ b/drivers/dma/xilinx/xilinx_dpdma.c -@@ -271,9 +271,6 @@ struct xilinx_dpdma_device { - /* ----------------------------------------------------------------------------- - * DebugFS - */ -- --#ifdef CONFIG_DEBUG_FS -- - #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32 - #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535" - -@@ -299,7 +296,7 @@ struct xilinx_dpdma_debugfs_request { - - static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) - { -- if (chan->id == dpdma_debugfs.chan_id) -+ if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id) - dpdma_debugfs.xilinx_dpdma_irq_done_count++; - } - -@@ -462,16 +459,6 @@ static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) - dev_err(xdev->dev, "Failed to create debugfs testcase file\n"); - } - --#else --static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) --{ --} -- --static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) --{ --} --#endif /* CONFIG_DEBUG_FS */ -- - /* ----------------------------------------------------------------------------- - * I/O Accessors - */ -diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c -index 97f02f8eb03a8..5257bdbf77fb0 100644 ---- a/drivers/dma/xilinx/zynqmp_dma.c -+++ b/drivers/dma/xilinx/zynqmp_dma.c -@@ -232,7 +232,7 @@ struct zynqmp_dma_chan { - bool is_dmacoherent; 
- struct tasklet_struct tasklet; - bool idle; -- u32 desc_size; -+ size_t desc_size; - bool err; - u32 bus_width; - u32 src_burst_len; -@@ -489,7 +489,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) - } - - chan->desc_pool_v = dma_alloc_coherent(chan->dev, -- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), -+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) * -+ ZYNQMP_DMA_NUM_DESCS), - &chan->desc_pool_p, GFP_KERNEL); - if (!chan->desc_pool_v) - return -ENOMEM; -diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c -index 3a6d2416cb0f6..5dd29789f97d3 100644 ---- a/drivers/edac/altera_edac.c -+++ b/drivers/edac/altera_edac.c -@@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev) - if (irq < 0) { - edac_printk(KERN_ERR, EDAC_MC, - "No irq %d in DT\n", irq); -- return -ENODEV; -+ return irq; - } - - /* Arria10 has a 2nd IRQ */ -diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c -index 99b06a3e8fb12..4fce75013674f 100644 ---- a/drivers/edac/amd64_edac.c -+++ b/drivers/edac/amd64_edac.c -@@ -1065,12 +1065,14 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) - #define CS_ODD_PRIMARY BIT(1) - #define CS_EVEN_SECONDARY BIT(2) - #define CS_ODD_SECONDARY BIT(3) -+#define CS_3R_INTERLEAVE BIT(4) - - #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY) - #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY) - - static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) - { -+ u8 base, count = 0; - int cs_mode = 0; - - if (csrow_enabled(2 * dimm, ctrl, pvt)) -@@ -1083,6 +1085,20 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt) - if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt)) - cs_mode |= CS_ODD_SECONDARY; - -+ /* -+ * 3 Rank inteleaving support. -+ * There should be only three bases enabled and their two masks should -+ * be equal. -+ */ -+ for_each_chip_select(base, ctrl, pvt) -+ count += csrow_enabled(base, ctrl, pvt); -+ -+ if (count == 3 && -+ pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) { -+ edac_dbg(1, "3R interleaving in use.\n"); -+ cs_mode |= CS_3R_INTERLEAVE; -+ } -+ - return cs_mode; - } - -@@ -1891,10 +1907,14 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, - * - * The MSB is the number of bits in the full mask because BIT[0] is - * always 0. -+ * -+ * In the special 3 Rank interleaving case, a single bit is flipped -+ * without swapping with the most significant bit. This can be handled -+ * by keeping the MSB where it is and ignoring the single zero bit. - */ - msb = fls(addr_mask_orig) - 1; - weight = hweight_long(addr_mask_orig); -- num_zero_bits = msb - weight; -+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); - - /* Take the number of zero bits off from the top of the mask. 
*/ - addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); -diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c -index b8a7d9594afd4..1fa5ca57e9ec1 100644 ---- a/drivers/edac/dmc520_edac.c -+++ b/drivers/edac/dmc520_edac.c -@@ -489,7 +489,7 @@ static int dmc520_edac_probe(struct platform_device *pdev) - dev = &pdev->dev; - - for (idx = 0; idx < NUMBER_OF_IRQS; idx++) { -- irq = platform_get_irq_byname(pdev, dmc520_irq_configs[idx].name); -+ irq = platform_get_irq_byname_optional(pdev, dmc520_irq_configs[idx].name); - irqs[idx] = irq; - masks[idx] = dmc520_irq_configs[idx].mask; - if (irq >= 0) { -diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c -index 8c4d947fb8486..85c229985f905 100644 ---- a/drivers/edac/edac_device.c -+++ b/drivers/edac/edac_device.c -@@ -34,6 +34,9 @@ - static DEFINE_MUTEX(device_ctls_mutex); - static LIST_HEAD(edac_device_list); - -+/* Default workqueue processing interval on this instance, in msecs */ -+#define DEFAULT_POLL_INTERVAL 1000 -+ - #ifdef CONFIG_EDAC_DEBUG - static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) - { -@@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req) - * whole one second to save timers firing all over the period - * between integral seconds - */ -- if (edac_dev->poll_msec == 1000) -+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) - edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); - else - edac_queue_work(&edac_dev->work, edac_dev->delay); -@@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, - * timers firing on sub-second basis, while they are happy - * to fire together on the 1 second exactly - */ -- if (edac_dev->poll_msec == 1000) -+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) - edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); - else - edac_queue_work(&edac_dev->work, edac_dev->delay); -@@ -424,17 +427,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) - * Then restart the workq on the new delay - */ - void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, -- unsigned long value) -+ unsigned long msec) - { -- unsigned long jiffs = msecs_to_jiffies(value); -- -- if (value == 1000) -- jiffs = round_jiffies_relative(value); -- -- edac_dev->poll_msec = value; -- edac_dev->delay = jiffs; -+ edac_dev->poll_msec = msec; -+ edac_dev->delay = msecs_to_jiffies(msec); - -- edac_mod_work(&edac_dev->work, jiffs); -+ /* See comment in edac_device_workq_setup() above */ -+ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) -+ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); -+ else -+ edac_mod_work(&edac_dev->work, edac_dev->delay); - } - - int edac_device_alloc_index(void) -@@ -473,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) - /* This instance is NOW RUNNING */ - edac_dev->op_state = OP_RUNNING_POLL; - -- /* -- * enable workq processing on this instance, -- * default = 1000 msec -- */ -- edac_device_workq_setup(edac_dev, 1000); -+ edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL); - } else { - edac_dev->op_state = OP_RUNNING_INTERRUPT; - } -diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c -index 2c5975674723a..a859ddd9d4a13 100644 ---- a/drivers/edac/edac_mc.c -+++ b/drivers/edac/edac_mc.c -@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) - else - return (char 
*)ptr; - -- r = (unsigned long)p % align; -+ r = (unsigned long)ptr % align; - - if (r == 0) - return (char *)ptr; -diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h -index aa1f91688eb8e..841d238bc3f18 100644 ---- a/drivers/edac/edac_module.h -+++ b/drivers/edac/edac_module.h -@@ -56,7 +56,7 @@ bool edac_stop_work(struct delayed_work *work); - bool edac_mod_work(struct delayed_work *work, unsigned long delay); - - extern void edac_device_reset_delay_period(struct edac_device_ctl_info -- *edac_dev, unsigned long value); -+ *edac_dev, unsigned long msec); - extern void edac_mc_reset_delay_period(unsigned long value); - - extern void *edac_align_ptr(void **p, unsigned size, int n_elems); -diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c -index 6d1ddecbf0da3..d0a9ccf640c4b 100644 ---- a/drivers/edac/ghes_edac.c -+++ b/drivers/edac/ghes_edac.c -@@ -101,9 +101,14 @@ static void dimm_setup_label(struct dimm_info *dimm, u16 handle) - - dmi_memdev_name(handle, &bank, &device); - -- /* both strings must be non-zero */ -- if (bank && *bank && device && *device) -- snprintf(dimm->label, sizeof(dimm->label), "%s %s", bank, device); -+ /* -+ * Set to a NULL string when both bank and device are zero. In this case, -+ * the label assigned by default will be preserved. -+ */ -+ snprintf(dimm->label, sizeof(dimm->label), "%s%s%s", -+ (bank && *bank) ? bank : "", -+ (bank && *bank && device && *device) ? " " : "", -+ (device && *device) ? device : ""); - } - - static void assign_dmi_dimm_info(struct dimm_info *dimm, struct memdev_dmi_entry *entry) -diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c -index 61b76ec226af1..19fba258ae108 100644 ---- a/drivers/edac/highbank_mc_edac.c -+++ b/drivers/edac/highbank_mc_edac.c -@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev) - drvdata = mci->pvt_info; - platform_set_drvdata(pdev, mci); - -- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) -- return -ENOMEM; -+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { -+ res = -ENOMEM; -+ goto free; -+ } - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { -@@ -243,6 +245,7 @@ err2: - edac_mc_del_mc(&pdev->dev); - err: - devres_release_group(&pdev->dev, NULL); -+free: - edac_mc_free(mci); - return res; - } -diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c -index 83345bfac246f..e0af60833d28c 100644 ---- a/drivers/edac/i10nm_base.c -+++ b/drivers/edac/i10nm_base.c -@@ -198,11 +198,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, - if (unlikely(pci_enable_device(pdev) < 0)) { - edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", - bus, dev, fun); -+ pci_dev_put(pdev); - return NULL; - } - -- pci_dev_get(pdev); -- - return pdev; - } - -@@ -358,6 +357,9 @@ static int i10nm_get_hbm_munits(void) - - mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE); - if (!mbase) { -+ pci_dev_put(d->imc[lmc].mdev); -+ d->imc[lmc].mdev = NULL; -+ - i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n", - base + off); - return -ENOMEM; -@@ -368,6 +370,12 @@ static int i10nm_get_hbm_munits(void) - - mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0); - if (!I10NM_IS_HBM_IMC(mcmtr)) { -+ iounmap(d->imc[lmc].mbase); -+ d->imc[lmc].mbase = NULL; -+ d->imc[lmc].hbm_mc = false; -+ pci_dev_put(d->imc[lmc].mdev); -+ d->imc[lmc].mdev = NULL; -+ - i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n"); - return -ENODEV; - } -diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c -index 
a07bbfd075d06..8ec70da8d84fe 100644 ---- a/drivers/edac/igen6_edac.c -+++ b/drivers/edac/igen6_edac.c -@@ -27,7 +27,7 @@ - #include "edac_mc.h" - #include "edac_module.h" - --#define IGEN6_REVISION "v2.5" -+#define IGEN6_REVISION "v2.5.1" - - #define EDAC_MOD_STR "igen6_edac" - #define IGEN6_NMI_NAME "igen6_ibecc" -@@ -1216,9 +1216,6 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - INIT_WORK(&ecclog_work, ecclog_work_cb); - init_irq_work(&ecclog_irq_work, ecclog_irq_work_cb); - -- /* Check if any pending errors before registering the NMI handler */ -- ecclog_handler(); -- - rc = register_err_handler(); - if (rc) - goto fail3; -@@ -1230,6 +1227,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent) - goto fail4; - } - -+ /* Check if any pending errors before/during the registration of the error handler */ -+ ecclog_handler(); -+ - igen6_debug_setup(); - return 0; - fail4: -diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c -index 97a27e42dd610..c45519f59dc11 100644 ---- a/drivers/edac/qcom_edac.c -+++ b/drivers/edac/qcom_edac.c -@@ -252,7 +252,7 @@ clear: - static int - dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank) - { -- struct llcc_drv_data *drv = edev_ctl->pvt_info; -+ struct llcc_drv_data *drv = edev_ctl->dev->platform_data; - int ret; - - ret = dump_syn_reg_values(drv, bank, err_type); -@@ -289,7 +289,7 @@ static irqreturn_t - llcc_ecc_irq_handler(int irq, void *edev_ctl) - { - struct edac_device_ctl_info *edac_dev_ctl = edev_ctl; -- struct llcc_drv_data *drv = edac_dev_ctl->pvt_info; -+ struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data; - irqreturn_t irq_rc = IRQ_NONE; - u32 drp_error, trp_error, i; - int ret; -@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) - edev_ctl->dev_name = dev_name(dev); - edev_ctl->ctl_name = "llcc"; - edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE; -- edev_ctl->pvt_info = llcc_driv_data; - - rc = edac_device_add_device(edev_ctl); - if (rc) -diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c -index 4c626fcd4dcbb..1522d4aa2ca62 100644 ---- a/drivers/edac/sb_edac.c -+++ b/drivers/edac/sb_edac.c -@@ -1052,7 +1052,7 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt) - pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, ®); - rc = ((reg << 6) | rc) << 26; - -- return rc | 0x1ffffff; -+ return rc | 0x3ffffff; - } - - static u64 knl_get_tolm(struct sbridge_pvt *pvt) -diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c -index 1abc020d49ab6..984c93c8825f0 100644 ---- a/drivers/edac/skx_base.c -+++ b/drivers/edac/skx_base.c -@@ -510,7 +510,7 @@ rir_found: - } - - static u8 skx_close_row[] = { -- 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 -+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34 - }; - - static u8 skx_close_column[] = { -@@ -518,7 +518,7 @@ static u8 skx_close_column[] = { - }; - - static u8 skx_open_row[] = { -- 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 -+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34 - }; - - static u8 skx_open_column[] = { -diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c -index 7d08627e738b3..8557781bb8dce 100644 ---- a/drivers/edac/synopsys_edac.c -+++ b/drivers/edac/synopsys_edac.c -@@ -163,6 +163,11 @@ - #define ECC_STAT_CECNT_SHIFT 8 - #define ECC_STAT_BITNUM_MASK 0x7F - -+/* ECC error count register definitions */ -+#define 
ECC_ERRCNT_UECNT_MASK 0xFFFF0000 -+#define ECC_ERRCNT_UECNT_SHIFT 16 -+#define ECC_ERRCNT_CECNT_MASK 0xFFFF -+ - /* DDR QOS Interrupt register definitions */ - #define DDR_QOS_IRQ_STAT_OFST 0x20200 - #define DDR_QOSUE_MASK 0x4 -@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv) - base = priv->baseaddr; - p = &priv->stat; - -+ regval = readl(base + ECC_ERRCNT_OFST); -+ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK; -+ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT; -+ if (!p->ce_cnt) -+ goto ue_err; -+ - regval = readl(base + ECC_STAT_OFST); - if (!regval) - return 1; - -- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT; -- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT; -- if (!p->ce_cnt) -- goto ue_err; -- - p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK); - - regval = readl(base + ECC_CEADDR0_OFST); -@@ -1352,8 +1358,7 @@ static int mc_probe(struct platform_device *pdev) - } - } - -- if (of_device_is_compatible(pdev->dev.of_node, -- "xlnx,zynqmp-ddrc-2.40a")) -+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) - setup_address_map(priv); - #endif - -diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c -index 2ccd1db5e98ff..7197f9fa02457 100644 ---- a/drivers/edac/xgene_edac.c -+++ b/drivers/edac/xgene_edac.c -@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev) - irq = platform_get_irq_optional(pdev, i); - if (irq < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); -- rc = -EINVAL; -+ rc = irq; - goto out_err; - } - rc = devm_request_irq(&pdev->dev, irq, -diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig -index c69d40ae5619a..7684b3afa6304 100644 ---- a/drivers/extcon/Kconfig -+++ b/drivers/extcon/Kconfig -@@ -180,7 +180,7 @@ config EXTCON_USBC_CROS_EC - - config EXTCON_USBC_TUSB320 - tristate "TI TUSB320 USB-C extcon support" -- depends on I2C -+ depends on I2C && TYPEC - select REGMAP_I2C - help - Say Y here to enable support for USB Type C cable detection extcon -diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c -index fdb31954cf2b6..8073bc7d3e615 100644 ---- a/drivers/extcon/extcon-axp288.c -+++ b/drivers/extcon/extcon-axp288.c -@@ -375,8 +375,8 @@ static int axp288_extcon_probe(struct platform_device *pdev) - if (adev) { - info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev)); - put_device(&adev->dev); -- if (!info->id_extcon) -- return -EPROBE_DEFER; -+ if (IS_ERR(info->id_extcon)) -+ return PTR_ERR(info->id_extcon); - - dev_info(dev, "controlling USB role\n"); - } else { -diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c -index 5b9a3cf8df268..2a7874108df87 100644 ---- a/drivers/extcon/extcon-ptn5150.c -+++ b/drivers/extcon/extcon-ptn5150.c -@@ -194,6 +194,13 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info) - return 0; - } - -+static void ptn5150_work_sync_and_put(void *data) -+{ -+ struct ptn5150_info *info = data; -+ -+ cancel_work_sync(&info->irq_work); -+} -+ - static int ptn5150_i2c_probe(struct i2c_client *i2c) - { - struct device *dev = &i2c->dev; -@@ -284,6 +291,10 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c) - if (ret) - return -EINVAL; - -+ ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info); -+ if (ret) -+ return ret; -+ - /* - * Update current extcon state if for example OTG connection was there - * before the probe -diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c -index 
805af73b41521..b408ce989c223 100644 ---- a/drivers/extcon/extcon-usbc-tusb320.c -+++ b/drivers/extcon/extcon-usbc-tusb320.c -@@ -1,11 +1,12 @@ - // SPDX-License-Identifier: GPL-2.0 --/** -+/* - * drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver - * - * Copyright (C) 2020 National Instruments Corporation - * Author: Michael Auchter - */ - -+#include - #include - #include - #include -@@ -13,21 +14,70 @@ - #include - #include - #include -+#include -+ -+#define TUSB320_REG8 0x8 -+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6) -+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0 -+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1 -+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2 -+#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4) -+#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0 -+#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1 -+#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2 -+#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3 -+#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2) -+#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0 -+#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4 -+#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5 -+#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6 -+#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0) - - #define TUSB320_REG9 0x9 - #define TUSB320_REG9_ATTACHED_STATE_SHIFT 6 - #define TUSB320_REG9_ATTACHED_STATE_MASK 0x3 - #define TUSB320_REG9_CABLE_DIRECTION BIT(5) - #define TUSB320_REG9_INTERRUPT_STATUS BIT(4) --#define TUSB320_ATTACHED_STATE_NONE 0x0 --#define TUSB320_ATTACHED_STATE_DFP 0x1 --#define TUSB320_ATTACHED_STATE_UFP 0x2 --#define TUSB320_ATTACHED_STATE_ACC 0x3 -+ -+#define TUSB320_REGA 0xa -+#define TUSB320L_REGA_DISABLE_TERM BIT(0) -+#define TUSB320_REGA_I2C_SOFT_RESET BIT(3) -+#define TUSB320_REGA_MODE_SELECT_SHIFT 4 -+#define TUSB320_REGA_MODE_SELECT_MASK 0x3 -+ -+#define TUSB320L_REGA0_REVISION 0xa0 -+ -+enum tusb320_attached_state { -+ TUSB320_ATTACHED_STATE_NONE, -+ TUSB320_ATTACHED_STATE_DFP, -+ TUSB320_ATTACHED_STATE_UFP, -+ TUSB320_ATTACHED_STATE_ACC, -+}; -+ -+enum tusb320_mode { -+ TUSB320_MODE_PORT, -+ TUSB320_MODE_UFP, -+ TUSB320_MODE_DFP, -+ TUSB320_MODE_DRP, -+}; -+ -+struct tusb320_priv; -+ -+struct tusb320_ops { -+ int (*set_mode)(struct tusb320_priv *priv, enum tusb320_mode mode); -+ int (*get_revision)(struct tusb320_priv *priv, unsigned int *revision); -+}; - - struct tusb320_priv { - struct device *dev; - struct regmap *regmap; - struct extcon_dev *edev; -+ struct tusb320_ops *ops; -+ enum tusb320_attached_state state; -+ struct typec_port *port; -+ struct typec_capability cap; -+ enum typec_port_type port_type; -+ enum typec_pwr_opmode pwr_opmode; - }; - - static const char * const tusb_attached_states[] = { -@@ -62,19 +112,142 @@ static int tusb320_check_signature(struct tusb320_priv *priv) - return 0; - } - --static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) -+static int tusb320_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) - { -- struct tusb320_priv *priv = dev_id; -- int state, polarity; -- unsigned reg; -+ int ret; - -- if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { -- dev_err(priv->dev, "error during i2c read!\n"); -- return IRQ_NONE; -+ /* Mode cannot be changed while cable is attached */ -+ if (priv->state != TUSB320_ATTACHED_STATE_NONE) -+ return -EBUSY; -+ -+ /* Write mode */ -+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, -+ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, -+ mode << TUSB320_REGA_MODE_SELECT_SHIFT); -+ if (ret) 
{ -+ dev_err(priv->dev, "failed to write mode: %d\n", ret); -+ return ret; - } - -- if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) -- return IRQ_NONE; -+ return 0; -+} -+ -+static int tusb320l_set_mode(struct tusb320_priv *priv, enum tusb320_mode mode) -+{ -+ int ret; -+ -+ /* Disable CC state machine */ -+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, -+ TUSB320L_REGA_DISABLE_TERM, 1); -+ if (ret) { -+ dev_err(priv->dev, -+ "failed to disable CC state machine: %d\n", ret); -+ return ret; -+ } -+ -+ /* Write mode */ -+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, -+ TUSB320_REGA_MODE_SELECT_MASK << TUSB320_REGA_MODE_SELECT_SHIFT, -+ mode << TUSB320_REGA_MODE_SELECT_SHIFT); -+ if (ret) { -+ dev_err(priv->dev, "failed to write mode: %d\n", ret); -+ goto err; -+ } -+ -+ msleep(5); -+err: -+ /* Re-enable CC state machine */ -+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, -+ TUSB320L_REGA_DISABLE_TERM, 0); -+ if (ret) -+ dev_err(priv->dev, -+ "failed to re-enable CC state machine: %d\n", ret); -+ -+ return ret; -+} -+ -+static int tusb320_reset(struct tusb320_priv *priv) -+{ -+ int ret; -+ -+ /* Set mode to default (follow PORT pin) */ -+ ret = priv->ops->set_mode(priv, TUSB320_MODE_PORT); -+ if (ret && ret != -EBUSY) { -+ dev_err(priv->dev, -+ "failed to set mode to PORT: %d\n", ret); -+ return ret; -+ } -+ -+ /* Perform soft reset */ -+ ret = regmap_write_bits(priv->regmap, TUSB320_REGA, -+ TUSB320_REGA_I2C_SOFT_RESET, 1); -+ if (ret) { -+ dev_err(priv->dev, -+ "failed to write soft reset bit: %d\n", ret); -+ return ret; -+ } -+ -+ /* Wait for chip to go through reset */ -+ msleep(95); -+ -+ return 0; -+} -+ -+static int tusb320l_get_revision(struct tusb320_priv *priv, unsigned int *revision) -+{ -+ return regmap_read(priv->regmap, TUSB320L_REGA0_REVISION, revision); -+} -+ -+static struct tusb320_ops tusb320_ops = { -+ .set_mode = tusb320_set_mode, -+}; -+ -+static struct tusb320_ops tusb320l_ops = { -+ .set_mode = tusb320l_set_mode, -+ .get_revision = tusb320l_get_revision, -+}; -+ -+static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv) -+{ -+ u8 mode; -+ -+ if (priv->pwr_opmode == TYPEC_PWR_MODE_USB) -+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB; -+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A) -+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A; -+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A) -+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A; -+ else /* No other mode is supported. 
*/ -+ return -EINVAL; -+ -+ return regmap_write_bits(priv->regmap, TUSB320_REG8, -+ TUSB320_REG8_CURRENT_MODE_ADVERTISE, -+ FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE, -+ mode)); -+} -+ -+static int tusb320_port_type_set(struct typec_port *port, -+ enum typec_port_type type) -+{ -+ struct tusb320_priv *priv = typec_get_drvdata(port); -+ -+ if (type == TYPEC_PORT_SRC) -+ return priv->ops->set_mode(priv, TUSB320_MODE_DFP); -+ else if (type == TYPEC_PORT_SNK) -+ return priv->ops->set_mode(priv, TUSB320_MODE_UFP); -+ else if (type == TYPEC_PORT_DRP) -+ return priv->ops->set_mode(priv, TUSB320_MODE_DRP); -+ else -+ return priv->ops->set_mode(priv, TUSB320_MODE_PORT); -+} -+ -+static const struct typec_operations tusb320_typec_ops = { -+ .port_type_set = tusb320_port_type_set, -+}; -+ -+static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg) -+{ -+ int state, polarity; - - state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & - TUSB320_REG9_ATTACHED_STATE_MASK; -@@ -96,20 +269,170 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) - extcon_sync(priv->edev, EXTCON_USB); - extcon_sync(priv->edev, EXTCON_USB_HOST); - -+ priv->state = state; -+} -+ -+static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9) -+{ -+ struct typec_port *port = priv->port; -+ struct device *dev = priv->dev; -+ u8 mode, role, state; -+ int ret, reg8; -+ bool ori; -+ -+ ori = reg9 & TUSB320_REG9_CABLE_DIRECTION; -+ typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE : -+ TYPEC_ORIENTATION_NORMAL); -+ -+ state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & -+ TUSB320_REG9_ATTACHED_STATE_MASK; -+ if (state == TUSB320_ATTACHED_STATE_DFP) -+ role = TYPEC_SOURCE; -+ else -+ role = TYPEC_SINK; -+ -+ typec_set_vconn_role(port, role); -+ typec_set_pwr_role(port, role); -+ typec_set_data_role(port, role == TYPEC_SOURCE ? -+ TYPEC_HOST : TYPEC_DEVICE); -+ -+ ret = regmap_read(priv->regmap, TUSB320_REG8, ®8); -+ if (ret) { -+ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret); -+ return; -+ } -+ -+ mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8); -+ if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF) -+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); -+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED) -+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A); -+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI) -+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A); -+ else /* Charge through accessory */ -+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); -+} -+ -+static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv, -+ bool force_update) -+{ -+ unsigned int reg; -+ -+ if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { -+ dev_err(priv->dev, "error during i2c read!\n"); -+ return IRQ_NONE; -+ } -+ -+ if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS)) -+ return IRQ_NONE; -+ -+ tusb320_extcon_irq_handler(priv, reg); -+ -+ /* -+ * Type-C support is optional. Only call the Type-C handler if a -+ * port had been registered previously. 
-+ */ -+ if (priv->port) -+ tusb320_typec_irq_handler(priv, reg); -+ - regmap_write(priv->regmap, TUSB320_REG9, reg); - - return IRQ_HANDLED; - } - -+static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) -+{ -+ struct tusb320_priv *priv = dev_id; -+ -+ return tusb320_state_update_handler(priv, false); -+} -+ - static const struct regmap_config tusb320_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - }; - --static int tusb320_extcon_probe(struct i2c_client *client, -- const struct i2c_device_id *id) -+static int tusb320_extcon_probe(struct tusb320_priv *priv) -+{ -+ int ret; -+ -+ priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable); -+ if (IS_ERR(priv->edev)) { -+ dev_err(priv->dev, "failed to allocate extcon device\n"); -+ return PTR_ERR(priv->edev); -+ } -+ -+ ret = devm_extcon_dev_register(priv->dev, priv->edev); -+ if (ret < 0) { -+ dev_err(priv->dev, "failed to register extcon device\n"); -+ return ret; -+ } -+ -+ extcon_set_property_capability(priv->edev, EXTCON_USB, -+ EXTCON_PROP_USB_TYPEC_POLARITY); -+ extcon_set_property_capability(priv->edev, EXTCON_USB_HOST, -+ EXTCON_PROP_USB_TYPEC_POLARITY); -+ -+ return 0; -+} -+ -+static int tusb320_typec_probe(struct i2c_client *client, -+ struct tusb320_priv *priv) -+{ -+ struct fwnode_handle *connector; -+ const char *cap_str; -+ int ret; -+ -+ /* The Type-C connector is optional, for backward compatibility. */ -+ connector = device_get_named_child_node(&client->dev, "connector"); -+ if (!connector) -+ return 0; -+ -+ /* Type-C connector found. */ -+ ret = typec_get_fw_cap(&priv->cap, connector); -+ if (ret) -+ return ret; -+ -+ priv->port_type = priv->cap.type; -+ -+ /* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */ -+ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str); -+ if (ret) -+ return ret; -+ -+ ret = typec_find_pwr_opmode(cap_str); -+ if (ret < 0) -+ return ret; -+ if (ret == TYPEC_PWR_MODE_PD) -+ return -EINVAL; -+ -+ priv->pwr_opmode = ret; -+ -+ /* Initialize the hardware with the devicetree settings. 
*/ -+ ret = tusb320_set_adv_pwr_mode(priv); -+ if (ret) -+ return ret; -+ -+ priv->cap.revision = USB_TYPEC_REV_1_1; -+ priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO; -+ priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG; -+ priv->cap.orientation_aware = true; -+ priv->cap.driver_data = priv; -+ priv->cap.ops = &tusb320_typec_ops; -+ priv->cap.fwnode = connector; -+ -+ priv->port = typec_register_port(&client->dev, &priv->cap); -+ if (IS_ERR(priv->port)) -+ return PTR_ERR(priv->port); -+ -+ return 0; -+} -+ -+static int tusb320_probe(struct i2c_client *client) - { - struct tusb320_priv *priv; -+ const void *match_data; -+ unsigned int revision; - int ret; - - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); -@@ -125,25 +448,42 @@ static int tusb320_extcon_probe(struct i2c_client *client, - if (ret) - return ret; - -- priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable); -- if (IS_ERR(priv->edev)) { -- dev_err(priv->dev, "failed to allocate extcon device\n"); -- return PTR_ERR(priv->edev); -+ match_data = device_get_match_data(&client->dev); -+ if (!match_data) -+ return -EINVAL; -+ -+ priv->ops = (struct tusb320_ops*)match_data; -+ -+ if (priv->ops->get_revision) { -+ ret = priv->ops->get_revision(priv, &revision); -+ if (ret) -+ dev_warn(priv->dev, -+ "failed to read revision register: %d\n", ret); -+ else -+ dev_info(priv->dev, "chip revision %d\n", revision); - } - -- ret = devm_extcon_dev_register(priv->dev, priv->edev); -- if (ret < 0) { -- dev_err(priv->dev, "failed to register extcon device\n"); -+ ret = tusb320_extcon_probe(priv); -+ if (ret) - return ret; -- } - -- extcon_set_property_capability(priv->edev, EXTCON_USB, -- EXTCON_PROP_USB_TYPEC_POLARITY); -- extcon_set_property_capability(priv->edev, EXTCON_USB_HOST, -- EXTCON_PROP_USB_TYPEC_POLARITY); -+ ret = tusb320_typec_probe(client, priv); -+ if (ret) -+ return ret; - - /* update initial state */ -- tusb320_irq_handler(client->irq, priv); -+ tusb320_state_update_handler(priv, true); -+ -+ /* Reset chip to its default state */ -+ ret = tusb320_reset(priv); -+ if (ret) -+ dev_warn(priv->dev, "failed to reset chip: %d\n", ret); -+ else -+ /* -+ * State and polarity might change after a reset, so update -+ * them again and make sure the interrupt status bit is cleared. 
-+ */ -+ tusb320_state_update_handler(priv, true); - - ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, - tusb320_irq_handler, -@@ -154,13 +494,14 @@ static int tusb320_extcon_probe(struct i2c_client *client, - } - - static const struct of_device_id tusb320_extcon_dt_match[] = { -- { .compatible = "ti,tusb320", }, -+ { .compatible = "ti,tusb320", .data = &tusb320_ops, }, -+ { .compatible = "ti,tusb320l", .data = &tusb320l_ops, }, - { } - }; - MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match); - - static struct i2c_driver tusb320_extcon_driver = { -- .probe = tusb320_extcon_probe, -+ .probe_new = tusb320_probe, - .driver = { - .name = "extcon-tusb320", - .of_match_table = tusb320_extcon_dt_match, -diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c -index e7a9561a826d3..6a0d55d627ad0 100644 ---- a/drivers/extcon/extcon.c -+++ b/drivers/extcon/extcon.c -@@ -196,6 +196,14 @@ static const struct __extcon_info { - * @attr_name: "name" sysfs entry - * @attr_state: "state" sysfs entry - * @attrs: the array pointing to attr_name and attr_state for attr_g -+ * @usb_propval: the array of USB connector properties -+ * @chg_propval: the array of charger connector properties -+ * @jack_propval: the array of jack connector properties -+ * @disp_propval: the array of display connector properties -+ * @usb_bits: the bit array of the USB connector property capabilities -+ * @chg_bits: the bit array of the charger connector property capabilities -+ * @jack_bits: the bit array of the jack connector property capabilities -+ * @disp_bits: the bit array of the display connector property capabilities - */ - struct extcon_cable { - struct extcon_dev *edev; -@@ -863,6 +871,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability); - * @extcon_name: the extcon name provided with extcon_dev_register() - * - * Return the pointer of extcon device if success or ERR_PTR(err) if fail. -+ * NOTE: This function returns -EPROBE_DEFER so it may only be called from -+ * probe() functions. 
- */ - struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) - { -@@ -876,7 +886,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) - if (!strcmp(sd->name, extcon_name)) - goto out; - } -- sd = NULL; -+ sd = ERR_PTR(-EPROBE_DEFER); - out: - mutex_unlock(&extcon_dev_list_lock); - return sd; -@@ -1230,19 +1240,14 @@ int extcon_dev_register(struct extcon_dev *edev) - edev->dev.type = &edev->extcon_dev_type; - } - -- ret = device_register(&edev->dev); -- if (ret) { -- put_device(&edev->dev); -- goto err_dev; -- } -- - spin_lock_init(&edev->lock); -- edev->nh = devm_kcalloc(&edev->dev, edev->max_supported, -- sizeof(*edev->nh), GFP_KERNEL); -- if (!edev->nh) { -- ret = -ENOMEM; -- device_unregister(&edev->dev); -- goto err_dev; -+ if (edev->max_supported) { -+ edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh), -+ GFP_KERNEL); -+ if (!edev->nh) { -+ ret = -ENOMEM; -+ goto err_alloc_nh; -+ } - } - - for (index = 0; index < edev->max_supported; index++) -@@ -1253,6 +1258,12 @@ int extcon_dev_register(struct extcon_dev *edev) - dev_set_drvdata(&edev->dev, edev); - edev->state = 0; - -+ ret = device_register(&edev->dev); -+ if (ret) { -+ put_device(&edev->dev); -+ goto err_dev; -+ } -+ - mutex_lock(&extcon_dev_list_lock); - list_add(&edev->entry, &extcon_dev_list); - mutex_unlock(&extcon_dev_list_lock); -@@ -1260,6 +1271,9 @@ int extcon_dev_register(struct extcon_dev *edev) - return 0; - - err_dev: -+ if (edev->max_supported) -+ kfree(edev->nh); -+err_alloc_nh: - if (edev->max_supported) - kfree(edev->extcon_dev_type.groups); - err_alloc_groups: -@@ -1320,6 +1334,7 @@ void extcon_dev_unregister(struct extcon_dev *edev) - if (edev->max_supported) { - kfree(edev->extcon_dev_type.groups); - kfree(edev->cables); -+ kfree(edev->nh); - } - - put_device(&edev->dev); -diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c -index 54be88167c60b..f3b3953cac834 100644 ---- a/drivers/firewire/core-card.c -+++ b/drivers/firewire/core-card.c -@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release); - void fw_core_remove_card(struct fw_card *card) - { - struct fw_card_driver dummy_driver = dummy_driver_template; -+ unsigned long flags; - - card->driver->update_phy_reg(card, 4, - PHY_LINK_ACTIVE | PHY_CONTENDER, 0); -@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card) - dummy_driver.stop_iso = card->driver->stop_iso; - card->driver = &dummy_driver; - -+ spin_lock_irqsave(&card->lock, flags); - fw_destroy_nodes(card); -+ spin_unlock_irqrestore(&card->lock, flags); - - /* Wait for all users, especially device workqueue jobs, to finish. 
*/ - fw_card_put(card); -diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c -index fb6c651214f32..16ea847ade5fd 100644 ---- a/drivers/firewire/core-cdev.c -+++ b/drivers/firewire/core-cdev.c -@@ -818,8 +818,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg) - - r = container_of(resource, struct inbound_transaction_resource, - resource); -- if (is_fcp_request(r->request)) -+ if (is_fcp_request(r->request)) { -+ kfree(r->data); - goto out; -+ } - - if (a->length != fw_get_response_length(r->request)) { - ret = -EINVAL; -@@ -1480,6 +1482,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, - { - struct outbound_phy_packet_event *e = - container_of(packet, struct outbound_phy_packet_event, p); -+ struct client *e_client; - - switch (status) { - /* expected: */ -@@ -1496,9 +1499,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, - } - e->phy_packet.data[0] = packet->timestamp; - -+ e_client = e->client; - queue_event(e->client, &e->event, &e->phy_packet, - sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); -- client_put(e->client); -+ client_put(e_client); - } - - static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) -diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c -index b63d55f5ebd33..f40c815343812 100644 ---- a/drivers/firewire/core-topology.c -+++ b/drivers/firewire/core-topology.c -@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card, - card->bm_retries = 0; - } - -+/* Must be called with card->lock held */ - void fw_destroy_nodes(struct fw_card *card) - { -- unsigned long flags; -- -- spin_lock_irqsave(&card->lock, flags); - card->color++; - if (card->local_node != NULL) - for_each_fw_node(card, card->local_node, report_lost_node); - card->local_node = NULL; -- spin_unlock_irqrestore(&card->lock, flags); - } - - static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) -@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, - struct fw_node *local_node; - unsigned long flags; - -+ spin_lock_irqsave(&card->lock, flags); -+ - /* - * If the selfID buffer is not the immediate successor of the - * previously processed one, we cannot reliably compare the -@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, - card->bm_retries = 0; - } - -- spin_lock_irqsave(&card->lock, flags); -- - card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; - card->node_id = node_id; - /* -diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c -index ac487c96bb717..6c20815cc8d16 100644 ---- a/drivers/firewire/core-transaction.c -+++ b/drivers/firewire/core-transaction.c -@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t) - static int close_transaction(struct fw_transaction *transaction, - struct fw_card *card, int rcode) - { -- struct fw_transaction *t; -+ struct fw_transaction *t = NULL, *iter; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); -- list_for_each_entry(t, &card->transaction_list, link) { -- if (t == transaction) { -- if (!try_cancel_split_timeout(t)) { -+ list_for_each_entry(iter, &card->transaction_list, link) { -+ if (iter == transaction) { -+ if (!try_cancel_split_timeout(iter)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; - } -- list_del_init(&t->link); -- card->tlabel_mask &= ~(1ULL << 
t->tlabel); -+ list_del_init(&iter->link); -+ card->tlabel_mask &= ~(1ULL << iter->tlabel); -+ t = iter; - break; - } - } - spin_unlock_irqrestore(&card->lock, flags); - -- if (&t->link != &card->transaction_list) { -+ if (t) { - t->callback(card, rcode, NULL, 0, t->callback_data); - return 0; - } -@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request); - - void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) - { -- struct fw_transaction *t; -+ struct fw_transaction *t = NULL, *iter; - unsigned long flags; - u32 *data; - size_t data_length; -@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) - rcode = HEADER_GET_RCODE(p->header[1]); - - spin_lock_irqsave(&card->lock, flags); -- list_for_each_entry(t, &card->transaction_list, link) { -- if (t->node_id == source && t->tlabel == tlabel) { -- if (!try_cancel_split_timeout(t)) { -+ list_for_each_entry(iter, &card->transaction_list, link) { -+ if (iter->node_id == source && iter->tlabel == tlabel) { -+ if (!try_cancel_split_timeout(iter)) { - spin_unlock_irqrestore(&card->lock, flags); - goto timed_out; - } -- list_del_init(&t->link); -- card->tlabel_mask &= ~(1ULL << t->tlabel); -+ list_del_init(&iter->link); -+ card->tlabel_mask &= ~(1ULL << iter->tlabel); -+ t = iter; - break; - } - } - spin_unlock_irqrestore(&card->lock, flags); - -- if (&t->link == &card->transaction_list) { -+ if (!t) { - timed_out: - fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", - source, tlabel); -diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c -index 4c3fd2eed1da4..beba0a56bb9ae 100644 ---- a/drivers/firewire/net.c -+++ b/drivers/firewire/net.c -@@ -488,7 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, - struct sk_buff *skb, u16 source_node_id, - bool is_broadcast, u16 ether_type) - { -- int status; -+ int status, len; - - switch (ether_type) { - case ETH_P_ARP: -@@ -542,13 +542,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net, - } - skb->protocol = protocol; - } -+ -+ len = skb->len; - status = netif_rx(skb); - if (status == NET_RX_DROP) { - net->stats.rx_errors++; - net->stats.rx_dropped++; - } else { - net->stats.rx_packets++; -- net->stats.rx_bytes += skb->len; -+ net->stats.rx_bytes += len; - } - - return 0; -diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c -index 4d5054211550b..2ceed9287435f 100644 ---- a/drivers/firewire/sbp2.c -+++ b/drivers/firewire/sbp2.c -@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, - void *payload, size_t length, void *callback_data) - { - struct sbp2_logical_unit *lu = callback_data; -- struct sbp2_orb *orb; -+ struct sbp2_orb *orb = NULL, *iter; - struct sbp2_status status; - unsigned long flags; - -@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, - - /* Lookup the orb corresponding to this status write. 
*/ - spin_lock_irqsave(&lu->tgt->lock, flags); -- list_for_each_entry(orb, &lu->orb_list, link) { -+ list_for_each_entry(iter, &lu->orb_list, link) { - if (STATUS_GET_ORB_HIGH(status) == 0 && -- STATUS_GET_ORB_LOW(status) == orb->request_bus) { -- orb->rcode = RCODE_COMPLETE; -- list_del(&orb->link); -+ STATUS_GET_ORB_LOW(status) == iter->request_bus) { -+ iter->rcode = RCODE_COMPLETE; -+ list_del(&iter->link); -+ orb = iter; - break; - } - } - spin_unlock_irqrestore(&lu->tgt->lock, flags); - -- if (&orb->link != &lu->orb_list) { -+ if (orb) { - orb->callback(orb, &status); - kref_put(&orb->kref, free_orb); /* orb callback reference */ - } else { -diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig -index cda7d7162cbbd..97ce31e667fca 100644 ---- a/drivers/firmware/Kconfig -+++ b/drivers/firmware/Kconfig -@@ -40,6 +40,7 @@ config ARM_SCPI_POWER_DOMAIN - config ARM_SDE_INTERFACE - bool "ARM Software Delegated Exception Interface (SDEI)" - depends on ARM64 -+ depends on ACPI_APEI_GHES - help - The Software Delegated Exception Interface (SDEI) is an ARM - standard for registering callbacks from the platform firmware -diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c -index 641a918190880..edef31c413123 100644 ---- a/drivers/firmware/arm_ffa/bus.c -+++ b/drivers/firmware/arm_ffa/bus.c -@@ -15,6 +15,8 @@ - - #include "common.h" - -+static DEFINE_IDA(ffa_bus_id); -+ - static int ffa_device_match(struct device *dev, struct device_driver *drv) - { - const struct ffa_device_id *id_table; -@@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev) - { - struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); - -- ffa_drv->remove(to_ffa_dev(dev)); -+ if (ffa_drv->remove) -+ ffa_drv->remove(to_ffa_dev(dev)); - } - - static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env) -@@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev) - { - struct ffa_device *ffa_dev = to_ffa_dev(dev); - -+ ida_free(&ffa_bus_id, ffa_dev->id); - kfree(ffa_dev); - } - -@@ -169,18 +173,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) - - struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id) - { -- int ret; -+ int id, ret; - struct device *dev; - struct ffa_device *ffa_dev; - -+ id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL); -+ if (id < 0) -+ return NULL; -+ - ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL); -- if (!ffa_dev) -+ if (!ffa_dev) { -+ ida_free(&ffa_bus_id, id); - return NULL; -+ } - - dev = &ffa_dev->dev; - dev->bus = &ffa_bus_type; - dev->release = ffa_release_device; -- dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id); -+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); - - ffa_dev->vm_id = vm_id; - uuid_copy(&ffa_dev->uuid, uuid); -@@ -215,4 +225,5 @@ void arm_ffa_bus_exit(void) - { - ffa_devices_unregister(); - bus_unregister(&ffa_bus_type); -+ ida_destroy(&ffa_bus_id); - } -diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c -index c9fb56afbcb49..e4fb0c1ae4869 100644 ---- a/drivers/firmware/arm_ffa/driver.c -+++ b/drivers/firmware/arm_ffa/driver.c -@@ -451,12 +451,18 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, - ep_mem_access->receiver = args->attrs[idx].receiver; - ep_mem_access->attrs = args->attrs[idx].attrs; - ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs); -+ ep_mem_access->flag = 0; -+ ep_mem_access->reserved = 0; - } -+ mem_region->handle = 0; -+ mem_region->reserved_0 = 0; -+ mem_region->reserved_1 = 0; - 
mem_region->ep_count = args->nattrs; - - composite = buffer + COMPOSITE_OFFSET(args->nattrs); - composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); - composite->addr_range_cnt = num_entries; -+ composite->reserved = 0; - - length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries); - frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0); -@@ -491,6 +497,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, - - constituents->address = sg_phys(args->sg); - constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE; -+ constituents->reserved = 0; - constituents++; - frag_len += sizeof(struct ffa_mem_region_addr_range); - } while ((args->sg = sg_next(args->sg))); -@@ -556,7 +563,7 @@ static int ffa_partition_info_get(const char *uuid_str, - return -ENODEV; - } - -- count = ffa_partition_probe(&uuid_null, &pbuf); -+ count = ffa_partition_probe(&uuid, &pbuf); - if (count <= 0) - return -ENOENT; - -@@ -645,8 +652,6 @@ static void ffa_setup_partitions(void) - __func__, tpbuf->id); - continue; - } -- -- ffa_dev_set_drvdata(ffa_dev, drv_info); - } - kfree(pbuf); - } -diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c -index de416f9e79213..3fe172c03c247 100644 ---- a/drivers/firmware/arm_scmi/base.c -+++ b/drivers/firmware/arm_scmi/base.c -@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes { - __le16 reserved; - }; - -+struct scmi_msg_resp_base_discover_agent { -+ __le32 agent_id; -+ u8 name[SCMI_MAX_STR_SIZE]; -+}; -+ -+ - struct scmi_msg_base_error_notify { - __le32 event_control; - #define BASE_TP_NOTIFY_ALL BIT(0) -@@ -191,7 +197,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, - break; - - loop_num_ret = le32_to_cpu(*num_ret); -- if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) { -+ if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) { - dev_err(dev, "No. 
of Protocol > MAX_PROTOCOLS_IMP"); - break; - } -@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, - int id, char *name) - { - int ret; -+ struct scmi_msg_resp_base_discover_agent *agent_info; - struct scmi_xfer *t; - - ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT, -- sizeof(__le32), SCMI_MAX_STR_SIZE, &t); -+ sizeof(__le32), sizeof(*agent_info), &t); - if (ret) - return ret; - - put_unaligned_le32(id, t->tx.buf); - - ret = ph->xops->do_xfer(ph, t); -- if (!ret) -- strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); -+ if (!ret) { -+ agent_info = t->rx.buf; -+ strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); -+ } - - ph->xops->xfer_put(ph, t); - -diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c -index f6fe723ab869e..7c1c0951e562d 100644 ---- a/drivers/firmware/arm_scmi/bus.c -+++ b/drivers/firmware/arm_scmi/bus.c -@@ -216,9 +216,20 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) - device_unregister(&scmi_dev->dev); - } - -+void scmi_device_link_add(struct device *consumer, struct device *supplier) -+{ -+ struct device_link *link; -+ -+ link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); -+ -+ WARN_ON(!link); -+} -+ - void scmi_set_handle(struct scmi_device *scmi_dev) - { - scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); -+ if (scmi_dev->handle) -+ scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); - } - - int scmi_protocol_register(const struct scmi_protocol *proto) -diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c -index 35b56c8ba0c0e..e76194a60edf9 100644 ---- a/drivers/firmware/arm_scmi/clock.c -+++ b/drivers/firmware/arm_scmi/clock.c -@@ -204,7 +204,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, - - if (rate_discrete && rate) { - clk->list.num_rates = tot_rate_cnt; -- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); -+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate), -+ rate_cmp_func, NULL); - } - - clk->rate_discrete = rate_discrete; -@@ -314,9 +315,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph) - static const struct scmi_clock_info * - scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id) - { -+ struct scmi_clock_info *clk; - struct clock_info *ci = ph->get_priv(ph); -- struct scmi_clock_info *clk = ci->clk + clk_id; - -+ if (clk_id >= ci->num_clocks) -+ return NULL; -+ -+ clk = ci->clk + clk_id; - if (!clk->name[0]) - return NULL; - -diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h -index dea1bfbe10527..b9f5829c0c4dd 100644 ---- a/drivers/firmware/arm_scmi/common.h -+++ b/drivers/firmware/arm_scmi/common.h -@@ -272,6 +272,7 @@ struct scmi_xfer_ops { - struct scmi_revision_info * - scmi_revision_area_get(const struct scmi_protocol_handle *ph); - int scmi_handle_put(const struct scmi_handle *handle); -+void scmi_device_link_add(struct device *consumer, struct device *supplier); - struct scmi_handle *scmi_handle_get(struct device *dev); - void scmi_set_handle(struct scmi_device *scmi_dev); - void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, -diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c -index b406b3f78f467..7ccda7d720a19 100644 ---- a/drivers/firmware/arm_scmi/driver.c -+++ b/drivers/firmware/arm_scmi/driver.c -@@ -652,7 +652,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, - - xfer = 
scmi_xfer_command_acquire(cinfo, msg_hdr); - if (IS_ERR(xfer)) { -- scmi_clear_channel(info, cinfo); -+ if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP) -+ scmi_clear_channel(info, cinfo); - return; - } - -@@ -782,6 +783,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph, - xfer->hdr.protocol_id, xfer->hdr.seq, - xfer->hdr.poll_completion); - -+ /* Clear any stale status */ -+ xfer->hdr.status = SCMI_SUCCESS; - xfer->state = SCMI_XFER_SENT_OK; - /* - * Even though spinlocking is not needed here since no race is possible -@@ -1460,7 +1463,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo) - return ret; - - ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); -- if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE)) -+ if (!ret && !idr_is_empty(&sinfo->rx_idr)) - ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); - - return ret; -@@ -1515,8 +1518,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) - { - int ret = scmi_chan_setup(info, dev, prot_id, true); - -- if (!ret) /* Rx is optional, hence no error check */ -- scmi_chan_setup(info, dev, prot_id, false); -+ if (!ret) { -+ /* Rx is optional, report only memory errors */ -+ ret = scmi_chan_setup(info, dev, prot_id, false); -+ if (ret && ret != -ENOMEM) -+ ret = 0; -+ } - - return ret; - } -@@ -1726,10 +1733,16 @@ int scmi_protocol_device_request(const struct scmi_device_id *id_table) - sdev = scmi_get_protocol_device(child, info, - id_table->protocol_id, - id_table->name); -- /* Set handle if not already set: device existed */ -- if (sdev && !sdev->handle) -- sdev->handle = -- scmi_handle_get_from_info_unlocked(info); -+ if (sdev) { -+ /* Set handle if not already set: device existed */ -+ if (!sdev->handle) -+ sdev->handle = -+ scmi_handle_get_from_info_unlocked(info); -+ /* Relink consumer and suppliers */ -+ if (sdev->handle) -+ scmi_device_link_add(&sdev->dev, -+ sdev->handle->dev); -+ } - } else { - dev_err(info->dev, - "Failed. 
SCMI protocol %d not active.\n", -@@ -1915,20 +1928,17 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) - - static int scmi_remove(struct platform_device *pdev) - { -- int ret = 0, id; -+ int ret, id; - struct scmi_info *info = platform_get_drvdata(pdev); - struct device_node *child; - - mutex_lock(&scmi_list_mutex); - if (info->users) -- ret = -EBUSY; -- else -- list_del(&info->node); -+ dev_warn(&pdev->dev, -+ "Still active SCMI users will be forcibly unbound.\n"); -+ list_del(&info->node); - mutex_unlock(&scmi_list_mutex); - -- if (ret) -- return ret; -- - scmi_notification_exit(&info->handle); - - mutex_lock(&info->protocols_mtx); -@@ -1940,7 +1950,11 @@ static int scmi_remove(struct platform_device *pdev) - idr_destroy(&info->active_protocols); - - /* Safe to free channels since no more users */ -- return scmi_cleanup_txrx_channels(info); -+ ret = scmi_cleanup_txrx_channels(info); -+ if (ret) -+ dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); -+ -+ return 0; - } - - static ssize_t protocol_version_show(struct device *dev, -@@ -2008,6 +2022,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); - static struct platform_driver scmi_driver = { - .driver = { - .name = "arm-scmi", -+ .suppress_bind_attrs = true, - .of_match_table = scmi_of_match, - .dev_groups = versions_groups, - }, -@@ -2112,7 +2127,7 @@ static void __exit scmi_driver_exit(void) - } - module_exit(scmi_driver_exit); - --MODULE_ALIAS("platform: arm-scmi"); -+MODULE_ALIAS("platform:arm-scmi"); - MODULE_AUTHOR("Sudeep Holla "); - MODULE_DESCRIPTION("ARM SCMI protocol driver"); - MODULE_LICENSE("GPL v2"); -diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c -index e09eb12bf4219..d1400de17eca7 100644 ---- a/drivers/firmware/arm_scmi/mailbox.c -+++ b/drivers/firmware/arm_scmi/mailbox.c -@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device *dev, int idx) - "#mbox-cells", idx, NULL); - } - -+static int mailbox_chan_validate(struct device *cdev) -+{ -+ int num_mb, num_sh, ret = 0; -+ struct device_node *np = cdev->of_node; -+ -+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); -+ num_sh = of_count_phandle_with_args(np, "shmem", NULL); -+ /* Bail out if mboxes and shmem descriptors are inconsistent */ -+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) { -+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n", -+ of_node_full_name(np)); -+ return -EINVAL; -+ } -+ -+ if (num_sh > 1) { -+ struct device_node *np_tx, *np_rx; -+ -+ np_tx = of_parse_phandle(np, "shmem", 0); -+ np_rx = of_parse_phandle(np, "shmem", 1); -+ /* SCMI Tx and Rx shared mem areas have to be distinct */ -+ if (!np_tx || !np_rx || np_tx == np_rx) { -+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", -+ of_node_full_name(np)); -+ ret = -EINVAL; -+ } -+ -+ of_node_put(np_tx); -+ of_node_put(np_rx); -+ } -+ -+ return ret; -+} -+ - static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - bool tx) - { -@@ -64,13 +97,19 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - resource_size_t size; - struct resource res; - -+ ret = mailbox_chan_validate(cdev); -+ if (ret) -+ return ret; -+ - smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); - if (!smbox) - return -ENOMEM; - - shmem = of_parse_phandle(cdev->of_node, "shmem", idx); -- if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) -+ if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { -+ of_node_put(shmem); - return -ENXIO; -+ } - - ret = 
of_address_to_resource(shmem, 0, &res); - of_node_put(shmem); -diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c -index 9bf2478ec6d17..e80a782058458 100644 ---- a/drivers/firmware/arm_scmi/reset.c -+++ b/drivers/firmware/arm_scmi/reset.c -@@ -152,9 +152,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, - struct scmi_xfer *t; - struct scmi_msg_reset_domain_reset *dom; - struct scmi_reset_info *pi = ph->get_priv(ph); -- struct reset_dom_info *rdom = pi->dom_info + domain; -+ struct reset_dom_info *rdom; - -- if (rdom->async_reset) -+ if (domain >= pi->num_domains) -+ return -EINVAL; -+ -+ rdom = pi->dom_info + domain; -+ if (rdom->async_reset && flags & AUTONOMOUS_RESET) - flags |= ASYNCHRONOUS_RESET; - - ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t); -@@ -166,7 +170,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, - dom->flags = cpu_to_le32(flags); - dom->reset_state = cpu_to_le32(state); - -- if (rdom->async_reset) -+ if (flags & ASYNCHRONOUS_RESET) - ret = ph->xops->do_xfer_with_response(ph, t); - else - ret = ph->xops->do_xfer(ph, t); -diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c -index 4371fdcd5a73f..0e05a79de82d8 100644 ---- a/drivers/firmware/arm_scmi/scmi_pm_domain.c -+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c -@@ -8,7 +8,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain) - return scmi_pd_power(domain, false); - } - --static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev) --{ -- int ret; -- -- ret = pm_clk_create(dev); -- if (ret) -- return ret; -- -- ret = of_pm_clk_add_clks(dev); -- if (ret >= 0) -- return 0; -- -- pm_clk_destroy(dev); -- return ret; --} -- --static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev) --{ -- pm_clk_destroy(dev); --} -- - static int scmi_pm_domain_probe(struct scmi_device *sdev) - { - int num_domains, i; -@@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) - scmi_pd->genpd.name = scmi_pd->name; - scmi_pd->genpd.power_off = scmi_pd_power_off; - scmi_pd->genpd.power_on = scmi_pd_power_on; -- scmi_pd->genpd.attach_dev = scmi_pd_attach_dev; -- scmi_pd->genpd.detach_dev = scmi_pd_detach_dev; -- scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK | -- GENPD_FLAG_ACTIVE_WAKEUP; - - pm_genpd_init(&scmi_pd->genpd, NULL, - state == SCMI_POWER_STATE_GENERIC_OFF); -@@ -138,9 +112,26 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) - scmi_pd_data->domains = domains; - scmi_pd_data->num_domains = num_domains; - -- of_genpd_add_provider_onecell(np, scmi_pd_data); -+ dev_set_drvdata(dev, scmi_pd_data); - -- return 0; -+ return of_genpd_add_provider_onecell(np, scmi_pd_data); -+} -+ -+static void scmi_pm_domain_remove(struct scmi_device *sdev) -+{ -+ int i; -+ struct genpd_onecell_data *scmi_pd_data; -+ struct device *dev = &sdev->dev; -+ struct device_node *np = dev->of_node; -+ -+ of_genpd_del_provider(np); -+ -+ scmi_pd_data = dev_get_drvdata(dev); -+ for (i = 0; i < scmi_pd_data->num_domains; i++) { -+ if (!scmi_pd_data->domains[i]) -+ continue; -+ pm_genpd_remove(scmi_pd_data->domains[i]); -+ } - } - - static const struct scmi_device_id scmi_id_table[] = { -@@ -152,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table); - static struct scmi_driver scmi_power_domain_driver = { - .name = "scmi-power-domain", - 
.probe = scmi_pm_domain_probe, -+ .remove = scmi_pm_domain_remove, - .id_table = scmi_id_table, - }; - module_scmi_driver(scmi_power_domain_driver); -diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c -index 308471586381f..1ed66d13c06c4 100644 ---- a/drivers/firmware/arm_scmi/sensors.c -+++ b/drivers/firmware/arm_scmi/sensors.c -@@ -631,16 +631,19 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, - { - int ret; - struct scmi_xfer *t; -+ struct sensors_info *si = ph->get_priv(ph); -+ -+ if (sensor_id >= si->num_sensors) -+ return -EINVAL; - - ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET, - sizeof(__le32), sizeof(__le32), &t); - if (ret) - return ret; - -- put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf); -+ put_unaligned_le32(sensor_id, t->tx.buf); - ret = ph->xops->do_xfer(ph, t); - if (!ret) { -- struct sensors_info *si = ph->get_priv(ph); - struct scmi_sensor_info *s = si->sensors + sensor_id; - - *sensor_config = get_unaligned_le64(t->rx.buf); -@@ -657,6 +660,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph, - int ret; - struct scmi_xfer *t; - struct scmi_msg_sensor_config_set *msg; -+ struct sensors_info *si = ph->get_priv(ph); -+ -+ if (sensor_id >= si->num_sensors) -+ return -EINVAL; - - ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET, - sizeof(*msg), 0, &t); -@@ -669,7 +676,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph, - - ret = ph->xops->do_xfer(ph, t); - if (!ret) { -- struct sensors_info *si = ph->get_priv(ph); - struct scmi_sensor_info *s = si->sensors + sensor_id; - - s->sensor_config = sensor_config; -@@ -700,8 +706,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, - int ret; - struct scmi_xfer *t; - struct scmi_msg_sensor_reading_get *sensor; -+ struct scmi_sensor_info *s; - struct sensors_info *si = ph->get_priv(ph); -- struct scmi_sensor_info *s = si->sensors + sensor_id; -+ -+ if (sensor_id >= si->num_sensors) -+ return -EINVAL; - - ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET, - sizeof(*sensor), 0, &t); -@@ -710,6 +719,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, - - sensor = t->tx.buf; - sensor->id = cpu_to_le32(sensor_id); -+ s = si->sensors + sensor_id; - if (s->async) { - sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC); - ret = ph->xops->do_xfer_with_response(ph, t); -@@ -764,9 +774,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph, - int ret; - struct scmi_xfer *t; - struct scmi_msg_sensor_reading_get *sensor; -+ struct scmi_sensor_info *s; - struct sensors_info *si = ph->get_priv(ph); -- struct scmi_sensor_info *s = si->sensors + sensor_id; - -+ if (sensor_id >= si->num_sensors) -+ return -EINVAL; -+ -+ s = si->sensors + sensor_id; - if (!count || !readings || - (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis)) - return -EINVAL; -@@ -817,6 +831,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id) - { - struct sensors_info *si = ph->get_priv(ph); - -+ if (sensor_id >= si->num_sensors) -+ return NULL; -+ - return si->sensors + sensor_id; - } - -diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c -index 0e3eaea5d8526..56a1f61aa3ff2 100644 ---- a/drivers/firmware/arm_scmi/shmem.c -+++ b/drivers/firmware/arm_scmi/shmem.c -@@ -58,10 +58,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem) - void shmem_fetch_response(struct scmi_shared_mem __iomem 
*shmem, - struct scmi_xfer *xfer) - { -+ size_t len = ioread32(&shmem->length); -+ - xfer->hdr.status = ioread32(shmem->msg_payload); - /* Skip the length of header and status in shmem area i.e 8 bytes */ -- xfer->rx.len = min_t(size_t, xfer->rx.len, -- ioread32(&shmem->length) - 8); -+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); - - /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); -@@ -70,8 +71,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, - void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, - size_t max_len, struct scmi_xfer *xfer) - { -+ size_t len = ioread32(&shmem->length); -+ - /* Skip only the length of header in shmem area i.e 4 bytes */ -- xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4); -+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); - - /* Take a copy to the rx buffer.. */ - memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len); -diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c -index 4effecc3bb463..ea1caf70e8df9 100644 ---- a/drivers/firmware/arm_scmi/smc.c -+++ b/drivers/firmware/arm_scmi/smc.c -@@ -76,8 +76,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - return -ENOMEM; - - np = of_parse_phandle(cdev->of_node, "shmem", 0); -- if (!of_device_is_compatible(np, "arm,scmi-shmem")) -+ if (!of_device_is_compatible(np, "arm,scmi-shmem")) { -+ of_node_put(np); - return -ENXIO; -+ } - - ret = of_address_to_resource(np, 0, &res); - of_node_put(np); -diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c -index 11e8efb713751..0c351eeee7463 100644 ---- a/drivers/firmware/arm_scmi/virtio.c -+++ b/drivers/firmware/arm_scmi/virtio.c -@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) - } - - static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, -- struct scmi_vio_msg *msg) -+ struct scmi_vio_msg *msg, -+ struct device *dev) - { - struct scatterlist sg_in; - int rc; -@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - - rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); - if (rc) -- dev_err_once(vioch->cinfo->dev, -- "failed to add to virtqueue (%d)\n", rc); -+ dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); - else - virtqueue_kick(vioch->vqueue); - -@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) - { - if (vioch->is_rx) { -- scmi_vio_feed_vq_rx(vioch, msg); -+ scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); - } else { - /* Here IRQs are assumed to be already disabled by the caller */ - spin_lock(&vioch->lock); -@@ -247,19 +247,19 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, - for (i = 0; i < vioch->max_msg; i++) { - struct scmi_vio_msg *msg; - -- msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); -+ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - if (tx) { -- msg->request = devm_kzalloc(cinfo->dev, -+ msg->request = devm_kzalloc(dev, - VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); - if (!msg->request) - return -ENOMEM; - } - -- msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, -+ msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, - GFP_KERNEL); - if (!msg->input) - return -ENOMEM; -@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device 
*dev, - list_add_tail(&msg->list, &vioch->free_list); - spin_unlock_irqrestore(&vioch->lock, flags); - } else { -- scmi_vio_feed_vq_rx(vioch, msg); -+ scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); - } - } - -diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c -index a5048956a0be9..ac08e819088bb 100644 ---- a/drivers/firmware/arm_scmi/voltage.c -+++ b/drivers/firmware/arm_scmi/voltage.c -@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, - int cnt; - - cmd->domain_id = cpu_to_le32(v->id); -- cmd->level_index = desc_index; -+ cmd->level_index = cpu_to_le32(desc_index); - ret = ph->xops->do_xfer(ph, tl); - if (ret) - break; -diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c -index ddf0b9ff9e15c..435d0e2658a42 100644 ---- a/drivers/firmware/arm_scpi.c -+++ b/drivers/firmware/arm_scpi.c -@@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) - info->firmware_version = le32_to_cpu(caps.platform_version); - } - /* Ignore error if not implemented */ -- if (scpi_info->is_legacy && ret == -EOPNOTSUPP) -+ if (info->is_legacy && ret == -EOPNOTSUPP) - return 0; - - return ret; -@@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev) - struct resource res; - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; -+ struct scpi_drvinfo *scpi_drvinfo; - -- scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); -- if (!scpi_info) -+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); -+ if (!scpi_drvinfo) - return -ENOMEM; - - if (of_match_device(legacy_scpi_of_match, &pdev->dev)) -- scpi_info->is_legacy = true; -+ scpi_drvinfo->is_legacy = true; - - count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); - if (count < 0) { -@@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev) - return -ENODEV; - } - -- scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), -- GFP_KERNEL); -- if (!scpi_info->channels) -+ scpi_drvinfo->channels = -+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); -+ if (!scpi_drvinfo->channels) - return -ENOMEM; - -- ret = devm_add_action(dev, scpi_free_channels, scpi_info); -+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); - if (ret) - return ret; - -- for (; scpi_info->num_chans < count; scpi_info->num_chans++) { -+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { - resource_size_t size; -- int idx = scpi_info->num_chans; -- struct scpi_chan *pchan = scpi_info->channels + idx; -+ int idx = scpi_drvinfo->num_chans; -+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx; - struct mbox_client *cl = &pchan->cl; - struct device_node *shmem = of_parse_phandle(np, "shmem", idx); - -@@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev) - return ret; - } - -- scpi_info->commands = scpi_std_commands; -+ scpi_drvinfo->commands = scpi_std_commands; - -- platform_set_drvdata(pdev, scpi_info); -+ platform_set_drvdata(pdev, scpi_drvinfo); - -- if (scpi_info->is_legacy) { -+ if (scpi_drvinfo->is_legacy) { - /* Replace with legacy variants */ - scpi_ops.clk_set_val = legacy_scpi_clk_set_val; -- scpi_info->commands = scpi_legacy_commands; -+ scpi_drvinfo->commands = scpi_legacy_commands; - - /* Fill priority bitmap */ - for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) - set_bit(legacy_hpriority_cmds[idx], -- scpi_info->cmd_priority); -+ scpi_drvinfo->cmd_priority); - } - -- ret = 
scpi_init_versions(scpi_info); -+ scpi_info = scpi_drvinfo; -+ -+ ret = scpi_init_versions(scpi_drvinfo); - if (ret) { - dev_err(dev, "incorrect or no SCP firmware found\n"); -+ scpi_info = NULL; - return ret; - } - -- if (scpi_info->is_legacy && !scpi_info->protocol_version && -- !scpi_info->firmware_version) -+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version && -+ !scpi_drvinfo->firmware_version) - dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); - else - dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", - FIELD_GET(PROTO_REV_MAJOR_MASK, -- scpi_info->protocol_version), -+ scpi_drvinfo->protocol_version), - FIELD_GET(PROTO_REV_MINOR_MASK, -- scpi_info->protocol_version), -+ scpi_drvinfo->protocol_version), - FIELD_GET(FW_REV_MAJOR_MASK, -- scpi_info->firmware_version), -+ scpi_drvinfo->firmware_version), - FIELD_GET(FW_REV_MINOR_MASK, -- scpi_info->firmware_version), -+ scpi_drvinfo->firmware_version), - FIELD_GET(FW_REV_PATCH_MASK, -- scpi_info->firmware_version)); -- scpi_info->scpi_ops = &scpi_ops; -+ scpi_drvinfo->firmware_version)); -+ -+ scpi_drvinfo->scpi_ops = &scpi_ops; - -- return devm_of_platform_populate(dev); -+ ret = devm_of_platform_populate(dev); -+ if (ret) -+ scpi_info = NULL; -+ -+ return ret; - } - - static const struct of_device_id scpi_of_match[] = { -diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c -index a7e762c352f95..285fe7ad490d1 100644 ---- a/drivers/firmware/arm_sdei.c -+++ b/drivers/firmware/arm_sdei.c -@@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id, - /* entry point from firmware to arch asm code */ - static unsigned long sdei_entry_point; - -+static int sdei_hp_state; -+ - struct sdei_event { - /* These three are protected by the sdei_list_lock */ - struct list_head list; -@@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void) - { - int err; - -- WARN_ON_ONCE(preemptible()); -- - err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL); - if (err && err != -EIO) { - pr_warn_once("failed to mask CPU[%u]: %d\n", -@@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void) - - static void _ipi_mask_cpu(void *ignored) - { -+ WARN_ON_ONCE(preemptible()); - sdei_mask_local_cpu(); - } - -@@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void) - { - int err; - -- WARN_ON_ONCE(preemptible()); -- - err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL); - if (err && err != -EIO) { - pr_warn_once("failed to unmask CPU[%u]: %d\n", -@@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void) - - static void _ipi_unmask_cpu(void *ignored) - { -+ WARN_ON_ONCE(preemptible()); - sdei_unmask_local_cpu(); - } - -@@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored) - { - int err; - -+ WARN_ON_ONCE(preemptible()); -+ - err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0, - NULL); - if (err && err != -EIO) -@@ -389,8 +391,6 @@ static void _local_event_enable(void *data) - int err; - struct sdei_crosscall_args *arg = data; - -- WARN_ON_ONCE(preemptible()); -- - err = sdei_api_event_enable(arg->event->event_num); - - sdei_cross_call_return(arg, err); -@@ -479,8 +479,6 @@ static void _local_event_unregister(void *data) - int err; - struct sdei_crosscall_args *arg = data; - -- WARN_ON_ONCE(preemptible()); -- - err = sdei_api_event_unregister(arg->event->event_num); - - sdei_cross_call_return(arg, err); -@@ -561,8 +559,6 @@ static void _local_event_register(void *data) - struct sdei_registered_event *reg; - struct sdei_crosscall_args *arg = data; - -- 
WARN_ON(preemptible()); -- - reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id()); - err = sdei_api_event_register(arg->event->event_num, sdei_entry_point, - reg, 0, 0); -@@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action, - { - int rv; - -+ WARN_ON_ONCE(preemptible()); -+ - switch (action) { - case CPU_PM_ENTER: - rv = sdei_mask_local_cpu(); -@@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev) - int err; - - /* unregister private events */ -- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING); -+ cpuhp_remove_state(sdei_entry_point); - - err = sdei_unregister_shared(); - if (err) -@@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev) - return err; - } - -- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI", -+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", - &sdei_cpuhp_up, &sdei_cpuhp_down); -- if (err) -+ if (err < 0) { - pr_warn("Failed to re-register CPU hotplug notifier...\n"); -+ return err; -+ } - -- return err; -+ sdei_hp_state = err; -+ return 0; - } - - static int sdei_device_restore(struct device *dev) -@@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action, - * We are going to reset the interface, after this there is no point - * doing work when we take CPUs offline. - */ -- cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING); -+ cpuhp_remove_state(sdei_hp_state); - - sdei_platform_reset(); - -@@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev) - goto remove_cpupm; - } - -- err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI", -+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", - &sdei_cpuhp_up, &sdei_cpuhp_down); -- if (err) { -+ if (err < 0) { - pr_warn("Failed to register CPU hotplug notifier...\n"); - goto remove_reboot; - } - -+ sdei_hp_state = err; -+ - return 0; - - remove_reboot: -@@ -1059,14 +1062,14 @@ static bool __init sdei_present_acpi(void) - return true; - } - --static int __init sdei_init(void) -+void __init sdei_init(void) - { - struct platform_device *pdev; - int ret; - - ret = platform_driver_register(&sdei_driver); - if (ret || !sdei_present_acpi()) -- return ret; -+ return; - - pdev = platform_device_register_simple(sdei_driver.driver.name, - 0, NULL, 0); -@@ -1076,17 +1079,8 @@ static int __init sdei_init(void) - pr_info("Failed to register ACPI:SDEI platform device %d\n", - ret); - } -- -- return ret; - } - --/* -- * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register -- * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised -- * by device_initcall(). We want to be called in the middle. -- */ --subsys_initcall_sync(sdei_init); -- - int sdei_event_handler(struct pt_regs *regs, - struct sdei_registered_event *arg) - { -@@ -1101,3 +1095,22 @@ int sdei_event_handler(struct pt_regs *regs, - return err; - } - NOKPROBE_SYMBOL(sdei_event_handler); -+ -+void sdei_handler_abort(void) -+{ -+ /* -+ * If the crash happened in an SDEI event handler then we need to -+ * finish the handler with the firmware so that we can have working -+ * interrupts in the crash kernel. 
-+ */ -+ if (__this_cpu_read(sdei_active_critical_event)) { -+ pr_warn("still in SDEI critical event context, attempting to finish handler.\n"); -+ __sdei_handler_abort(); -+ __this_cpu_write(sdei_active_critical_event, NULL); -+ } -+ if (__this_cpu_read(sdei_active_normal_event)) { -+ pr_warn("still in SDEI normal event context, attempting to finish handler.\n"); -+ __sdei_handler_abort(); -+ __this_cpu_write(sdei_active_normal_event, NULL); -+ } -+} -diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c -index 8b8127fa89553..3d57b08320df9 100644 ---- a/drivers/firmware/dmi-sysfs.c -+++ b/drivers/firmware/dmi-sysfs.c -@@ -602,16 +602,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh, - *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL, - "%d-%d", dh->type, entry->instance); - -- if (*ret) { -- kfree(entry); -- return; -- } -- - /* Thread on the global list for cleanup */ - spin_lock(&entry_list_lock); - list_add_tail(&entry->list, &entry_list); - spin_unlock(&entry_list_lock); - -+ if (*ret) { -+ kobject_put(&entry->kobj); -+ return; -+ } -+ - /* Handle specializations by type */ - switch (dh->type) { - case DMI_ENTRY_SYSTEM_EVENT_LOG: -diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c -index 4c3201e290e29..ea84108035eb0 100644 ---- a/drivers/firmware/efi/apple-properties.c -+++ b/drivers/firmware/efi/apple-properties.c -@@ -24,7 +24,7 @@ static bool dump_properties __initdata; - static int __init dump_properties_enable(char *arg) - { - dump_properties = true; -- return 0; -+ return 1; - } - - __setup("dump_apple_properties", dump_properties_enable); -diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c -index 4dde8edd53b62..3e8d4b51a8140 100644 ---- a/drivers/firmware/efi/capsule-loader.c -+++ b/drivers/firmware/efi/capsule-loader.c -@@ -242,29 +242,6 @@ failed: - return ret; - } - --/** -- * efi_capsule_flush - called by file close or file flush -- * @file: file pointer -- * @id: not used -- * -- * If a capsule is being partially uploaded then calling this function -- * will be treated as upload termination and will free those completed -- * buffer pages and -ECANCELED will be returned. 
-- **/ --static int efi_capsule_flush(struct file *file, fl_owner_t id) --{ -- int ret = 0; -- struct capsule_info *cap_info = file->private_data; -- -- if (cap_info->index > 0) { -- pr_err("capsule upload not complete\n"); -- efi_free_all_buff_pages(cap_info); -- ret = -ECANCELED; -- } -- -- return ret; --} -- - /** - * efi_capsule_release - called by file close - * @inode: not used -@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) - { - struct capsule_info *cap_info = file->private_data; - -+ if (cap_info->index > 0 && -+ (cap_info->header.headersize == 0 || -+ cap_info->count < cap_info->total_size)) { -+ pr_err("capsule upload not complete\n"); -+ efi_free_all_buff_pages(cap_info); -+ } -+ - kfree(cap_info->pages); - kfree(cap_info->phys); - kfree(file->private_data); -@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = { - .owner = THIS_MODULE, - .open = efi_capsule_open, - .write = efi_capsule_write, -- .flush = efi_capsule_flush, - .release = efi_capsule_release, - .llseek = no_llseek, - }; -diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c -index b19ce1a83f91a..b2c829e95bd14 100644 ---- a/drivers/firmware/efi/efi-init.c -+++ b/drivers/firmware/efi/efi-init.c -@@ -235,6 +235,11 @@ void __init efi_init(void) - } - - reserve_regions(); -+ /* -+ * For memblock manipulation, the cap should come after the memblock_add(). -+ * And now, memblock is fully populated, it is time to do capping. -+ */ -+ early_init_dt_check_for_usable_mem_range(); - efi_esrt_init(); - efi_mokvar_table_init(); - -diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c -index 0ef086e43090b..7e771c56c13c6 100644 ---- a/drivers/firmware/efi/efi-pstore.c -+++ b/drivers/firmware/efi/efi-pstore.c -@@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record) - efi_name[i] = name[i]; - - ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, -- preemptible(), record->size, record->psi->buf); -+ false, record->size, record->psi->buf); - - if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) - if (!schedule_work(&efivar_work)) -diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index 847f33ffc4aed..332739f3eded5 100644 ---- a/drivers/firmware/efi/efi.c -+++ b/drivers/firmware/efi/efi.c -@@ -209,7 +209,7 @@ static int __init efivar_ssdt_setup(char *str) - memcpy(efivar_ssdt, str, strlen(str)); - else - pr_warn("efivar_ssdt: name too long: %s\n", str); -- return 0; -+ return 1; - } - __setup("efivar_ssdt=", efivar_ssdt_setup); - -@@ -385,8 +385,8 @@ static int __init efisubsys_init(void) - efi_kobj = kobject_create_and_add("efi", firmware_kobj); - if (!efi_kobj) { - pr_err("efi: Firmware registration failed.\n"); -- destroy_workqueue(efi_rts_wq); -- return -ENOMEM; -+ error = -ENOMEM; -+ goto err_destroy_wq; - } - - if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | -@@ -429,7 +429,10 @@ err_unregister: - generic_ops_unregister(); - err_put: - kobject_put(efi_kobj); -- destroy_workqueue(efi_rts_wq); -+err_destroy_wq: -+ if (efi_rts_wq) -+ destroy_workqueue(efi_rts_wq); -+ - return error; - } - -@@ -590,7 +593,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, - - seed = early_memremap(efi_rng_seed, sizeof(*seed)); - if (seed != NULL) { -- size = READ_ONCE(seed->size); -+ size = min_t(u32, seed->size, SZ_1K); // sanity check - early_memunmap(seed, sizeof(*seed)); - } else { - pr_err("Could not map 
UEFI random seed!\n"); -@@ -599,8 +602,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, - seed = early_memremap(efi_rng_seed, - sizeof(*seed) + size); - if (seed != NULL) { -- pr_notice("seeding entropy pool\n"); - add_bootloader_randomness(seed->bits, size); -+ memzero_explicit(seed->bits, size); - early_memunmap(seed, sizeof(*seed) + size); - } else { - pr_err("Could not map UEFI random seed!\n"); -@@ -719,6 +722,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, - systab_hdr->revision >> 16, - systab_hdr->revision & 0xffff, - vendor); -+ -+ if (IS_ENABLED(CONFIG_X86_64) && -+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION && -+ !strcmp(vendor, "Apple")) { -+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n"); -+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION; -+ } - } - - static __initdata char memory_type_name[][13] = { -@@ -940,6 +950,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) - /* first try to find a slot in an existing linked list entry */ - for (prsv = efi_memreserve_root->next; prsv; ) { - rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB); -+ if (!rsv) -+ return -ENOMEM; - index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); - if (index < rsv->size) { - rsv->entry[index].base = addr; -diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile -index d0537573501e9..2c67f71f23753 100644 ---- a/drivers/firmware/efi/libstub/Makefile -+++ b/drivers/firmware/efi/libstub/Makefile -@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ - $(call cc-option,-fno-addrsig) \ - -D__DISABLE_EXPORTS - -+# -+# struct randomization only makes sense for Linux internal types, which the EFI -+# stub code never touches, so let's turn off struct randomization for the stub -+# altogether -+# -+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) -+ - # remove SCS flags from all objects in this directory - KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) - # disable LTO -diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c -index 2363fee9211c9..9cc556013d085 100644 ---- a/drivers/firmware/efi/libstub/arm64-stub.c -+++ b/drivers/firmware/efi/libstub/arm64-stub.c -@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, - if (image->image_base != _text) - efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); - -- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) -- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", -- EFI_KIMG_ALIGN >> 10); -+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN)) -+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n", -+ SEGMENT_ALIGN >> 10); - - kernel_size = _edata - _text; - kernel_memsize = kernel_size + (_end - _edata); -diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c -index d489bdc645fe1..2a00eb627c3c3 100644 ---- a/drivers/firmware/efi/libstub/efi-stub-helper.c -+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c -@@ -439,8 +439,10 @@ efi_status_t efi_exit_boot_services(void *handle, - { - efi_status_t status; - -- status = efi_get_memory_map(map); -+ if (efi_disable_pci_dma) -+ efi_pci_disable_bridge_busmaster(); - -+ status = efi_get_memory_map(map); - if (status != EFI_SUCCESS) - goto fail; - -@@ -448,9 +450,6 @@ efi_status_t efi_exit_boot_services(void *handle, - if (status != 
EFI_SUCCESS) - goto free_map; - -- if (efi_disable_pci_dma) -- efi_pci_disable_bridge_busmaster(); -- - status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); - - if (status == EFI_INVALID_PARAMETER) { -diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h -index cde0a2ef507d9..fbffdd7290a31 100644 ---- a/drivers/firmware/efi/libstub/efistub.h -+++ b/drivers/firmware/efi/libstub/efistub.h -@@ -766,6 +766,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); - efi_status_t efi_random_alloc(unsigned long size, unsigned long align, - unsigned long *addr, unsigned long random_seed); - -+efi_status_t efi_random_get_seed(void); -+ - efi_status_t check_platform_features(void); - - void *get_efi_config_table(efi_guid_t guid); -diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c -index fe567be0f118b..804f542be3f28 100644 ---- a/drivers/firmware/efi/libstub/fdt.c -+++ b/drivers/firmware/efi/libstub/fdt.c -@@ -280,14 +280,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, - goto fail; - } - -- /* -- * Now that we have done our final memory allocation (and free) -- * we can get the memory map key needed for exit_boot_services(). -- */ -- status = efi_get_memory_map(&map); -- if (status != EFI_SUCCESS) -- goto fail_free_new_fdt; -- - status = update_fdt((void *)fdt_addr, fdt_size, - (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, - initrd_addr, initrd_size); -diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c -index 24aa375353724..f85d2c0668777 100644 ---- a/drivers/firmware/efi/libstub/random.c -+++ b/drivers/firmware/efi/libstub/random.c -@@ -67,22 +67,43 @@ efi_status_t efi_random_get_seed(void) - efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; - efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; - efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; -+ struct linux_efi_random_seed *prev_seed, *seed = NULL; -+ int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE; - efi_rng_protocol_t *rng = NULL; -- struct linux_efi_random_seed *seed = NULL; - efi_status_t status; - - status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); - if (status != EFI_SUCCESS) - return status; - -- status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, -- sizeof(*seed) + EFI_RANDOM_SEED_SIZE, -+ /* -+ * Check whether a seed was provided by a prior boot stage. In that -+ * case, instead of overwriting it, let's create a new buffer that can -+ * hold both, and concatenate the existing and the new seeds. -+ * Note that we should read the seed size with caution, in case the -+ * table got corrupted in memory somehow. 
-+ */ -+ prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID); -+ if (prev_seed && prev_seed->size <= 512U) { -+ prev_seed_size = prev_seed->size; -+ seed_size += prev_seed_size; -+ } -+ -+ /* -+ * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the -+ * allocation will survive a kexec reboot (although we refresh the seed -+ * beforehand) -+ */ -+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, -+ struct_size(seed, bits, seed_size), - (void **)&seed); -- if (status != EFI_SUCCESS) -- return status; -+ if (status != EFI_SUCCESS) { -+ efi_warn("Failed to allocate memory for RNG seed.\n"); -+ goto err_warn; -+ } - - status = efi_call_proto(rng, get_rng, &rng_algo_raw, -- EFI_RANDOM_SEED_SIZE, seed->bits); -+ EFI_RANDOM_SEED_SIZE, seed->bits); - - if (status == EFI_UNSUPPORTED) - /* -@@ -95,14 +116,28 @@ efi_status_t efi_random_get_seed(void) - if (status != EFI_SUCCESS) - goto err_freepool; - -- seed->size = EFI_RANDOM_SEED_SIZE; -+ seed->size = seed_size; -+ if (prev_seed_size) -+ memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits, -+ prev_seed_size); -+ - status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); - if (status != EFI_SUCCESS) - goto err_freepool; - -+ if (prev_seed_size) { -+ /* wipe and free the old seed if we managed to install the new one */ -+ memzero_explicit(prev_seed->bits, prev_seed_size); -+ efi_bs_call(free_pool, prev_seed); -+ } - return EFI_SUCCESS; - - err_freepool: -+ memzero_explicit(seed, struct_size(seed, bits, seed_size)); - efi_bs_call(free_pool, seed); -+ efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n"); -+err_warn: -+ if (prev_seed) -+ efi_warn("Retaining bootloader-supplied seed only"); - return status; - } -diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c -index 380e4e2513994..9c460843442f5 100644 ---- a/drivers/firmware/efi/libstub/riscv-stub.c -+++ b/drivers/firmware/efi/libstub/riscv-stub.c -@@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); - - static u32 hartid; - --static u32 get_boot_hartid_from_fdt(void) -+static int get_boot_hartid_from_fdt(void) - { - const void *fdt; - int chosen_node, len; -@@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) - - fdt = get_efi_config_table(DEVICE_TREE_GUID); - if (!fdt) -- return U32_MAX; -+ return -EINVAL; - - chosen_node = fdt_path_offset(fdt, "/chosen"); - if (chosen_node < 0) -- return U32_MAX; -+ return -EINVAL; - - prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); - if (!prop || len != sizeof(u32)) -- return U32_MAX; -+ return -EINVAL; - -- return fdt32_to_cpu(*prop); -+ hartid = fdt32_to_cpu(*prop); -+ return 0; - } - - efi_status_t check_platform_features(void) - { -- hartid = get_boot_hartid_from_fdt(); -- if (hartid == U32_MAX) { -+ int ret; -+ -+ ret = get_boot_hartid_from_fdt(); -+ if (ret) { - efi_err("/chosen/boot-hartid missing or invalid!\n"); - return EFI_UNSUPPORTED; - } -diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c -index 8a18930f3eb69..516f4f0069bd2 100644 ---- a/drivers/firmware/efi/libstub/secureboot.c -+++ b/drivers/firmware/efi/libstub/secureboot.c -@@ -14,7 +14,7 @@ - - /* SHIM variables */ - static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; --static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; -+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; - - static efi_status_t get_var(efi_char16_t *name, 
efi_guid_t *vendor, u32 *attr, - unsigned long *data_size, void *data) -@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) - - /* - * See if a user has put the shim into insecure mode. If so, and if the -- * variable doesn't have the runtime attribute set, we might as well -- * honor that. -+ * variable doesn't have the non-volatile attribute set, we might as -+ * well honor that. - */ - size = sizeof(moksbstate); - status = get_efi_var(shim_MokSBState_name, &shim_guid, -@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) - /* If it fails, we don't care why. Default to secure */ - if (status != EFI_SUCCESS) - goto secure_boot_enabled; -- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) -+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) - return efi_secureboot_mode_disabled; - - secure_boot_enabled: -diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c -index f14c4ff5839f9..1cb4466e3c108 100644 ---- a/drivers/firmware/efi/libstub/x86-stub.c -+++ b/drivers/firmware/efi/libstub/x86-stub.c -@@ -60,7 +60,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) - rom->data.type = SETUP_PCI; - rom->data.len = size - sizeof(struct setup_data); - rom->data.next = 0; -- rom->pcilen = pci->romsize; -+ rom->pcilen = romsize; - *__rom = rom; - - status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, -@@ -414,6 +414,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, - hdr->ramdisk_image = 0; - hdr->ramdisk_size = 0; - -+ /* -+ * Disregard any setup data that was provided by the bootloader: -+ * setup_data could be pointing anywhere, and we have no way of -+ * authenticating or validating the payload. -+ */ -+ hdr->setup_data = 0; -+ - efi_stub_entry(handle, sys_table_arg, boot_params); - /* not reached */ - -diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c -index 0a9aba5f9ceff..f178b2984dfb2 100644 ---- a/drivers/firmware/efi/memattr.c -+++ b/drivers/firmware/efi/memattr.c -@@ -33,7 +33,7 @@ int __init efi_memattr_init(void) - return -ENOMEM; - } - -- if (tbl->version > 1) { -+ if (tbl->version > 2) { - pr_warn("Unexpected EFI Memory Attributes table version %d\n", - tbl->version); - goto unmap; -diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c -index f3e54f6616f02..60075e0e4943a 100644 ---- a/drivers/firmware/efi/runtime-wrappers.c -+++ b/drivers/firmware/efi/runtime-wrappers.c -@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work; - \ - if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \ - pr_warn_once("EFI Runtime Services are disabled!\n"); \ -+ efi_rts_work.status = EFI_DEVICE_ERROR; \ - goto exit; \ - } \ - \ -diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c -index 4c7c9dd7733f9..24d6f6e08df8b 100644 ---- a/drivers/firmware/efi/sysfb_efi.c -+++ b/drivers/firmware/efi/sysfb_efi.c -@@ -266,6 +266,22 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { - "Lenovo ideapad D330-10IGM"), - }, - }, -+ { -+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */ -+ .matches = { -+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, -+ "IdeaPad Duet 3 10IGL5"), -+ }, -+ }, -+ { -+ /* Lenovo Yoga Book X91F / X91L */ -+ .matches = { -+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), -+ /* Non exact match to match F + L versions */ -+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), -+ }, -+ }, - {}, - }; - 
-@@ -335,7 +351,7 @@ static const struct fwnode_operations efifb_fwnode_ops = { - #ifdef CONFIG_EFI - static struct fwnode_handle efifb_fwnode; - --__init void sysfb_apply_efi_quirks(struct platform_device *pd) -+__init void sysfb_apply_efi_quirks(void) - { - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || - !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) -@@ -349,7 +365,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd) - screen_info.lfb_height = temp; - screen_info.lfb_linelength = 4 * screen_info.lfb_width; - } -+} - -+__init void sysfb_set_efifb_fwnode(struct platform_device *pd) -+{ - if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) { - fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); - pd->dev.fwnode = &efifb_fwnode; -diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c -index 8f665678e9e39..e8d69bd548f3f 100644 ---- a/drivers/firmware/efi/tpm.c -+++ b/drivers/firmware/efi/tpm.c -@@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void) - goto out_calc; - } - -- memblock_reserve((unsigned long)final_tbl, -+ memblock_reserve(efi.tpm_final_log, - tbl_size + sizeof(*final_tbl)); - efi_tpm_final_log_size = tbl_size; - -diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c -index abdc8a6a39631..cae590bd08f27 100644 ---- a/drivers/firmware/efi/vars.c -+++ b/drivers/firmware/efi/vars.c -@@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, - { - const struct efivar_operations *ops; - efi_status_t status; -+ unsigned long varsize; - - if (!__efivars) - return -EINVAL; -@@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, - return efivar_entry_set_nonblocking(name, vendor, attributes, - size, data); - -+ varsize = size + ucs2_strsize(name, 1024); - if (!block) { - if (down_trylock(&efivars_lock)) - return -EBUSY; -+ status = check_var_size_nonblocking(attributes, varsize); - } else { - if (down_interruptible(&efivars_lock)) - return -EINTR; -+ status = check_var_size(attributes, varsize); - } - -- status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) { - up(&efivars_lock); - return -ENOSPC; -diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig -index 97968aece54f8..983e07dc022ed 100644 ---- a/drivers/firmware/google/Kconfig -+++ b/drivers/firmware/google/Kconfig -@@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE - bool "Google Firmware Drivers" - default n - help -- These firmware drivers are used by Google's servers. They are -- only useful if you are working directly on one of their -- proprietary servers. If in doubt, say "N". -+ These firmware drivers are used by Google servers, -+ Chromebooks and other devices using coreboot firmware. -+ If in doubt, say "N". - - if GOOGLE_FIRMWARE - -@@ -21,7 +21,7 @@ config GOOGLE_SMI - - config GOOGLE_COREBOOT_TABLE - tristate "Coreboot Table Access" -- depends on ACPI || OF -+ depends on HAS_IOMEM && (ACPI || OF) - help - This option enables the coreboot_table module, which provides other - firmware modules access to the coreboot table. 
The coreboot table -diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c -index c52bcaa9def60..f3694d3478019 100644 ---- a/drivers/firmware/google/coreboot_table.c -+++ b/drivers/firmware/google/coreboot_table.c -@@ -93,7 +93,12 @@ static int coreboot_table_populate(struct device *dev, void *ptr) - for (i = 0; i < header->table_entries; i++) { - entry = ptr_entry; - -- device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL); -+ if (entry->size < sizeof(*entry)) { -+ dev_warn(dev, "coreboot table entry too small!\n"); -+ return -EINVAL; -+ } -+ -+ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL); - if (!device) - return -ENOMEM; - -@@ -101,7 +106,7 @@ static int coreboot_table_populate(struct device *dev, void *ptr) - device->dev.parent = dev; - device->dev.bus = &coreboot_bus_type; - device->dev.release = coreboot_device_release; -- memcpy(&device->entry, ptr_entry, entry->size); -+ memcpy(device->raw, ptr_entry, entry->size); - - ret = device_register(&device->dev); - if (ret) { -@@ -149,12 +154,8 @@ static int coreboot_table_probe(struct platform_device *pdev) - if (!ptr) - return -ENOMEM; - -- ret = bus_register(&coreboot_bus_type); -- if (!ret) { -- ret = coreboot_table_populate(dev, ptr); -- if (ret) -- bus_unregister(&coreboot_bus_type); -- } -+ ret = coreboot_table_populate(dev, ptr); -+ - memunmap(ptr); - - return ret; -@@ -169,7 +170,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy) - static int coreboot_table_remove(struct platform_device *pdev) - { - bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister); -- bus_unregister(&coreboot_bus_type); - return 0; - } - -@@ -199,6 +199,32 @@ static struct platform_driver coreboot_table_driver = { - .of_match_table = of_match_ptr(coreboot_of_match), - }, - }; --module_platform_driver(coreboot_table_driver); -+ -+static int __init coreboot_table_driver_init(void) -+{ -+ int ret; -+ -+ ret = bus_register(&coreboot_bus_type); -+ if (ret) -+ return ret; -+ -+ ret = platform_driver_register(&coreboot_table_driver); -+ if (ret) { -+ bus_unregister(&coreboot_bus_type); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static void __exit coreboot_table_driver_exit(void) -+{ -+ platform_driver_unregister(&coreboot_table_driver); -+ bus_unregister(&coreboot_bus_type); -+} -+ -+module_init(coreboot_table_driver_init); -+module_exit(coreboot_table_driver_exit); -+ - MODULE_AUTHOR("Google, Inc."); - MODULE_LICENSE("GPL"); -diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h -index beb778674acdc..4a89277b99a39 100644 ---- a/drivers/firmware/google/coreboot_table.h -+++ b/drivers/firmware/google/coreboot_table.h -@@ -66,6 +66,7 @@ struct coreboot_device { - struct coreboot_table_entry entry; - struct lb_cbmem_ref cbmem_ref; - struct lb_framebuffer framebuffer; -+ DECLARE_FLEX_ARRAY(u8, raw); - }; - }; - -diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c -index c6dcc1ef93acf..c323a818805cc 100644 ---- a/drivers/firmware/google/framebuffer-coreboot.c -+++ b/drivers/firmware/google/framebuffer-coreboot.c -@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev) - fb->green_mask_pos == formats[i].green.offset && - fb->green_mask_size == formats[i].green.length && - fb->blue_mask_pos == formats[i].blue.offset && -- fb->blue_mask_size == formats[i].blue.length && -- fb->reserved_mask_pos == formats[i].transp.offset && -- 
fb->reserved_mask_size == formats[i].transp.length) -+ fb->blue_mask_size == formats[i].blue.length) - pdata.format = formats[i].name; - } - if (!pdata.format) -diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c -index adaa492c3d2df..871bedf533a80 100644 ---- a/drivers/firmware/google/gsmi.c -+++ b/drivers/firmware/google/gsmi.c -@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name, - memcpy(data, gsmi_dev.data_buf->start, *data_size); - - /* All variables are have the following attributes */ -- *attr = EFI_VARIABLE_NON_VOLATILE | -- EFI_VARIABLE_BOOTSERVICE_ACCESS | -- EFI_VARIABLE_RUNTIME_ACCESS; -+ if (attr) -+ *attr = EFI_VARIABLE_NON_VOLATILE | -+ EFI_VARIABLE_BOOTSERVICE_ACCESS | -+ EFI_VARIABLE_RUNTIME_ACCESS; - } - - spin_unlock_irqrestore(&gsmi_dev.lock, flags); -@@ -681,6 +682,15 @@ static struct notifier_block gsmi_die_notifier = { - static int gsmi_panic_callback(struct notifier_block *nb, - unsigned long reason, void *arg) - { -+ -+ /* -+ * Panic callbacks are executed with all other CPUs stopped, -+ * so we must not attempt to spin waiting for gsmi_dev.lock -+ * to be released. -+ */ -+ if (spin_is_locked(&gsmi_dev.lock)) -+ return NOTIFY_DONE; -+ - gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); - return NOTIFY_DONE; - } -diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c -index 77aa5c6398aa6..d081a6312627b 100644 ---- a/drivers/firmware/meson/meson_sm.c -+++ b/drivers/firmware/meson/meson_sm.c -@@ -292,6 +292,8 @@ static int __init meson_sm_probe(struct platform_device *pdev) - return -ENOMEM; - - chip = of_match_device(meson_sm_ids, dev)->data; -+ if (!chip) -+ return -EINVAL; - - if (chip->cmd_shmem_in_base) { - fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base, -diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c -index 9a369a2eda71d..116eb465cdb42 100644 ---- a/drivers/firmware/psci/psci_checker.c -+++ b/drivers/firmware/psci/psci_checker.c -@@ -155,7 +155,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) - if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) - return -ENOMEM; - -- cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), -+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups), - GFP_KERNEL); - if (!cpu_groups) { - free_cpumask_var(tmp); -diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c -index 2ee97bab74409..18e1a4b80401c 100644 ---- a/drivers/firmware/qcom_scm.c -+++ b/drivers/firmware/qcom_scm.c -@@ -252,7 +252,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, - break; - default: - pr_err("Unknown SMC convention being used\n"); -- return -EINVAL; -+ return false; - } - - ret = qcom_scm_call(dev, &desc, &res); -@@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) - }; - int ret; - -- desc.args[0] = addr; -- desc.args[1] = size; -- desc.args[2] = spare; -- desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, -- QCOM_SCM_VAL); -- - ret = qcom_scm_call(__scm->dev, &desc, NULL); - - /* the pg table has been initialized already, ignore the error */ -@@ -1326,8 +1320,7 @@ static int qcom_scm_probe(struct platform_device *pdev) - static void qcom_scm_shutdown(struct platform_device *pdev) - { - /* Clean shutdown, disable download mode to allow normal restart */ -- if (download_mode) -- qcom_scm_set_download_mode(false); -+ qcom_scm_set_download_mode(false); - } - - static const struct of_device_id qcom_scm_dt_match[] = { -diff 
--git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c -index 172c751a4f6c2..f08e056ed0ae4 100644 ---- a/drivers/firmware/qemu_fw_cfg.c -+++ b/drivers/firmware/qemu_fw_cfg.c -@@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void) - struct fw_cfg_sysfs_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { -- /* will end up invoking fw_cfg_sysfs_cache_delist() -- * via each object's release() method (i.e. destructor) -- */ -+ fw_cfg_sysfs_cache_delist(entry); - kobject_put(&entry->kobj); - } - } -@@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) - { - struct fw_cfg_sysfs_entry *entry = to_entry(kobj); - -- fw_cfg_sysfs_cache_delist(entry); - kfree(entry); - } - -@@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) - /* set file entry information */ - entry->size = be32_to_cpu(f->size); - entry->select = be16_to_cpu(f->select); -- memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); -+ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); - - /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ - err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, - fw_cfg_sel_ko, "%d", entry->select); -- if (err) { -- kobject_put(&entry->kobj); -- return err; -- } -+ if (err) -+ goto err_put_entry; - - /* add raw binary content access */ - err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); - if (err) -- goto err_add_raw; -+ goto err_del_entry; - - /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ - fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); -@@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) - fw_cfg_sysfs_cache_enlist(entry); - return 0; - --err_add_raw: -+err_del_entry: - kobject_del(&entry->kobj); -- kfree(entry); -+err_put_entry: -+ kobject_put(&entry->kobj); - return err; - } - -diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c -index 4b8978b254f9a..dba315f675bc7 100644 ---- a/drivers/firmware/raspberrypi.c -+++ b/drivers/firmware/raspberrypi.c -@@ -272,6 +272,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) - int ret = PTR_ERR(fw->chan); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to get mbox channel: %d\n", ret); -+ kfree(fw); - return ret; - } - -diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c -index 51201600d789b..800673910b511 100644 ---- a/drivers/firmware/scpi_pm_domain.c -+++ b/drivers/firmware/scpi_pm_domain.c -@@ -16,7 +16,6 @@ struct scpi_pm_domain { - struct generic_pm_domain genpd; - struct scpi_ops *ops; - u32 domain; -- char name[30]; - }; - - /* -@@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev) - - scpi_pd->domain = i; - scpi_pd->ops = scpi_ops; -- sprintf(scpi_pd->name, "%pOFn.%d", np, i); -- scpi_pd->genpd.name = scpi_pd->name; -+ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL, -+ "%pOFn.%d", np, i); -+ if (!scpi_pd->genpd.name) { -+ dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n", -+ np, i); -+ continue; -+ } - scpi_pd->genpd.power_off = scpi_pd_power_off; - scpi_pd->genpd.power_on = scpi_pd_power_on; - -diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c -index 581aa5e9b0778..dd7c3d5e8b0bb 100644 ---- a/drivers/firmware/smccc/soc_id.c -+++ b/drivers/firmware/smccc/soc_id.c -@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void) - arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - 
ARM_SMCCC_ARCH_SOC_ID, &res); - -- if (res.a0 == SMCCC_RET_NOT_SUPPORTED) { -+ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) { - pr_info("ARCH_SOC_ID not implemented, skipping ....\n"); - return 0; - } -diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c -index 2a7687911c097..c61d55ed71a38 100644 ---- a/drivers/firmware/stratix10-svc.c -+++ b/drivers/firmware/stratix10-svc.c -@@ -477,7 +477,7 @@ static int svc_normal_to_secure_thread(void *data) - case INTEL_SIP_SMC_RSU_ERROR: - pr_err("%s: STATUS_ERROR\n", __func__); - cbdata->status = BIT(SVC_STATUS_ERROR); -- cbdata->kaddr1 = NULL; -+ cbdata->kaddr1 = &res.a1; - cbdata->kaddr2 = NULL; - cbdata->kaddr3 = NULL; - pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); -@@ -622,8 +622,8 @@ svc_create_memory_pool(struct platform_device *pdev, - end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE); - paddr = begin; - size = end - begin; -- va = memremap(paddr, size, MEMREMAP_WC); -- if (!va) { -+ va = devm_memremap(dev, paddr, size, MEMREMAP_WC); -+ if (IS_ERR(va)) { - dev_err(dev, "fail to remap shared memory\n"); - return ERR_PTR(-EINVAL); - } -@@ -941,17 +941,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); - void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) - { - struct stratix10_svc_data_mem *pmem; -- size_t size = 0; - - list_for_each_entry(pmem, &svc_data_mem, node) - if (pmem->vaddr == kaddr) { -- size = pmem->size; -- break; -+ gen_pool_free(chan->ctrl->genpool, -+ (unsigned long)kaddr, pmem->size); -+ pmem->vaddr = NULL; -+ list_del(&pmem->node); -+ return; - } - -- gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size); -- pmem->vaddr = NULL; -- list_del(&pmem->node); -+ list_del(&svc_data_mem); - } - EXPORT_SYMBOL_GPL(stratix10_svc_free_memory); - -@@ -989,18 +989,22 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) - return ret; - - genpool = svc_create_memory_pool(pdev, sh_memory); -- if (!genpool) -- return -ENOMEM; -+ if (IS_ERR(genpool)) -+ return PTR_ERR(genpool); - - /* allocate service controller and supporting channel */ - controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL); -- if (!controller) -- return -ENOMEM; -+ if (!controller) { -+ ret = -ENOMEM; -+ goto err_destroy_pool; -+ } - - chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL, - sizeof(*chans), GFP_KERNEL | __GFP_ZERO); -- if (!chans) -- return -ENOMEM; -+ if (!chans) { -+ ret = -ENOMEM; -+ goto err_destroy_pool; -+ } - - controller->dev = dev; - controller->num_chans = SVC_NUM_CHANNEL; -@@ -1015,7 +1019,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) - ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); - if (ret) { - dev_err(dev, "failed to allocate FIFO\n"); -- return ret; -+ goto err_destroy_pool; - } - spin_lock_init(&controller->svc_fifo_lock); - -@@ -1060,6 +1064,8 @@ err_put_device: - platform_device_put(svc->stratix10_svc_rsu); - err_free_kfifo: - kfifo_free(&controller->svc_fifo); -+err_destroy_pool: -+ gen_pool_destroy(genpool); - return ret; - } - -diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c -index 2bfbb05f7d896..abc3279c706d1 100644 ---- a/drivers/firmware/sysfb.c -+++ b/drivers/firmware/sysfb.c -@@ -34,21 +34,61 @@ - #include - #include - -+static struct platform_device *pd; -+static DEFINE_MUTEX(disable_lock); -+static bool disabled; -+ -+static bool sysfb_unregister(void) -+{ -+ if (IS_ERR_OR_NULL(pd)) -+ return false; -+ -+ platform_device_unregister(pd); -+ pd = NULL; -+ -+ return 
true; -+} -+ -+/** -+ * sysfb_disable() - disable the Generic System Framebuffers support -+ * -+ * This disables the registration of system framebuffer devices that match the -+ * generic drivers that make use of the system framebuffer set up by firmware. -+ * -+ * It also unregisters a device if this was already registered by sysfb_init(). -+ * -+ * Context: The function can sleep. A @disable_lock mutex is acquired to serialize -+ * against sysfb_init(), that registers a system framebuffer device. -+ */ -+void sysfb_disable(void) -+{ -+ mutex_lock(&disable_lock); -+ sysfb_unregister(); -+ disabled = true; -+ mutex_unlock(&disable_lock); -+} -+EXPORT_SYMBOL_GPL(sysfb_disable); -+ - static __init int sysfb_init(void) - { - struct screen_info *si = &screen_info; - struct simplefb_platform_data mode; -- struct platform_device *pd; - const char *name; - bool compatible; -- int ret; -+ int ret = 0; -+ -+ mutex_lock(&disable_lock); -+ if (disabled) -+ goto unlock_mutex; -+ -+ sysfb_apply_efi_quirks(); - - /* try to create a simple-framebuffer device */ - compatible = sysfb_parse_mode(si, &mode); - if (compatible) { -- ret = sysfb_create_simplefb(si, &mode); -- if (!ret) -- return 0; -+ pd = sysfb_create_simplefb(si, &mode); -+ if (!IS_ERR(pd)) -+ goto unlock_mutex; - } - - /* if the FB is incompatible, create a legacy framebuffer device */ -@@ -60,10 +100,12 @@ static __init int sysfb_init(void) - name = "platform-framebuffer"; - - pd = platform_device_alloc(name, 0); -- if (!pd) -- return -ENOMEM; -+ if (!pd) { -+ ret = -ENOMEM; -+ goto unlock_mutex; -+ } - -- sysfb_apply_efi_quirks(pd); -+ sysfb_set_efifb_fwnode(pd); - - ret = platform_device_add_data(pd, si, sizeof(*si)); - if (ret) -@@ -73,9 +115,11 @@ static __init int sysfb_init(void) - if (ret) - goto err; - -- return 0; -+ goto unlock_mutex; - err: - platform_device_put(pd); -+unlock_mutex: -+ mutex_unlock(&disable_lock); - return ret; - } - -diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c -index b86761904949c..fd4fa923088af 100644 ---- a/drivers/firmware/sysfb_simplefb.c -+++ b/drivers/firmware/sysfb_simplefb.c -@@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si, - return false; - } - --__init int sysfb_create_simplefb(const struct screen_info *si, -- const struct simplefb_platform_data *mode) -+__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si, -+ const struct simplefb_platform_data *mode) - { - struct platform_device *pd; - struct resource res; -@@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si, - base |= (u64)si->ext_lfb_base << 32; - if (!base || (u64)(resource_size_t)base != base) { - printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); -- return -EINVAL; -+ return ERR_PTR(-EINVAL); - } - - /* -@@ -93,7 +93,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si, - length = mode->height * mode->stride; - if (length > size) { - printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); -- return -EINVAL; -+ return ERR_PTR(-EINVAL); - } - length = PAGE_ALIGN(length); - -@@ -104,21 +104,30 @@ __init int sysfb_create_simplefb(const struct screen_info *si, - res.start = base; - res.end = res.start + length - 1; - if (res.end <= res.start) -- return -EINVAL; -+ return ERR_PTR(-EINVAL); - - pd = platform_device_alloc("simple-framebuffer", 0); - if (!pd) -- return -ENOMEM; -+ return ERR_PTR(-ENOMEM); - -- sysfb_apply_efi_quirks(pd); -+ sysfb_set_efifb_fwnode(pd); - - ret = 
platform_device_add_resources(pd, &res, 1); - if (ret) -- return ret; -+ goto err_put_device; - - ret = platform_device_add_data(pd, mode, sizeof(*mode)); - if (ret) -- return ret; -+ goto err_put_device; - -- return platform_device_add(pd); -+ ret = platform_device_add(pd); -+ if (ret) -+ goto err_put_device; -+ -+ return pd; -+ -+err_put_device: -+ platform_device_put(pd); -+ -+ return ERR_PTR(ret); - } -diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c -index 3e9fa4b543588..1ed881a567d5c 100644 ---- a/drivers/firmware/tegra/bpmp-debugfs.c -+++ b/drivers/firmware/tegra/bpmp-debugfs.c -@@ -465,7 +465,7 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp, - mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0; - dentry = debugfs_create_file(name, mode, parent, bpmp, - &bpmp_debug_fops); -- if (!dentry) { -+ if (IS_ERR(dentry)) { - err = -ENOMEM; - goto out; - } -@@ -716,7 +716,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, - - if (t & DEBUGFS_S_ISDIR) { - dentry = debugfs_create_dir(name, parent); -- if (!dentry) -+ if (IS_ERR(dentry)) - return -ENOMEM; - err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1); - if (err < 0) -@@ -729,7 +729,7 @@ static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf, - dentry = debugfs_create_file(name, mode, - parent, bpmp, - &debugfs_fops); -- if (!dentry) -+ if (IS_ERR(dentry)) - return -ENOMEM; - } - } -@@ -779,11 +779,11 @@ int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp) - return 0; - - root = debugfs_create_dir("bpmp", NULL); -- if (!root) -+ if (IS_ERR(root)) - return -ENOMEM; - - bpmp->debugfs_mirror = debugfs_create_dir("debug", root); -- if (!bpmp->debugfs_mirror) { -+ if (IS_ERR(bpmp->debugfs_mirror)) { - err = -ENOMEM; - goto out; - } -diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c -index 5654c5e9862b1..037db21de510c 100644 ---- a/drivers/firmware/tegra/bpmp.c -+++ b/drivers/firmware/tegra/bpmp.c -@@ -201,7 +201,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, - int err; - - if (data && size > 0) -- memcpy(data, channel->ib->data, size); -+ memcpy_fromio(data, channel->ib->data, size); - - err = tegra_bpmp_ack_response(channel); - if (err < 0) -@@ -245,7 +245,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, - channel->ob->flags = flags; - - if (data && size > 0) -- memcpy(channel->ob->data, data, size); -+ memcpy_toio(channel->ob->data, data, size); - - return tegra_bpmp_post_request(channel); - } -@@ -420,7 +420,7 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code, - channel->ob->code = code; - - if (data && size > 0) -- memcpy(channel->ob->data, data, size); -+ memcpy_toio(channel->ob->data, data, size); - - err = tegra_bpmp_post_response(channel); - if (WARN_ON(err < 0)) -diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c -index a3cadbaf3cba7..0dac35406a38d 100644 ---- a/drivers/firmware/xilinx/zynqmp.c -+++ b/drivers/firmware/xilinx/zynqmp.c -@@ -171,7 +171,7 @@ static int zynqmp_pm_feature(u32 api_id) - } - - /* Add new entry if not present */ -- feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL); -+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC); - if (!feature_data) - return -ENOMEM; - -diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c -index dfdf21ed34c4e..c24b6fb2d7c37 100644 ---- a/drivers/fpga/altera-pr-ip-core.c -+++ 
b/drivers/fpga/altera-pr-ip-core.c -@@ -108,7 +108,7 @@ static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf, - u32 *buffer_32 = (u32 *)buf; - size_t i = 0; - -- if (count <= 0) -+ if (!count) - return -EINVAL; - - /* Write out the complete 32-bit chunks */ -diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c -index f86666cf2c6a8..c38143ef23c64 100644 ---- a/drivers/fpga/dfl.c -+++ b/drivers/fpga/dfl.c -@@ -1864,7 +1864,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, - return -EINVAL; - - fds = memdup_user((void __user *)(arg + sizeof(hdr)), -- hdr.count * sizeof(s32)); -+ array_size(hdr.count, sizeof(s32))); - if (IS_ERR(fds)) - return PTR_ERR(fds); - -diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c -index 798f55670646c..75a24b0457243 100644 ---- a/drivers/fpga/fpga-bridge.c -+++ b/drivers/fpga/fpga-bridge.c -@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) - /** - * fpga_bridge_get - get an exclusive reference to an fpga bridge - * @dev: parent device that fpga bridge was registered with -- * @info: fpga manager info -+ * @info: fpga image specific information - * - * Given a device, get an exclusive reference to an fpga bridge. - * -diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c -index 047fd7f237069..91212bab58717 100644 ---- a/drivers/fpga/stratix10-soc.c -+++ b/drivers/fpga/stratix10-soc.c -@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr, - /* Allocate buffers from the service layer's pool. */ - for (i = 0; i < NUM_SVC_BUFS; i++) { - kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE); -- if (!kbuf) { -+ if (IS_ERR(kbuf)) { - s10_free_buffers(mgr); -- ret = -ENOMEM; -+ ret = PTR_ERR(kbuf); - goto init_done; - } - -diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c -index 59ddc9fd5bca4..92e6eebd1851e 100644 ---- a/drivers/fsi/fsi-core.c -+++ b/drivers/fsi/fsi-core.c -@@ -1309,6 +1309,9 @@ int fsi_master_register(struct fsi_master *master) - - mutex_init(&master->scan_lock); - master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL); -+ if (master->idx < 0) -+ return master->idx; -+ - dev_set_name(&master->dev, "fsi%d", master->idx); - master->dev.class = &fsi_master_class; - -diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c -index 8606e55c1721c..a3645da1f1bf3 100644 ---- a/drivers/fsi/fsi-master-aspeed.c -+++ b/drivers/fsi/fsi-master-aspeed.c -@@ -453,6 +453,8 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att - gpiod_set_value(aspeed->cfam_reset_gpio, 1); - usleep_range(900, 1000); - gpiod_set_value(aspeed->cfam_reset_gpio, 0); -+ usleep_range(900, 1000); -+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, cpu_to_be32(FSI_MRESP_RST_ALL_MASTER)); - mutex_unlock(&aspeed->lock); - - return count; -@@ -542,25 +544,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) - return rc; - } - -- aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL); -+ aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL); - if (!aspeed) - return -ENOMEM; - - aspeed->dev = &pdev->dev; - - aspeed->base = devm_platform_ioremap_resource(pdev, 0); -- if (IS_ERR(aspeed->base)) -- return PTR_ERR(aspeed->base); -+ if (IS_ERR(aspeed->base)) { -+ rc = PTR_ERR(aspeed->base); -+ goto err_free_aspeed; -+ } - - aspeed->clk = devm_clk_get(aspeed->dev, NULL); - if (IS_ERR(aspeed->clk)) { - dev_err(aspeed->dev, "couldn't get clock\n"); -- return PTR_ERR(aspeed->clk); -+ rc = 
PTR_ERR(aspeed->clk); -+ goto err_free_aspeed; - } - rc = clk_prepare_enable(aspeed->clk); - if (rc) { - dev_err(aspeed->dev, "couldn't enable clock\n"); -- return rc; -+ goto err_free_aspeed; - } - - rc = setup_cfam_reset(aspeed); -@@ -595,7 +600,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) - rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw); - if (rc) { - dev_err(&pdev->dev, "failed to read hub version\n"); -- return rc; -+ goto err_release; - } - - reg = be32_to_cpu(raw); -@@ -634,6 +639,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) - - err_release: - clk_disable_unprepare(aspeed->clk); -+err_free_aspeed: -+ kfree(aspeed); - return rc; - } - -diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c -index 24292acdbaf84..a9abebb07f35a 100644 ---- a/drivers/fsi/fsi-master-ast-cf.c -+++ b/drivers/fsi/fsi-master-ast-cf.c -@@ -1439,3 +1439,4 @@ static struct platform_driver fsi_master_acf = { - - module_platform_driver(fsi_master_acf); - MODULE_LICENSE("GPL"); -+MODULE_FIRMWARE(FW_FILE_NAME); -diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c -index b223f0ef337b9..ecf738411fe22 100644 ---- a/drivers/fsi/fsi-occ.c -+++ b/drivers/fsi/fsi-occ.c -@@ -50,6 +50,7 @@ struct occ { - struct device *sbefifo; - char name[32]; - int idx; -+ u8 sequence_number; - enum versions version; - struct miscdevice mdev; - struct mutex occ_lock; -@@ -141,8 +142,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf, - { - struct occ_client *client = file->private_data; - size_t rlen, data_length; -- u16 checksum = 0; -- ssize_t rc, i; -+ ssize_t rc; - u8 *cmd; - - if (!client) -@@ -156,9 +156,6 @@ static ssize_t occ_write(struct file *file, const char __user *buf, - /* Construct the command */ - cmd = client->buffer; - -- /* Sequence number (we could increment and compare with response) */ -- cmd[0] = 1; -- - /* - * Copy the user command (assume user data follows the occ command - * format) -@@ -178,14 +175,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf, - goto done; - } - -- /* Calculate checksum */ -- for (i = 0; i < data_length + 4; ++i) -- checksum += cmd[i]; -- -- cmd[data_length + 4] = checksum >> 8; -- cmd[data_length + 5] = checksum & 0xFF; -- -- /* Submit command */ -+ /* Submit command; 4 bytes before the data and 2 bytes after */ - rlen = PAGE_SIZE; - rc = fsi_occ_submit(client->occ->dev, cmd, data_length + 6, cmd, - &rlen); -@@ -314,11 +304,13 @@ free: - return rc; - } - --static int occ_putsram(struct occ *occ, const void *data, ssize_t len) -+static int occ_putsram(struct occ *occ, const void *data, ssize_t len, -+ u8 seq_no, u16 checksum) - { - size_t cmd_len, buf_len, resp_len, resp_data_len; - u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */ - __be32 *buf; -+ u8 *byte_buf; - int idx = 0, rc; - - cmd_len = (occ->version == occ_p10) ? 6 : 5; -@@ -358,6 +350,15 @@ static int occ_putsram(struct occ *occ, const void *data, ssize_t len) - buf[4 + idx] = cpu_to_be32(data_len); - memcpy(&buf[5 + idx], data, len); - -+ byte_buf = (u8 *)&buf[5 + idx]; -+ /* -+ * Overwrite the first byte with our sequence number and the last two -+ * bytes with the checksum. 
-+ */ -+ byte_buf[0] = seq_no; -+ byte_buf[len - 2] = checksum >> 8; -+ byte_buf[len - 1] = checksum & 0xff; -+ - rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len); - if (rc) - goto free; -@@ -467,9 +468,12 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, - struct occ *occ = dev_get_drvdata(dev); - struct occ_response *resp = response; - u8 seq_no; -+ u16 checksum = 0; - u16 resp_data_length; -+ const u8 *byte_request = (const u8 *)request; - unsigned long start; - int rc; -+ size_t i; - - if (!occ) - return -ENODEV; -@@ -479,11 +483,26 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, - return -EINVAL; - } - -+ /* Checksum the request, ignoring first byte (sequence number). */ -+ for (i = 1; i < req_len - 2; ++i) -+ checksum += byte_request[i]; -+ - mutex_lock(&occ->occ_lock); - -- /* Extract the seq_no from the command (first byte) */ -- seq_no = *(const u8 *)request; -- rc = occ_putsram(occ, request, req_len); -+ /* -+ * Get a sequence number and update the counter. Avoid a sequence -+ * number of 0 which would pass the response check below even if the -+ * OCC response is uninitialized. Any sequence number the user is -+ * trying to send is overwritten since this function is the only common -+ * interface to the OCC and therefore the only place we can guarantee -+ * unique sequence numbers. -+ */ -+ seq_no = occ->sequence_number++; -+ if (!occ->sequence_number) -+ occ->sequence_number = 1; -+ checksum += seq_no; -+ -+ rc = occ_putsram(occ, request, req_len, seq_no, checksum); - if (rc) - goto done; - -@@ -574,6 +593,7 @@ static int occ_probe(struct platform_device *pdev) - occ->version = (uintptr_t)of_device_get_match_data(dev); - occ->dev = dev; - occ->sbefifo = dev->parent; -+ occ->sequence_number = 1; - mutex_init(&occ->occ_lock); - - if (dev->of_node) { -diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c -index 84cb965bfed5c..97045a8d94224 100644 ---- a/drivers/fsi/fsi-sbefifo.c -+++ b/drivers/fsi/fsi-sbefifo.c -@@ -640,7 +640,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo) - } - ffdc_iov.iov_base = ffdc; - ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; -- iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); -+ iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); - cmd[0] = cpu_to_be32(2); - cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); - rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); -@@ -737,7 +737,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, - rbytes = (*resp_len) * sizeof(__be32); - resp_iov.iov_base = response; - resp_iov.iov_len = rbytes; -- iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes); -+ iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes); - - /* Perform the command */ - mutex_lock(&sbefifo->lock); -@@ -817,7 +817,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf, - /* Prepare iov iterator */ - resp_iov.iov_base = buf; - resp_iov.iov_len = len; -- iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len); -+ iov_iter_init(&resp_iter, READ, &resp_iov, 1, len); - - /* Perform the command */ - mutex_lock(&sbefifo->lock); -diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c -index da1486bb6a144..bcb756dc98663 100644 ---- a/drivers/fsi/fsi-scom.c -+++ b/drivers/fsi/fsi-scom.c -@@ -145,7 +145,7 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value, - uint64_t addr, uint32_t *status) - { - uint64_t ind_data, ind_addr; -- int rc, 
retries, err = 0; -+ int rc, err; - - if (value & ~XSCOM_DATA_IND_DATA) - return -EINVAL; -@@ -156,19 +156,14 @@ static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value, - if (rc || (*status & SCOM_STATUS_ANY_ERR)) - return rc; - -- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) { -- rc = __get_scom(scom, &ind_data, addr, status); -- if (rc || (*status & SCOM_STATUS_ANY_ERR)) -- return rc; -+ rc = __get_scom(scom, &ind_data, addr, status); -+ if (rc || (*status & SCOM_STATUS_ANY_ERR)) -+ return rc; - -- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT; -- *status = err << SCOM_STATUS_PIB_RESP_SHIFT; -- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED)) -- return 0; -+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT; -+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT; - -- msleep(1); -- } -- return rc; -+ return 0; - } - - static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value, -@@ -188,7 +183,7 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value, - uint64_t addr, uint32_t *status) - { - uint64_t ind_data, ind_addr; -- int rc, retries, err = 0; -+ int rc, err; - - ind_addr = addr & XSCOM_ADDR_DIRECT_PART; - ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ; -@@ -196,21 +191,15 @@ static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value, - if (rc || (*status & SCOM_STATUS_ANY_ERR)) - return rc; - -- for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) { -- rc = __get_scom(scom, &ind_data, addr, status); -- if (rc || (*status & SCOM_STATUS_ANY_ERR)) -- return rc; -- -- err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT; -- *status = err << SCOM_STATUS_PIB_RESP_SHIFT; -- *value = ind_data & XSCOM_DATA_IND_DATA; -+ rc = __get_scom(scom, &ind_data, addr, status); -+ if (rc || (*status & SCOM_STATUS_ANY_ERR)) -+ return rc; - -- if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED)) -- return 0; -+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT; -+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT; -+ *value = ind_data & XSCOM_DATA_IND_DATA; - -- msleep(1); -- } -- return rc; -+ return 0; - } - - static int raw_put_scom(struct scom_device *scom, uint64_t value, -@@ -289,7 +278,7 @@ static int put_scom(struct scom_device *scom, uint64_t value, - int rc; - - rc = raw_put_scom(scom, value, addr, &status); -- if (rc == -ENODEV) -+ if (rc) - return rc; - - rc = handle_fsi2pib_status(scom, status); -@@ -308,7 +297,7 @@ static int get_scom(struct scom_device *scom, uint64_t *value, - int rc; - - rc = raw_get_scom(scom, value, addr, &status); -- if (rc == -ENODEV) -+ if (rc) - return rc; - - rc = handle_fsi2pib_status(scom, status); -diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig -index fae5141251e5d..7b9def6b10047 100644 ---- a/drivers/gpio/Kconfig -+++ b/drivers/gpio/Kconfig -@@ -100,7 +100,7 @@ config GPIO_GENERIC - tristate - - config GPIO_REGMAP -- depends on REGMAP -+ select REGMAP - tristate - - # put drivers in the right section, in alphabetical order -@@ -523,6 +523,7 @@ config GPIO_REG - config GPIO_ROCKCHIP - tristate "Rockchip GPIO support" - depends on ARCH_ROCKCHIP || COMPILE_TEST -+ select GENERIC_IRQ_CHIP - select GPIOLIB_IRQCHIP - default ARCH_ROCKCHIP - help -diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c -index 34e35b64dcdc0..23047dc84ef1b 100644 ---- a/drivers/gpio/gpio-aggregator.c -+++ 
b/drivers/gpio/gpio-aggregator.c -@@ -273,7 +273,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) - { - struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - -- return gpiod_get_value(fwd->descs[offset]); -+ return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset]) -+ : gpiod_get_value(fwd->descs[offset]); - } - - static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, -@@ -292,7 +293,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, - for_each_set_bit(i, mask, fwd->chip.ngpio) - descs[j++] = fwd->descs[i]; - -- error = gpiod_get_array_value(j, descs, NULL, values); -+ if (fwd->chip.can_sleep) -+ error = gpiod_get_array_value_cansleep(j, descs, NULL, values); -+ else -+ error = gpiod_get_array_value(j, descs, NULL, values); - if (error) - return error; - -@@ -327,7 +331,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) - { - struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - -- gpiod_set_value(fwd->descs[offset], value); -+ if (chip->can_sleep) -+ gpiod_set_value_cansleep(fwd->descs[offset], value); -+ else -+ gpiod_set_value(fwd->descs[offset], value); - } - - static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, -@@ -346,7 +353,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, - descs[j++] = fwd->descs[i]; - } - -- gpiod_set_array_value(j, descs, NULL, values); -+ if (fwd->chip.can_sleep) -+ gpiod_set_array_value_cansleep(j, descs, NULL, values); -+ else -+ gpiod_set_array_value(j, descs, NULL, values); - } - - static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, -diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c -index 14e6b3e64add5..6f3ded619c8b2 100644 ---- a/drivers/gpio/gpio-amd8111.c -+++ b/drivers/gpio/gpio-amd8111.c -@@ -226,7 +226,10 @@ found: - ioport_unmap(gp.pm); - goto out; - } -+ return 0; -+ - out: -+ pci_dev_put(pdev); - return err; - } - -@@ -234,6 +237,7 @@ static void __exit amd_gpio_exit(void) - { - gpiochip_remove(&gp.chip); - ioport_unmap(gp.pm); -+ pci_dev_put(gp.pdev); - } - - module_init(amd_gpio_init); -diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c -index 44398992ae15f..dba4836a18f80 100644 ---- a/drivers/gpio/gpio-amdpt.c -+++ b/drivers/gpio/gpio-amdpt.c -@@ -35,19 +35,19 @@ static int pt_gpio_request(struct gpio_chip *gc, unsigned offset) - - dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset); - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG); - if (using_pins & BIT(offset)) { - dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n", - offset); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - return -EINVAL; - } - - writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -@@ -58,13 +58,13 @@ static void pt_gpio_free(struct gpio_chip *gc, unsigned offset) - unsigned long flags; - u32 using_pins; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG); - using_pins &= ~BIT(offset); - writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ 
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset); - } -diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c -index 3d6ef37a7702a..454cefbeecf0e 100644 ---- a/drivers/gpio/gpio-aspeed-sgpio.c -+++ b/drivers/gpio/gpio-aspeed-sgpio.c -@@ -31,7 +31,7 @@ struct aspeed_sgpio { - struct gpio_chip chip; - struct irq_chip intc; - struct clk *pclk; -- spinlock_t lock; -+ raw_spinlock_t lock; - void __iomem *base; - int irq; - }; -@@ -173,12 +173,12 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) - enum aspeed_sgpio_reg reg; - int rc = 0; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata; - rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return rc; - } -@@ -215,11 +215,11 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) - struct aspeed_sgpio *gpio = gpiochip_get_data(gc); - unsigned long flags; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - sgpio_set_value(gc, offset, val); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) -@@ -236,9 +236,9 @@ static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int v - /* No special action is required for setting the direction; we'll - * error-out in sgpio_set_value if this isn't an output GPIO */ - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - rc = sgpio_set_value(gc, offset, val); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return rc; - } -@@ -277,11 +277,11 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d) - - status_addr = bank_reg(gpio, bank, reg_irq_status); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - iowrite32(bit, status_addr); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set) -@@ -296,7 +296,7 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set) - irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset); - addr = bank_reg(gpio, bank, reg_irq_enable); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - reg = ioread32(addr); - if (set) -@@ -306,7 +306,7 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set) - - iowrite32(reg, addr); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static void aspeed_sgpio_irq_mask(struct irq_data *d) -@@ -355,7 +355,7 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) - return -EINVAL; - } - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - addr = bank_reg(gpio, bank, reg_irq_type0); - reg = ioread32(addr); -@@ -372,7 +372,7 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) - reg = (reg & ~bit) | type2; - iowrite32(reg, addr); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - irq_set_handler_locked(d, 
handler); - -@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc) - reg = ioread32(bank_reg(data, bank, reg_irq_status)); - - for_each_set_bit(p, ®, 32) -- generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2); -+ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2); - } - - chained_irq_exit(ic, desc); -@@ -467,7 +467,7 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip, - - reg = bank_reg(gpio, to_bank(offset), reg_tolerance); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - val = readl(reg); - -@@ -478,7 +478,7 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip, - - writel(val, reg); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; - } -@@ -575,7 +575,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) - iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval | - ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL); - -- spin_lock_init(&gpio->lock); -+ raw_spin_lock_init(&gpio->lock); - - gpio->chip.parent = &pdev->dev; - gpio->chip.ngpio = nr_gpios * 2; -diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c -index 3c8f20c57695f..318a7d95a1a8b 100644 ---- a/drivers/gpio/gpio-aspeed.c -+++ b/drivers/gpio/gpio-aspeed.c -@@ -53,7 +53,7 @@ struct aspeed_gpio_config { - struct aspeed_gpio { - struct gpio_chip chip; - struct irq_chip irqc; -- spinlock_t lock; -+ raw_spinlock_t lock; - void __iomem *base; - int irq; - const struct aspeed_gpio_config *config; -@@ -413,14 +413,14 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, - unsigned long flags; - bool copro; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - copro = aspeed_gpio_copro_request(gpio, offset); - - __aspeed_gpio_set(gc, offset, val); - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) -@@ -435,7 +435,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) - if (!have_input(gpio, offset)) - return -ENOTSUPP; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - reg = ioread32(addr); - reg &= ~GPIO_BIT(offset); -@@ -445,7 +445,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) - if (copro) - aspeed_gpio_copro_release(gpio, offset); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; - } -@@ -463,7 +463,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, - if (!have_output(gpio, offset)) - return -ENOTSUPP; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - reg = ioread32(addr); - reg |= GPIO_BIT(offset); -@@ -474,7 +474,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; - } -@@ -492,11 +492,11 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) - if (!have_output(gpio, offset)) - return GPIO_LINE_DIRECTION_IN; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - val = ioread32(bank_reg(gpio, bank, reg_dir)) & 
GPIO_BIT(offset); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; - } -@@ -539,14 +539,14 @@ static void aspeed_gpio_irq_ack(struct irq_data *d) - - status_addr = bank_reg(gpio, bank, reg_irq_status); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - copro = aspeed_gpio_copro_request(gpio, offset); - - iowrite32(bit, status_addr); - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) -@@ -565,7 +565,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) - - addr = bank_reg(gpio, bank, reg_irq_enable); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - copro = aspeed_gpio_copro_request(gpio, offset); - - reg = ioread32(addr); -@@ -577,7 +577,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - } - - static void aspeed_gpio_irq_mask(struct irq_data *d) -@@ -629,7 +629,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) - return -EINVAL; - } - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - copro = aspeed_gpio_copro_request(gpio, offset); - - addr = bank_reg(gpio, bank, reg_irq_type0); -@@ -649,7 +649,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - irq_set_handler_locked(d, handler); - -@@ -716,7 +716,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, - - treg = bank_reg(gpio, to_bank(offset), reg_tolerance); - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - copro = aspeed_gpio_copro_request(gpio, offset); - - val = readl(treg); -@@ -730,7 +730,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, - - if (copro) - aspeed_gpio_copro_release(gpio, offset); -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; - } -@@ -856,7 +856,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, - return rc; - } - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - if (timer_allocation_registered(gpio, offset)) { - rc = unregister_allocated_timer(gpio, offset); -@@ -916,7 +916,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, - configure_timer(gpio, offset, i); - - out: -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return rc; - } -@@ -927,13 +927,13 @@ static int disable_debounce(struct gpio_chip *chip, unsigned int offset) - unsigned long flags; - int rc; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - rc = unregister_allocated_timer(gpio, offset); - if (!rc) - configure_timer(gpio, offset, 0); - -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - - return rc; - } -@@ -1015,7 +1015,7 @@ int aspeed_gpio_copro_grab_gpio(struct 
gpio_desc *desc, - return -EINVAL; - bindex = offset >> 3; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - /* Sanity check, this shouldn't happen */ - if (gpio->cf_copro_bankmap[bindex] == 0xff) { -@@ -1036,7 +1036,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, - if (bit) - *bit = GPIO_OFFSET(offset); - bail: -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - return rc; - } - EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio); -@@ -1060,7 +1060,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) - return -EINVAL; - bindex = offset >> 3; - -- spin_lock_irqsave(&gpio->lock, flags); -+ raw_spin_lock_irqsave(&gpio->lock, flags); - - /* Sanity check, this shouldn't happen */ - if (gpio->cf_copro_bankmap[bindex] == 0) { -@@ -1074,7 +1074,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) - aspeed_gpio_change_cmd_source(gpio, bank, bindex, - GPIO_CMDSRC_ARM); - bail: -- spin_unlock_irqrestore(&gpio->lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->lock, flags); - return rc; - } - EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio); -@@ -1148,7 +1148,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) - if (IS_ERR(gpio->base)) - return PTR_ERR(gpio->base); - -- spin_lock_init(&gpio->lock); -+ raw_spin_lock_init(&gpio->lock); - - gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node); - if (!gpio_id) -diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c -index 895a79936248d..c5d85e931f2a9 100644 ---- a/drivers/gpio/gpio-brcmstb.c -+++ b/drivers/gpio/gpio-brcmstb.c -@@ -92,9 +92,9 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank) - unsigned long status; - unsigned long flags; - -- spin_lock_irqsave(&bank->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags); - status = __brcmstb_gpio_get_active_irqs(bank); -- spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags); - - return status; - } -@@ -114,14 +114,14 @@ static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank, - u32 imask; - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - imask = gc->read_reg(priv->reg_base + GIO_MASK(bank->id)); - if (enable) - imask |= mask; - else - imask &= ~mask; - gc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset) -@@ -204,7 +204,7 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type) - return -EINVAL; - } - -- spin_lock_irqsave(&bank->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags); - - iedge_config = bank->gc.read_reg(priv->reg_base + - GIO_EC(bank->id)) & ~mask; -@@ -220,7 +220,7 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type) - bank->gc.write_reg(priv->reg_base + GIO_LEVEL(bank->id), - ilevel | level); - -- spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags); - return 0; - } - -diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c -index 562f8f7e7d1fc..137aea49ba026 100644 ---- a/drivers/gpio/gpio-cadence.c -+++ b/drivers/gpio/gpio-cadence.c -@@ -41,12 +41,12 @@ static int cdns_gpio_request(struct gpio_chip *chip, unsigned 
int offset) - struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); - unsigned long flags; - -- spin_lock_irqsave(&chip->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&chip->bgpio_lock, flags); - - iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset), - cgpio->regs + CDNS_GPIO_BYPASS_MODE); - -- spin_unlock_irqrestore(&chip->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); - return 0; - } - -@@ -55,13 +55,13 @@ static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset) - struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip); - unsigned long flags; - -- spin_lock_irqsave(&chip->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&chip->bgpio_lock, flags); - - iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) | - (BIT(offset) & cgpio->bypass_orig), - cgpio->regs + CDNS_GPIO_BYPASS_MODE); - -- spin_unlock_irqrestore(&chip->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); - } - - static void cdns_gpio_irq_mask(struct irq_data *d) -@@ -90,7 +90,7 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type) - u32 mask = BIT(d->hwirq); - int ret = 0; - -- spin_lock_irqsave(&chip->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&chip->bgpio_lock, flags); - - int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask; - int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask; -@@ -115,7 +115,7 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type) - iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE); - - err_irq_type: -- spin_unlock_irqrestore(&chip->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags); - return ret; - } - -diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c -index cb5afaa7ed482..0214244e9f01f 100644 ---- a/drivers/gpio/gpio-davinci.c -+++ b/drivers/gpio/gpio-davinci.c -@@ -326,7 +326,7 @@ static struct irq_chip gpio_irqchip = { - .irq_enable = gpio_irq_enable, - .irq_disable = gpio_irq_disable, - .irq_set_type = gpio_irq_type, -- .flags = IRQCHIP_SET_TYPE_MASKED, -+ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, - }; - - static void gpio_irq_handler(struct irq_desc *desc) -diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c -index 026903e3ef543..08b9e2cf4f2d6 100644 ---- a/drivers/gpio/gpio-dln2.c -+++ b/drivers/gpio/gpio-dln2.c -@@ -46,6 +46,7 @@ - struct dln2_gpio { - struct platform_device *pdev; - struct gpio_chip gpio; -+ struct irq_chip irqchip; - - /* - * Cache pin direction to save us one transfer, since the hardware has -@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd) - mutex_unlock(&dln2->irq_lock); - } - --static struct irq_chip dln2_gpio_irqchip = { -- .name = "dln2-irq", -- .irq_mask = dln2_irq_mask, -- .irq_unmask = dln2_irq_unmask, -- .irq_set_type = dln2_irq_set_type, -- .irq_bus_lock = dln2_irq_bus_lock, -- .irq_bus_sync_unlock = dln2_irq_bus_unlock, --}; -- - static void dln2_gpio_event(struct platform_device *pdev, u16 echo, - const void *data, int len) - { -@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev) - dln2->gpio.direction_output = dln2_gpio_direction_output; - dln2->gpio.set_config = dln2_gpio_set_config; - -+ dln2->irqchip.name = "dln2-irq", -+ dln2->irqchip.irq_mask = dln2_irq_mask, -+ dln2->irqchip.irq_unmask = dln2_irq_unmask, -+ dln2->irqchip.irq_set_type = dln2_irq_set_type, -+ dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock, -+ dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock, -+ - girq = 
&dln2->gpio.irq; -- girq->chip = &dln2_gpio_irqchip; -+ girq->chip = &dln2->irqchip; - /* The event comes from the outside so no parent handler */ - girq->parent_handler = NULL; - girq->num_parents = 0; -diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c -index f98fa33e16790..a503f37001ebb 100644 ---- a/drivers/gpio/gpio-dwapb.c -+++ b/drivers/gpio/gpio-dwapb.c -@@ -242,9 +242,9 @@ static void dwapb_irq_ack(struct irq_data *d) - u32 val = BIT(irqd_to_hwirq(d)); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - dwapb_write(gpio, GPIO_PORTA_EOI, val); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void dwapb_irq_mask(struct irq_data *d) -@@ -254,10 +254,10 @@ static void dwapb_irq_mask(struct irq_data *d) - unsigned long flags; - u32 val; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, GPIO_INTMASK) | BIT(irqd_to_hwirq(d)); - dwapb_write(gpio, GPIO_INTMASK, val); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void dwapb_irq_unmask(struct irq_data *d) -@@ -267,10 +267,10 @@ static void dwapb_irq_unmask(struct irq_data *d) - unsigned long flags; - u32 val; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(irqd_to_hwirq(d)); - dwapb_write(gpio, GPIO_INTMASK, val); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void dwapb_irq_enable(struct irq_data *d) -@@ -280,11 +280,11 @@ static void dwapb_irq_enable(struct irq_data *d) - unsigned long flags; - u32 val; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, GPIO_INTEN); - val |= BIT(irqd_to_hwirq(d)); - dwapb_write(gpio, GPIO_INTEN, val); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void dwapb_irq_disable(struct irq_data *d) -@@ -294,11 +294,11 @@ static void dwapb_irq_disable(struct irq_data *d) - unsigned long flags; - u32 val; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, GPIO_INTEN); - val &= ~BIT(irqd_to_hwirq(d)); - dwapb_write(gpio, GPIO_INTEN, val); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static int dwapb_irq_set_type(struct irq_data *d, u32 type) -@@ -308,7 +308,7 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type) - irq_hw_number_t bit = irqd_to_hwirq(d); - unsigned long level, polarity, flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL); - polarity = dwapb_read(gpio, GPIO_INT_POLARITY); - -@@ -343,7 +343,7 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type) - dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level); - if (type != IRQ_TYPE_EDGE_BOTH) - dwapb_write(gpio, GPIO_INT_POLARITY, polarity); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -@@ -373,7 +373,7 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc, - unsigned long flags, val_deb; - unsigned long mask = BIT(offset); 
- -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE); - if (debounce) -@@ -382,7 +382,7 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc, - val_deb &= ~mask; - dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -@@ -653,10 +653,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio) - gpio->clks[1].id = "db"; - err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS, - gpio->clks); -- if (err) { -- dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n"); -- return err; -- } -+ if (err) -+ return dev_err_probe(gpio->dev, err, -+ "Cannot get APB/Debounce clocks\n"); - - err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); - if (err) { -@@ -739,7 +738,7 @@ static int dwapb_gpio_suspend(struct device *dev) - unsigned long flags; - int i; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - for (i = 0; i < gpio->nr_ports; i++) { - unsigned int offset; - unsigned int idx = gpio->ports[i].idx; -@@ -766,7 +765,7 @@ static int dwapb_gpio_suspend(struct device *dev) - dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en); - } - } -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks); - -@@ -786,7 +785,7 @@ static int dwapb_gpio_resume(struct device *dev) - return err; - } - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - for (i = 0; i < gpio->nr_ports; i++) { - unsigned int offset; - unsigned int idx = gpio->ports[i].idx; -@@ -813,7 +812,7 @@ static int dwapb_gpio_resume(struct device *dev) - dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff); - } - } -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c -index f954359c9544e..21204a5dca3d4 100644 ---- a/drivers/gpio/gpio-grgpio.c -+++ b/drivers/gpio/gpio-grgpio.c -@@ -145,7 +145,7 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type) - return -EINVAL; - } - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - ipol = priv->gc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask; - iedge = priv->gc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask; -@@ -153,7 +153,7 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type) - priv->gc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol); - priv->gc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge); - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - - return 0; - } -@@ -164,11 +164,11 @@ static void grgpio_irq_mask(struct irq_data *d) - int offset = d->hwirq; - unsigned long flags; - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - grgpio_set_imask(priv, offset, 0); - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - } - - static void grgpio_irq_unmask(struct irq_data *d) -@@ -177,11 +177,11 @@ static void grgpio_irq_unmask(struct irq_data *d) - int offset = d->hwirq; - unsigned long flags; - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ 
raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - grgpio_set_imask(priv, offset, 1); - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - } - - static struct irq_chip grgpio_irq_chip = { -@@ -199,7 +199,7 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev) - int i; - int match = 0; - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - /* - * For each gpio line, call its interrupt handler if it its underlying -@@ -215,7 +215,7 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev) - } - } - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - - if (!match) - dev_warn(priv->dev, "No gpio line matched irq %d\n", irq); -@@ -247,13 +247,13 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, - dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n", - irq, offset); - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - /* Request underlying irq if not already requested */ - lirq->irq = irq; - uirq = &priv->uirqs[lirq->index]; - if (uirq->refcnt == 0) { -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - ret = request_irq(uirq->uirq, grgpio_irq_handler, 0, - dev_name(priv->dev), priv); - if (ret) { -@@ -262,11 +262,11 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq, - uirq->uirq); - return ret; - } -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - } - uirq->refcnt++; - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - - /* Setup irq */ - irq_set_chip_data(irq, priv); -@@ -290,7 +290,7 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq) - irq_set_chip_and_handler(irq, NULL, NULL); - irq_set_chip_data(irq, NULL); - -- spin_lock_irqsave(&priv->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags); - - /* Free underlying irq if last user unmapped */ - index = -1; -@@ -309,13 +309,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq) - uirq = &priv->uirqs[lirq->index]; - uirq->refcnt--; - if (uirq->refcnt == 0) { -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - free_irq(uirq->uirq, priv); - return; - } - } - -- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags); - } - - static const struct irq_domain_ops grgpio_irq_domain_ops = { -diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c -index 641719a96a1a9..4e13e937f8324 100644 ---- a/drivers/gpio/gpio-hlwd.c -+++ b/drivers/gpio/gpio-hlwd.c -@@ -65,7 +65,7 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc) - int hwirq; - u32 emulated_pending; - -- spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); - pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG); - pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK); - -@@ -93,7 +93,7 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc) - /* Mark emulated interrupts as pending */ - pending |= rising | falling; - } -- spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); - - 
chained_irq_enter(chip, desc); - -@@ -118,11 +118,11 @@ static void hlwd_gpio_irq_mask(struct irq_data *data) - unsigned long flags; - u32 mask; - -- spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); - mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK); - mask &= ~BIT(data->hwirq); - iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK); -- spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); - } - - static void hlwd_gpio_irq_unmask(struct irq_data *data) -@@ -132,11 +132,11 @@ static void hlwd_gpio_irq_unmask(struct irq_data *data) - unsigned long flags; - u32 mask; - -- spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); - mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK); - mask |= BIT(data->hwirq); - iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK); -- spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); - } - - static void hlwd_gpio_irq_enable(struct irq_data *data) -@@ -173,7 +173,7 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) - unsigned long flags; - u32 level; - -- spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); - - hlwd->edge_emulation &= ~BIT(data->hwirq); - -@@ -194,11 +194,11 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) - hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type); - break; - default: -- spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); - return -EINVAL; - } - -- spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); - return 0; - } - -diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c -index 50003ad2e5898..1cafdf46f8756 100644 ---- a/drivers/gpio/gpio-idt3243x.c -+++ b/drivers/gpio/gpio-idt3243x.c -@@ -57,7 +57,7 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) - if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH)) - return -EINVAL; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL); - if (sense & IRQ_TYPE_LEVEL_HIGH) -@@ -68,7 +68,7 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) - writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL); - irq_set_handler_locked(d, handle_level_irq); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - return 0; - } - -@@ -86,12 +86,12 @@ static void idt_gpio_mask(struct irq_data *d) - struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - ctrl->mask_cache |= BIT(d->hwirq); - writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void idt_gpio_unmask(struct irq_data *d) -@@ -100,12 +100,12 @@ static void idt_gpio_unmask(struct irq_data *d) - struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - ctrl->mask_cache &= ~BIT(d->hwirq); - 
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static int idt_gpio_irq_init_hw(struct gpio_chip *gc) -@@ -132,7 +132,7 @@ static int idt_gpio_probe(struct platform_device *pdev) - struct device *dev = &pdev->dev; - struct gpio_irq_chip *girq; - struct idt_gpio_ctrl *ctrl; -- unsigned int parent_irq; -+ int parent_irq; - int ngpios; - int ret; - -@@ -164,8 +164,8 @@ static int idt_gpio_probe(struct platform_device *pdev) - return PTR_ERR(ctrl->pic); - - parent_irq = platform_get_irq(pdev, 0); -- if (!parent_irq) -- return -EINVAL; -+ if (parent_irq < 0) -+ return parent_irq; - - girq = &ctrl->gc.irq; - girq->chip = &idt_gpio_irqchip; -diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c -index b3b050604e0be..6b184502fa3f8 100644 ---- a/drivers/gpio/gpio-ixp4xx.c -+++ b/drivers/gpio/gpio-ixp4xx.c -@@ -128,7 +128,7 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type) - int_reg = IXP4XX_REG_GPIT1; - } - -- spin_lock_irqsave(&g->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags); - - /* Clear the style for the appropriate pin */ - val = __raw_readl(g->base + int_reg); -@@ -147,7 +147,7 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type) - val |= BIT(d->hwirq); - __raw_writel(val, g->base + IXP4XX_REG_GPOE); - -- spin_unlock_irqrestore(&g->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags); - - /* This parent only accept level high (asserted) */ - return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); -diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c -index 1b1ee94eeab47..5d90b3bc5a256 100644 ---- a/drivers/gpio/gpio-loongson1.c -+++ b/drivers/gpio/gpio-loongson1.c -@@ -25,10 +25,10 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset) - { - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | BIT(offset), - gpio_reg_base + GPIO_CFG); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -@@ -37,10 +37,10 @@ static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset) - { - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~BIT(offset), - gpio_reg_base + GPIO_CFG); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static int ls1x_gpio_probe(struct platform_device *pdev) -diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c -index 1e21c661d79d6..a035a9bcb57c6 100644 ---- a/drivers/gpio/gpio-menz127.c -+++ b/drivers/gpio/gpio-menz127.c -@@ -64,7 +64,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, - debounce /= 50; - } - -- spin_lock(&gc->bgpio_lock); -+ raw_spin_lock(&gc->bgpio_lock); - - db_en = readl(priv->reg_base + MEN_Z127_DBER); - -@@ -79,7 +79,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, - writel(db_en, priv->reg_base + MEN_Z127_DBER); - writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio)); - -- spin_unlock(&gc->bgpio_lock); -+ raw_spin_unlock(&gc->bgpio_lock); - - return 0; - } -@@ -91,7 +91,7 @@ static int men_z127_set_single_ended(struct gpio_chip 
*gc, - struct men_z127_gpio *priv = gpiochip_get_data(gc); - u32 od_en; - -- spin_lock(&gc->bgpio_lock); -+ raw_spin_lock(&gc->bgpio_lock); - od_en = readl(priv->reg_base + MEN_Z127_ODER); - - if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN) -@@ -101,7 +101,7 @@ static int men_z127_set_single_ended(struct gpio_chip *gc, - od_en &= ~BIT(offset); - - writel(od_en, priv->reg_base + MEN_Z127_ODER); -- spin_unlock(&gc->bgpio_lock); -+ raw_spin_unlock(&gc->bgpio_lock); - - return 0; - } -diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c -index 40a052bc67849..5a09070e5f78c 100644 ---- a/drivers/gpio/gpio-mlxbf2.c -+++ b/drivers/gpio/gpio-mlxbf2.c -@@ -120,7 +120,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) - u32 arm_gpio_lock_val; - - mutex_lock(yu_arm_gpio_lock_param.lock); -- spin_lock(&gs->gc.bgpio_lock); -+ raw_spin_lock(&gs->gc.bgpio_lock); - - arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); - -@@ -128,7 +128,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) - * When lock active bit[31] is set, ModeX is write enabled - */ - if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { -- spin_unlock(&gs->gc.bgpio_lock); -+ raw_spin_unlock(&gs->gc.bgpio_lock); - mutex_unlock(yu_arm_gpio_lock_param.lock); - return -EINVAL; - } -@@ -146,7 +146,7 @@ static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) - __releases(yu_arm_gpio_lock_param.lock) - { - writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); -- spin_unlock(&gs->gc.bgpio_lock); -+ raw_spin_unlock(&gs->gc.bgpio_lock); - mutex_unlock(yu_arm_gpio_lock_param.lock); - } - -diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c -index c335a0309ba31..d9dff3dc92ae5 100644 ---- a/drivers/gpio/gpio-mmio.c -+++ b/drivers/gpio/gpio-mmio.c -@@ -220,7 +220,7 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) - unsigned long mask = bgpio_line2mask(gc, gpio); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - if (val) - gc->bgpio_data |= mask; -@@ -229,7 +229,7 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) - - gc->write_reg(gc->reg_dat, gc->bgpio_data); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, -@@ -248,7 +248,7 @@ static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) - unsigned long mask = bgpio_line2mask(gc, gpio); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - if (val) - gc->bgpio_data |= mask; -@@ -257,7 +257,7 @@ static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) - - gc->write_reg(gc->reg_set, gc->bgpio_data); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void bgpio_multiple_get_masks(struct gpio_chip *gc, -@@ -286,7 +286,7 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc, - unsigned long flags; - unsigned long set_mask, clear_mask; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask); - -@@ -295,7 +295,7 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc, - - gc->write_reg(reg, gc->bgpio_data); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ 
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, -@@ -347,7 +347,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) - { - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio); - -@@ -356,7 +356,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) - if (gc->reg_dir_out) - gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - return 0; - } -@@ -387,7 +387,7 @@ static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) - { - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - - gc->bgpio_dir |= bgpio_line2mask(gc, gpio); - -@@ -396,7 +396,7 @@ static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) - if (gc->reg_dir_out) - gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); - -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - } - - static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio, -@@ -610,7 +610,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, - if (gc->bgpio_bits > BITS_PER_LONG) - return -EINVAL; - -- spin_lock_init(&gc->bgpio_lock); -+ raw_spin_lock_init(&gc->bgpio_lock); - gc->parent = dev; - gc->label = dev_name(dev); - gc->base = -1; -diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c -index d26bff29157b5..5c84dd7880a47 100644 ---- a/drivers/gpio/gpio-mockup.c -+++ b/drivers/gpio/gpio-mockup.c -@@ -368,11 +368,18 @@ static void gpio_mockup_debugfs_setup(struct device *dev, - priv->offset = i; - priv->desc = gpiochip_get_desc(gc, i); - -- debugfs_create_file(name, 0200, chip->dbg_dir, priv, -+ debugfs_create_file(name, 0600, chip->dbg_dir, priv, - &gpio_mockup_debugfs_ops); - } - } - -+static void gpio_mockup_debugfs_cleanup(void *data) -+{ -+ struct gpio_mockup_chip *chip = data; -+ -+ debugfs_remove_recursive(chip->dbg_dir); -+} -+ - static void gpio_mockup_dispose_mappings(void *data) - { - struct gpio_mockup_chip *chip = data; -@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) - - gpio_mockup_debugfs_setup(dev, chip); - -- return 0; -+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip); - } - - static const struct of_device_id gpio_mockup_of_match[] = { -@@ -547,8 +554,10 @@ static int __init gpio_mockup_register_chip(int idx) - } - - fwnode = fwnode_create_software_node(properties, NULL); -- if (IS_ERR(fwnode)) -+ if (IS_ERR(fwnode)) { -+ kfree_strarray(line_names, ngpio); - return PTR_ERR(fwnode); -+ } - - pdevinfo.name = "gpio-mockup"; - pdevinfo.id = idx; -@@ -611,9 +620,9 @@ static int __init gpio_mockup_init(void) - - static void __exit gpio_mockup_exit(void) - { -+ gpio_mockup_unregister_pdevs(); - debugfs_remove_recursive(gpio_mockup_dbg_dir); - platform_driver_unregister(&gpio_mockup_driver); -- gpio_mockup_unregister_pdevs(); - } - - module_init(gpio_mockup_init); -diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c -index 70d6ae20b1da5..763256efddc2b 100644 ---- a/drivers/gpio/gpio-mpc8xxx.c -+++ b/drivers/gpio/gpio-mpc8xxx.c -@@ -47,7 +47,7 @@ struct mpc8xxx_gpio_chip { - unsigned offset, int value); - - struct irq_domain *irq; -- unsigned int irqn; -+ int 
irqn; - }; - - /* -@@ -172,6 +172,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) - - switch (flow_type) { - case IRQ_TYPE_EDGE_FALLING: -+ case IRQ_TYPE_LEVEL_LOW: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR, - gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR) -@@ -388,8 +389,8 @@ static int mpc8xxx_probe(struct platform_device *pdev) - } - - mpc8xxx_gc->irqn = platform_get_irq(pdev, 0); -- if (!mpc8xxx_gc->irqn) -- return 0; -+ if (mpc8xxx_gc->irqn < 0) -+ return mpc8xxx_gc->irqn; - - mpc8xxx_gc->irq = irq_domain_create_linear(fwnode, - MPC8XXX_GPIO_PINS, -diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c -index 8f429d9f36616..b965513f44fea 100644 ---- a/drivers/gpio/gpio-mvebu.c -+++ b/drivers/gpio/gpio-mvebu.c -@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, - unsigned long flags; - unsigned int on, off; - -+ if (state->polarity != PWM_POLARITY_NORMAL) -+ return -EINVAL; -+ - val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle; - do_div(val, NSEC_PER_SEC); - if (val > UINT_MAX + 1ULL) -@@ -790,8 +793,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev, - u32 offset; - u32 set; - -- if (of_device_is_compatible(mvchip->chip.of_node, -- "marvell,armada-370-gpio")) { -+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { -+ int ret = of_property_read_u32(dev->of_node, -+ "marvell,pwm-offset", &offset); -+ if (ret < 0) -+ return 0; -+ } else { - /* - * There are only two sets of PWM configuration registers for - * all the GPIO lines on those SoCs which this driver reserves -@@ -801,13 +808,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, - if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm")) - return 0; - offset = 0; -- } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) { -- int ret = of_property_read_u32(dev->of_node, -- "marvell,pwm-offset", &offset); -- if (ret < 0) -- return 0; -- } else { -- return 0; - } - - if (IS_ERR(mvchip->clk)) -@@ -871,17 +871,10 @@ static int mvebu_pwm_probe(struct platform_device *pdev, - mvpwm->chip.dev = dev; - mvpwm->chip.ops = &mvebu_pwm_ops; - mvpwm->chip.npwm = mvchip->chip.ngpio; -- /* -- * There may already be some PWM allocated, so we can't force -- * mvpwm->chip.base to a fixed point like mvchip->chip.base. -- * So, we let pwmchip_add() do the numbering and take the next free -- * region. 
-- */ -- mvpwm->chip.base = -1; - - spin_lock_init(&mvpwm->lock); - -- return pwmchip_add(&mvpwm->chip); -+ return devm_pwmchip_add(dev, &mvpwm->chip); - } - - #ifdef CONFIG_DEBUG_FS -@@ -1119,6 +1112,13 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev, - return 0; - } - -+static void mvebu_gpio_remove_irq_domain(void *data) -+{ -+ struct irq_domain *domain = data; -+ -+ irq_domain_remove(domain); -+} -+ - static int mvebu_gpio_probe(struct platform_device *pdev) - { - struct mvebu_gpio_chip *mvchip; -@@ -1251,17 +1251,21 @@ static int mvebu_gpio_probe(struct platform_device *pdev) - if (!mvchip->domain) { - dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", - mvchip->chip.label); -- err = -ENODEV; -- goto err_pwm; -+ return -ENODEV; - } - -+ err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain, -+ mvchip->domain); -+ if (err) -+ return err; -+ - err = irq_alloc_domain_generic_chips( - mvchip->domain, ngpios, 2, np->name, handle_level_irq, - IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0); - if (err) { - dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n", - mvchip->chip.label); -- goto err_domain; -+ return err; - } - - /* -@@ -1301,13 +1305,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) - } - - return 0; -- --err_domain: -- irq_domain_remove(mvchip->domain); --err_pwm: -- pwmchip_remove(&mvchip->mvpwm->chip); -- -- return err; - } - - static struct platform_driver mvebu_gpio_driver = { -diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c -index c871602fc5ba9..853d9aa6b3b1f 100644 ---- a/drivers/gpio/gpio-mxc.c -+++ b/drivers/gpio/gpio-mxc.c -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -147,6 +148,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) - { - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct mxc_gpio_port *port = gc->private; -+ unsigned long flags; - u32 bit, val; - u32 gpio_idx = d->hwirq; - int edge; -@@ -185,6 +187,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) - return -EINVAL; - } - -+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); -+ - if (GPIO_EDGE_SEL >= 0) { - val = readl(port->base + GPIO_EDGE_SEL); - if (edge == GPIO_INT_BOTH_EDGES) -@@ -204,15 +208,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type) - - writel(1 << gpio_idx, port->base + GPIO_ISR); - -- return 0; -+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); -+ -+ return port->gc.direction_input(&port->gc, gpio_idx); - } - - static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) - { - void __iomem *reg = port->base; -+ unsigned long flags; - u32 bit, val; - int edge; - -+ raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); -+ - reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ - bit = gpio & 0xf; - val = readl(reg); -@@ -227,9 +236,12 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) - } else { - pr_err("mxc: invalid configuration for GPIO %d: %x\n", - gpio, edge); -- return; -+ goto unlock; - } - writel(val | (edge << (bit << 1)), reg); -+ -+unlock: -+ raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); - } - - /* handle 32 interrupts in one status register */ -diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c -index d2fe76f3f34fd..4860bf3b7e002 100644 ---- a/drivers/gpio/gpio-pca953x.c -+++ b/drivers/gpio/gpio-pca953x.c -@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = { - .reg_bits = 8, - .val_bits 
= 8, - -+ .use_single_read = true, -+ .use_single_write = true, -+ - .readable_reg = pca953x_readable_register, - .writeable_reg = pca953x_writeable_register, - .volatile_reg = pca953x_volatile_register, -@@ -762,11 +765,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin - bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio); - bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio); - -+ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); -+ - if (bitmap_empty(trigger, gc->ngpio)) - return false; - -- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); -- - bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio); - bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio); - bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio); -@@ -894,15 +897,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, - static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert) - { - DECLARE_BITMAP(val, MAX_LINE); -+ u8 regaddr; - int ret; - -- ret = regcache_sync_region(chip->regmap, chip->regs->output, -- chip->regs->output + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, -+ regaddr + NBANK(chip) - 1); - if (ret) - goto out; - -- ret = regcache_sync_region(chip->regmap, chip->regs->direction, -- chip->regs->direction + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, -+ regaddr + NBANK(chip) - 1); - if (ret) - goto out; - -@@ -1108,20 +1114,21 @@ static int pca953x_regcache_sync(struct device *dev) - { - struct pca953x_chip *chip = dev_get_drvdata(dev); - int ret; -+ u8 regaddr; - - /* - * The ordering between direction and output is important, - * sync these registers first and only then sync the rest. 
- */ -- ret = regcache_sync_region(chip->regmap, chip->regs->direction, -- chip->regs->direction + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); - if (ret) { - dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret); - return ret; - } - -- ret = regcache_sync_region(chip->regmap, chip->regs->output, -- chip->regs->output + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); - if (ret) { - dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret); - return ret; -@@ -1129,16 +1136,18 @@ static int pca953x_regcache_sync(struct device *dev) - - #ifdef CONFIG_GPIO_PCA953X_IRQ - if (chip->driver_data & PCA_PCAL) { -- ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH, -- PCAL953X_IN_LATCH + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, -+ regaddr + NBANK(chip) - 1); - if (ret) { - dev_err(dev, "Failed to sync INT latch registers: %d\n", - ret); - return ret; - } - -- ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK, -- PCAL953X_INT_MASK + NBANK(chip)); -+ regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0); -+ ret = regcache_sync_region(chip->regmap, regaddr, -+ regaddr + NBANK(chip) - 1); - if (ret) { - dev_err(dev, "Failed to sync INT mask registers: %d\n", - ret); -@@ -1154,7 +1163,9 @@ static int pca953x_suspend(struct device *dev) - { - struct pca953x_chip *chip = dev_get_drvdata(dev); - -+ mutex_lock(&chip->i2c_lock); - regcache_cache_only(chip->regmap, true); -+ mutex_unlock(&chip->i2c_lock); - - if (atomic_read(&chip->wakeup_path)) - device_set_wakeup_path(dev); -@@ -1177,13 +1188,17 @@ static int pca953x_resume(struct device *dev) - } - } - -+ mutex_lock(&chip->i2c_lock); - regcache_cache_only(chip->regmap, false); - regcache_mark_dirty(chip->regmap); - ret = pca953x_regcache_sync(dev); -- if (ret) -+ if (ret) { -+ mutex_unlock(&chip->i2c_lock); - return ret; -+ } - - ret = regcache_sync(chip->regmap); -+ mutex_unlock(&chip->i2c_lock); - if (ret) { - dev_err(dev, "Failed to restore register map: %d\n", ret); - return ret; -diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c -index eeeb39bc171dc..bd75401b549d1 100644 ---- a/drivers/gpio/gpio-realtek-otto.c -+++ b/drivers/gpio/gpio-realtek-otto.c -@@ -205,7 +205,7 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc) - status = realtek_gpio_read_isr(ctrl, lines_done / 8); - port_pin_count = min(gc->ngpio - lines_done, 8U); - for_each_set_bit(offset, &status, port_pin_count) -- generic_handle_domain_irq(gc->irq.domain, offset); -+ generic_handle_domain_irq(gc->irq.domain, offset + lines_done); - } - - chained_irq_exit(irq_chip, desc); -diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c -index ce63cbd14d69a..a197f698efebb 100644 ---- a/drivers/gpio/gpio-rockchip.c -+++ b/drivers/gpio/gpio-rockchip.c -@@ -19,6 +19,8 @@ - #include - #include - #include -+#include -+#include - #include - - #include "../pinctrl/core.h" -@@ -154,6 +156,12 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip, - unsigned long flags; - u32 data = input ? 
0 : 1; - -+ -+ if (input) -+ pinctrl_gpio_direction_input(bank->pin_base + offset); -+ else -+ pinctrl_gpio_direction_output(bank->pin_base + offset); -+ - raw_spin_lock_irqsave(&bank->slock, flags); - rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr); - raw_spin_unlock_irqrestore(&bank->slock, flags); -@@ -410,20 +418,18 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) - level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type); - polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity); - -- switch (type) { -- case IRQ_TYPE_EDGE_BOTH: -+ if (type == IRQ_TYPE_EDGE_BOTH) { - if (bank->gpio_type == GPIO_TYPE_V2) { -- bank->toggle_edge_mode &= ~mask; - rockchip_gpio_writel_bit(bank, d->hwirq, 1, - bank->gpio_regs->int_bothedge); - goto out; - } else { - bank->toggle_edge_mode |= mask; -- level |= mask; -+ level &= ~mask; - - /* - * Determine gpio state. If 1 next interrupt should be -- * falling otherwise rising. -+ * low otherwise high. - */ - data = readl(bank->reg_base + bank->gpio_regs->ext_port); - if (data & mask) -@@ -431,30 +437,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) - else - polarity |= mask; - } -- break; -- case IRQ_TYPE_EDGE_RISING: -- bank->toggle_edge_mode &= ~mask; -- level |= mask; -- polarity |= mask; -- break; -- case IRQ_TYPE_EDGE_FALLING: -- bank->toggle_edge_mode &= ~mask; -- level |= mask; -- polarity &= ~mask; -- break; -- case IRQ_TYPE_LEVEL_HIGH: -- bank->toggle_edge_mode &= ~mask; -- level &= ~mask; -- polarity |= mask; -- break; -- case IRQ_TYPE_LEVEL_LOW: -- bank->toggle_edge_mode &= ~mask; -- level &= ~mask; -- polarity &= ~mask; -- break; -- default: -- ret = -EINVAL; -- goto out; -+ } else { -+ if (bank->gpio_type == GPIO_TYPE_V2) { -+ rockchip_gpio_writel_bit(bank, d->hwirq, 0, -+ bank->gpio_regs->int_bothedge); -+ } else { -+ bank->toggle_edge_mode &= ~mask; -+ } -+ switch (type) { -+ case IRQ_TYPE_EDGE_RISING: -+ level |= mask; -+ polarity |= mask; -+ break; -+ case IRQ_TYPE_EDGE_FALLING: -+ level |= mask; -+ polarity &= ~mask; -+ break; -+ case IRQ_TYPE_LEVEL_HIGH: -+ level &= ~mask; -+ polarity |= mask; -+ break; -+ case IRQ_TYPE_LEVEL_LOW: -+ level &= ~mask; -+ polarity &= ~mask; -+ break; -+ default: -+ ret = -EINVAL; -+ goto out; -+ } - } - - rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type); -@@ -595,6 +605,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank) - return -ENODATA; - - pctldev = of_pinctrl_get(pctlnp); -+ of_node_put(pctlnp); - if (!pctldev) - return -ENODEV; - -@@ -689,7 +700,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev) - struct device_node *pctlnp = of_get_parent(np); - struct pinctrl_dev *pctldev = NULL; - struct rockchip_pin_bank *bank = NULL; -- struct rockchip_pin_output_deferred *cfg; -+ struct rockchip_pin_deferred *cfg; - static int gpio; - int id, ret; - -@@ -730,15 +741,22 @@ static int rockchip_gpio_probe(struct platform_device *pdev) - return ret; - } - -- while (!list_empty(&bank->deferred_output)) { -- cfg = list_first_entry(&bank->deferred_output, -- struct rockchip_pin_output_deferred, head); -+ while (!list_empty(&bank->deferred_pins)) { -+ cfg = list_first_entry(&bank->deferred_pins, -+ struct rockchip_pin_deferred, head); - list_del(&cfg->head); - -- ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg); -- if (ret) -- dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg); -- -+ switch (cfg->param) { -+ case PIN_CONFIG_OUTPUT: 
-+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg); -+ if (ret) -+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, -+ cfg->arg); -+ break; -+ default: -+ dev_warn(dev, "unknown deferred config param %d\n", cfg->param); -+ break; -+ } - kfree(cfg); - } - -diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c -index 403f9e833d6a3..5ffab0fc1b765 100644 ---- a/drivers/gpio/gpio-sifive.c -+++ b/drivers/gpio/gpio-sifive.c -@@ -44,7 +44,7 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset) - unsigned long flags; - unsigned int trigger; - -- spin_lock_irqsave(&chip->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&chip->gc.bgpio_lock, flags); - trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0; - regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset), - (trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0); -@@ -54,7 +54,7 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset) - (trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0); - regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset), - (trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0); -- spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags); - } - - static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger) -@@ -84,13 +84,13 @@ static void sifive_gpio_irq_enable(struct irq_data *d) - /* Switch to input */ - gc->direction_input(gc, offset); - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - /* Clear any sticky pending interrupts */ - regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - /* Enable interrupts */ - assign_bit(offset, &chip->irq_state, 1); -@@ -116,13 +116,13 @@ static void sifive_gpio_irq_eoi(struct irq_data *d) - u32 bit = BIT(offset); - unsigned long flags; - -- spin_lock_irqsave(&gc->bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - /* Clear all pending interrupts */ - regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit); - regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit); -- spin_unlock_irqrestore(&gc->bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); - - irq_chip_eoi_parent(d); - } -@@ -209,13 +209,18 @@ static int sifive_gpio_probe(struct platform_device *pdev) - return -ENODEV; - } - parent = irq_find_host(irq_parent); -+ of_node_put(irq_parent); - if (!parent) { - dev_err(dev, "no IRQ parent domain\n"); - return -ENODEV; - } - -- for (i = 0; i < ngpio; i++) -- chip->irq_number[i] = platform_get_irq(pdev, i); -+ for (i = 0; i < ngpio; i++) { -+ ret = platform_get_irq(pdev, i); -+ if (ret < 0) -+ return ret; -+ chip->irq_number[i] = ret; -+ } - - ret = bgpio_init(&chip->gc, dev, 4, - chip->base + SIFIVE_GPIO_INPUT_VAL, -@@ -223,7 +228,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) - NULL, - chip->base + SIFIVE_GPIO_OUTPUT_EN, - chip->base + SIFIVE_GPIO_INPUT_EN, -- 0); -+ BGPIOF_READ_OUTPUT_REG_SET); - if (ret) { - dev_err(dev, "unable to init generic GPIO\n"); - return ret; -diff --git a/drivers/gpio/gpio-tb10x.c 
b/drivers/gpio/gpio-tb10x.c -index 718a508d3b2f8..de6afa3f97168 100644 ---- a/drivers/gpio/gpio-tb10x.c -+++ b/drivers/gpio/gpio-tb10x.c -@@ -62,14 +62,14 @@ static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs, - u32 r; - unsigned long flags; - -- spin_lock_irqsave(&gpio->gc.bgpio_lock, flags); -+ raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags); - - r = tb10x_reg_read(gpio, offs); - r = (r & ~mask) | (val & mask); - - tb10x_reg_write(gpio, offs, r); - -- spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags); -+ raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags); - } - - static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c -index c99858f40a27e..00762de3d4096 100644 ---- a/drivers/gpio/gpio-tegra186.c -+++ b/drivers/gpio/gpio-tegra186.c -@@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, - return offset + pin; - } - -+#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) -+ - static void tegra186_irq_ack(struct irq_data *data) - { -- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); -+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); -+ struct tegra_gpio *gpio = to_tegra_gpio(gc); - void __iomem *base; - - base = tegra186_gpio_get_base(gpio, data->hwirq); -@@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data) - - static void tegra186_irq_mask(struct irq_data *data) - { -- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); -+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); -+ struct tegra_gpio *gpio = to_tegra_gpio(gc); - void __iomem *base; - u32 value; - -@@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data) - - static void tegra186_irq_unmask(struct irq_data *data) - { -- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); -+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); -+ struct tegra_gpio *gpio = to_tegra_gpio(gc); - void __iomem *base; - u32 value; - -@@ -381,7 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data) - - static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) - { -- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); -+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data); -+ struct tegra_gpio *gpio = to_tegra_gpio(gc); - void __iomem *base; - u32 value; - -diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c -index 423b7bc30ae88..03a523a6d6fa4 100644 ---- a/drivers/gpio/gpio-tps68470.c -+++ b/drivers/gpio/gpio-tps68470.c -@@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset, - struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc); - struct regmap *regmap = tps68470_gpio->tps68470_regmap; - -+ /* Set the initial value */ -+ tps68470_gpio_set(gc, offset, value); -+ - /* rest are always outputs */ - if (offset >= TPS68470_N_REGULAR_GPIO) - return 0; - -- /* Set the initial value */ -- tps68470_gpio_set(gc, offset, value); -- - return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset), - TPS68470_GPIO_MODE_MASK, - TPS68470_GPIO_MODE_OUT_CMOS); -diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c -index d885032cf814d..d918d2df4de2c 100644 ---- a/drivers/gpio/gpio-ts4900.c -+++ b/drivers/gpio/gpio-ts4900.c -@@ -1,7 +1,7 @@ - /* - * Digital I/O driver for Technologic Systems I2C FPGA Core - * -- * Copyright (C) 2015 Technologic Systems -+ * Copyright (C) 2015, 2018 Technologic Systems - * 
Copyright (C) 2016 Savoir-Faire Linux - * - * This program is free software; you can redistribute it and/or -@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, - { - struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - -- /* -- * This will clear the output enable bit, the other bits are -- * dontcare when this is cleared -+ /* Only clear the OE bit here, requires a RMW. Prevents potential issue -+ * with OE and data getting to the physical pin at different times. - */ -- return regmap_write(priv->regmap, offset, 0); -+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); - } - - static int ts4900_gpio_direction_output(struct gpio_chip *chip, - unsigned int offset, int value) - { - struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); -+ unsigned int reg; - int ret; - -+ /* If changing from an input to an output, we need to first set the -+ * proper data bit to what is requested and then set OE bit. This -+ * prevents a glitch that can occur on the IO line -+ */ -+ regmap_read(priv->regmap, offset, ®); -+ if (!(reg & TS4900_GPIO_OE)) { -+ if (value) -+ reg = TS4900_GPIO_OUT; -+ else -+ reg &= ~TS4900_GPIO_OUT; -+ -+ regmap_write(priv->regmap, offset, reg); -+ } -+ - if (value) - ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | - TS4900_GPIO_OUT); -diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c -index e0f2b67558e74..edb28af7ba3b0 100644 ---- a/drivers/gpio/gpio-vf610.c -+++ b/drivers/gpio/gpio-vf610.c -@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, - { - struct vf610_gpio_port *port = gpiochip_get_data(chip); - unsigned long mask = BIT(gpio); -+ u32 val; - -- if (port->sdata && port->sdata->have_paddr) -- vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR); -+ if (port->sdata && port->sdata->have_paddr) { -+ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); -+ val |= mask; -+ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); -+ } - - vf610_gpio_set(chip, gpio, value); - -@@ -300,7 +304,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) - gc = &port->gc; - gc->of_node = np; - gc->parent = dev; -- gc->label = "vf610-gpio"; -+ gc->label = dev_name(dev); - gc->ngpio = VF610_GPIO_PER_PORT; - gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT; - -diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c -index d24f1c9264bc9..dd3b23c9580b1 100644 ---- a/drivers/gpio/gpio-virtio.c -+++ b/drivers/gpio/gpio-virtio.c -@@ -81,11 +81,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio, - virtqueue_kick(vgpio->request_vq); - mutex_unlock(&vgpio->lock); - -- if (!wait_for_completion_timeout(&line->completion, HZ)) { -- dev_err(dev, "GPIO operation timed out\n"); -- ret = -ETIMEDOUT; -- goto out; -- } -+ wait_for_completion(&line->completion); - - if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) { - dev_err(dev, "GPIO request failed: %d\n", gpio); -diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c -index 47455810bdb91..e6534ea1eaa7a 100644 ---- a/drivers/gpio/gpio-visconti.c -+++ b/drivers/gpio/gpio-visconti.c -@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev) - struct gpio_irq_chip *girq; - struct irq_domain *parent; - struct device_node *irq_parent; -- struct fwnode_handle *fwnode; - int ret; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev) - } - - parent = 
irq_find_host(irq_parent); -+ of_node_put(irq_parent); - if (!parent) { - dev_err(dev, "No IRQ parent domain\n"); - return -ENODEV; - } - -- fwnode = of_node_to_fwnode(irq_parent); -- of_node_put(irq_parent); -- - ret = bgpio_init(&priv->gpio_chip, dev, 4, - priv->base + GPIO_IDATA, - priv->base + GPIO_OSET, -@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev) - - girq = &priv->gpio_chip.irq; - girq->chip = irq_chip; -- girq->fwnode = fwnode; -+ girq->fwnode = of_node_to_fwnode(dev->of_node); - girq->parent_domain = parent; - girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq; - girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec; -diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c -index 98cd715ccc33c..8d09b619c1669 100644 ---- a/drivers/gpio/gpio-vr41xx.c -+++ b/drivers/gpio/gpio-vr41xx.c -@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq) - printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", - maskl, pendl, maskh, pendh); - -- atomic_inc(&irq_err_count); -- - return -EINVAL; - } - -diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c -index 7f8f5b02e31d5..4b61d975cc0ec 100644 ---- a/drivers/gpio/gpio-winbond.c -+++ b/drivers/gpio/gpio-winbond.c -@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset) - unsigned long *base = gpiochip_get_data(gc); - const struct winbond_gpio_info *info; - bool val; -+ int ret; - - winbond_gpio_get_info(&offset, &info); - -- val = winbond_sio_enter(*base); -- if (val) -- return val; -+ ret = winbond_sio_enter(*base); -+ if (ret) -+ return ret; - - winbond_sio_select_logical(*base, info->dev); - -diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c -index a1b66338d077d..db616ae560a3c 100644 ---- a/drivers/gpio/gpio-xilinx.c -+++ b/drivers/gpio/gpio-xilinx.c -@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v) - const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5); - - map[index] &= ~(0xFFFFFFFFul << offset); -- map[index] |= v << offset; -+ map[index] |= (unsigned long)v << offset; - } - - static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch) -diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c -index 47712b6903b51..53be0bdf2bc38 100644 ---- a/drivers/gpio/gpiolib-acpi.c -+++ b/drivers/gpio/gpiolib-acpi.c -@@ -311,7 +311,8 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, - if (IS_ERR(desc)) - return desc; - -- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout); -+ /* ACPI uses hundredths of milliseconds units */ -+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); - if (ret) - dev_warn(chip->parent, - "Failed to set debounce-timeout for pin 0x%04X, err %d\n", -@@ -391,8 +392,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, - pin = agpio->pin_table[0]; - - if (pin <= 255) { -- char ev_name[5]; -- sprintf(ev_name, "_%c%02hhX", -+ char ev_name[8]; -+ sprintf(ev_name, "_%c%02X", - agpio->triggering == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', - pin); - if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) -@@ -1052,17 +1053,25 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind - if (ret < 0) - return ret; - -- ret = gpio_set_debounce_timeout(desc, info.debounce); -+ /* ACPI uses hundredths of milliseconds units */ -+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10); - if (ret) - return ret; - - irq_flags = acpi_dev_get_irq_type(info.triggering, - info.polarity); - -- /* Set type if specified and different than the current one */ -- if (irq_flags != IRQ_TYPE_NONE && -- irq_flags != irq_get_trigger_type(irq)) -- irq_set_irq_type(irq, irq_flags); -+ /* -+ * If the IRQ is not already in use then set type -+ * if specified and different than the current one. -+ */ -+ if (can_request_irq(irq, irq_flags)) { -+ if (irq_flags != IRQ_TYPE_NONE && -+ irq_flags != irq_get_trigger_type(irq)) -+ irq_set_irq_type(irq, irq_flags); -+ } else { -+ dev_dbg(&adev->dev, "IRQ %d already in use\n", irq); -+ } - - return irq; - } -diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c -index c7b5446d01fd2..2a2e0691462bf 100644 ---- a/drivers/gpio/gpiolib-cdev.c -+++ b/drivers/gpio/gpiolib-cdev.c -@@ -54,6 +54,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); - * interface to gpiolib GPIOs via ioctl()s. - */ - -+typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); -+typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); -+typedef ssize_t (*read_fn)(struct file *, char __user *, -+ size_t count, loff_t *); -+ -+static __poll_t call_poll_locked(struct file *file, -+ struct poll_table_struct *wait, -+ struct gpio_device *gdev, poll_fn func) -+{ -+ __poll_t ret; -+ -+ down_read(&gdev->sem); -+ ret = func(file, wait); -+ up_read(&gdev->sem); -+ -+ return ret; -+} -+ -+static long call_ioctl_locked(struct file *file, unsigned int cmd, -+ unsigned long arg, struct gpio_device *gdev, -+ ioctl_fn func) -+{ -+ long ret; -+ -+ down_read(&gdev->sem); -+ ret = func(file, cmd, arg); -+ up_read(&gdev->sem); -+ -+ return ret; -+} -+ -+static ssize_t call_read_locked(struct file *file, char __user *buf, -+ size_t count, loff_t *f_ps, -+ struct gpio_device *gdev, read_fn func) -+{ -+ ssize_t ret; -+ -+ down_read(&gdev->sem); -+ ret = func(file, buf, count, f_ps); -+ up_read(&gdev->sem); -+ -+ return ret; -+} -+ - /* - * GPIO line handle management - */ -@@ -190,23 +234,25 @@ static long linehandle_set_config(struct linehandle_state *lh, - return 0; - } - --static long linehandle_ioctl(struct file *file, unsigned int cmd, -- unsigned long arg) -+static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, -+ unsigned long arg) - { - struct linehandle_state *lh = file->private_data; - void __user *ip = (void __user *)arg; - struct gpiohandle_data ghd; - DECLARE_BITMAP(vals, GPIOHANDLES_MAX); -- int i; -+ unsigned int i; -+ int ret; - -- if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) { -- /* NOTE: It's ok to read values of output lines. 
*/ -- int ret = gpiod_get_array_value_complex(false, -- true, -- lh->num_descs, -- lh->descs, -- NULL, -- vals); -+ if (!lh->gdev->chip) -+ return -ENODEV; -+ -+ switch (cmd) { -+ case GPIOHANDLE_GET_LINE_VALUES_IOCTL: -+ /* NOTE: It's okay to read values of output lines */ -+ ret = gpiod_get_array_value_complex(false, true, -+ lh->num_descs, lh->descs, -+ NULL, vals); - if (ret) - return ret; - -@@ -218,7 +264,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, - return -EFAULT; - - return 0; -- } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) { -+ case GPIOHANDLE_SET_LINE_VALUES_IOCTL: - /* - * All line descriptors were created at once with the same - * flags so just check if the first one is really output. -@@ -240,10 +286,20 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, - lh->descs, - NULL, - vals); -- } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) { -+ case GPIOHANDLE_SET_CONFIG_IOCTL: - return linehandle_set_config(lh, ip); -+ default: -+ return -EINVAL; - } -- return -EINVAL; -+} -+ -+static long linehandle_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ struct linehandle_state *lh = file->private_data; -+ -+ return call_ioctl_locked(file, cmd, arg, lh->gdev, -+ linehandle_ioctl_unlocked); - } - - #ifdef CONFIG_COMPAT -@@ -330,7 +386,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) - goto out_free_lh; - } - -- ret = gpiod_request(desc, lh->label); -+ ret = gpiod_request_user(desc, lh->label); - if (ret) - goto out_free_lh; - lh->descs[i] = desc; -@@ -1182,20 +1238,34 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) - return ret; - } - --static long linereq_ioctl(struct file *file, unsigned int cmd, -- unsigned long arg) -+static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, -+ unsigned long arg) - { - struct linereq *lr = file->private_data; - void __user *ip = (void __user *)arg; - -- if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL) -+ if (!lr->gdev->chip) -+ return -ENODEV; -+ -+ switch (cmd) { -+ case GPIO_V2_LINE_GET_VALUES_IOCTL: - return linereq_get_values(lr, ip); -- else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL) -+ case GPIO_V2_LINE_SET_VALUES_IOCTL: - return linereq_set_values(lr, ip); -- else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL) -+ case GPIO_V2_LINE_SET_CONFIG_IOCTL: - return linereq_set_config(lr, ip); -+ default: -+ return -EINVAL; -+ } -+} - -- return -EINVAL; -+static long linereq_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ struct linereq *lr = file->private_data; -+ -+ return call_ioctl_locked(file, cmd, arg, lr->gdev, -+ linereq_ioctl_unlocked); - } - - #ifdef CONFIG_COMPAT -@@ -1206,12 +1276,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd, - } - #endif - --static __poll_t linereq_poll(struct file *file, -- struct poll_table_struct *wait) -+static __poll_t linereq_poll_unlocked(struct file *file, -+ struct poll_table_struct *wait) - { - struct linereq *lr = file->private_data; - __poll_t events = 0; - -+ if (!lr->gdev->chip) -+ return EPOLLHUP | EPOLLERR; -+ - poll_wait(file, &lr->wait, wait); - - if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, -@@ -1221,16 +1294,25 @@ static __poll_t linereq_poll(struct file *file, - return events; - } - --static ssize_t linereq_read(struct file *file, -- char __user *buf, -- size_t count, -- loff_t *f_ps) -+static __poll_t linereq_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ struct linereq *lr = file->private_data; -+ 
-+ return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); -+} -+ -+static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, -+ size_t count, loff_t *f_ps) - { - struct linereq *lr = file->private_data; - struct gpio_v2_line_event le; - ssize_t bytes_read = 0; - int ret; - -+ if (!lr->gdev->chip) -+ return -ENODEV; -+ - if (count < sizeof(le)) - return -EINVAL; - -@@ -1275,6 +1357,15 @@ static ssize_t linereq_read(struct file *file, - return bytes_read; - } - -+static ssize_t linereq_read(struct file *file, char __user *buf, -+ size_t count, loff_t *f_ps) -+{ -+ struct linereq *lr = file->private_data; -+ -+ return call_read_locked(file, buf, count, f_ps, lr->gdev, -+ linereq_read_unlocked); -+} -+ - static void linereq_free(struct linereq *lr) - { - unsigned int i; -@@ -1378,7 +1469,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip) - goto out_free_linereq; - } - -- ret = gpiod_request(desc, lr->label); -+ ret = gpiod_request_user(desc, lr->label); - if (ret) - goto out_free_linereq; - -@@ -1490,12 +1581,15 @@ struct lineevent_state { - (GPIOEVENT_REQUEST_RISING_EDGE | \ - GPIOEVENT_REQUEST_FALLING_EDGE) - --static __poll_t lineevent_poll(struct file *file, -- struct poll_table_struct *wait) -+static __poll_t lineevent_poll_unlocked(struct file *file, -+ struct poll_table_struct *wait) - { - struct lineevent_state *le = file->private_data; - __poll_t events = 0; - -+ if (!le->gdev->chip) -+ return EPOLLHUP | EPOLLERR; -+ - poll_wait(file, &le->wait, wait); - - if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) -@@ -1504,15 +1598,21 @@ static __poll_t lineevent_poll(struct file *file, - return events; - } - -+static __poll_t lineevent_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ struct lineevent_state *le = file->private_data; -+ -+ return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked); -+} -+ - struct compat_gpioeevent_data { - compat_u64 timestamp; - u32 id; - }; - --static ssize_t lineevent_read(struct file *file, -- char __user *buf, -- size_t count, -- loff_t *f_ps) -+static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, -+ size_t count, loff_t *f_ps) - { - struct lineevent_state *le = file->private_data; - struct gpioevent_data ge; -@@ -1520,6 +1620,9 @@ static ssize_t lineevent_read(struct file *file, - ssize_t ge_size; - int ret; - -+ if (!le->gdev->chip) -+ return -ENODEV; -+ - /* - * When compatible system call is being used the struct gpioevent_data, - * in case of at least ia32, has different size due to the alignment -@@ -1577,6 +1680,15 @@ static ssize_t lineevent_read(struct file *file, - return bytes_read; - } - -+static ssize_t lineevent_read(struct file *file, char __user *buf, -+ size_t count, loff_t *f_ps) -+{ -+ struct lineevent_state *le = file->private_data; -+ -+ return call_read_locked(file, buf, count, f_ps, le->gdev, -+ lineevent_read_unlocked); -+} -+ - static void lineevent_free(struct lineevent_state *le) - { - if (le->irq) -@@ -1594,13 +1706,16 @@ static int lineevent_release(struct inode *inode, struct file *file) - return 0; - } - --static long lineevent_ioctl(struct file *file, unsigned int cmd, -- unsigned long arg) -+static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, -+ unsigned long arg) - { - struct lineevent_state *le = file->private_data; - void __user *ip = (void __user *)arg; - struct gpiohandle_data ghd; - -+ if (!le->gdev->chip) -+ return -ENODEV; -+ - /* - * We can get the value for an event 
line but not set it, - * because it is input by definition. -@@ -1623,6 +1738,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, - return -EINVAL; - } - -+static long lineevent_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ struct lineevent_state *le = file->private_data; -+ -+ return call_ioctl_locked(file, cmd, arg, le->gdev, -+ lineevent_ioctl_unlocked); -+} -+ - #ifdef CONFIG_COMPAT - static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, - unsigned long arg) -@@ -1764,7 +1888,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - } - } - -- ret = gpiod_request(desc, le->label); -+ ret = gpiod_request_user(desc, le->label); - if (ret) - goto out_free_le; - le->desc = desc; -@@ -1784,7 +1908,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - ret = -ENODEV; - goto out_free_le; - } -- le->irq = irq; - - if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) - irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? -@@ -1798,7 +1921,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - init_waitqueue_head(&le->wait); - - /* Request a thread to read the events */ -- ret = request_threaded_irq(le->irq, -+ ret = request_threaded_irq(irq, - lineevent_irq_handler, - lineevent_irq_thread, - irqflags, -@@ -1807,6 +1930,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - if (ret) - goto out_free_le; - -+ le->irq = irq; -+ - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); - if (fd < 0) { - ret = fd; -@@ -2113,28 +2238,30 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) - return -ENODEV; - - /* Fill in the struct and pass to userspace */ -- if (cmd == GPIO_GET_CHIPINFO_IOCTL) { -+ switch (cmd) { -+ case GPIO_GET_CHIPINFO_IOCTL: - return chipinfo_get(cdev, ip); - #ifdef CONFIG_GPIO_CDEV_V1 -- } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { -+ case GPIO_GET_LINEHANDLE_IOCTL: - return linehandle_create(gdev, ip); -- } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { -+ case GPIO_GET_LINEEVENT_IOCTL: - return lineevent_create(gdev, ip); -- } else if (cmd == GPIO_GET_LINEINFO_IOCTL || -- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { -- return lineinfo_get_v1(cdev, ip, -- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL); -+ case GPIO_GET_LINEINFO_IOCTL: -+ return lineinfo_get_v1(cdev, ip, false); -+ case GPIO_GET_LINEINFO_WATCH_IOCTL: -+ return lineinfo_get_v1(cdev, ip, true); - #endif /* CONFIG_GPIO_CDEV_V1 */ -- } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL || -- cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) { -- return lineinfo_get(cdev, ip, -- cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL); -- } else if (cmd == GPIO_V2_GET_LINE_IOCTL) { -+ case GPIO_V2_GET_LINEINFO_IOCTL: -+ return lineinfo_get(cdev, ip, false); -+ case GPIO_V2_GET_LINEINFO_WATCH_IOCTL: -+ return lineinfo_get(cdev, ip, true); -+ case GPIO_V2_GET_LINE_IOCTL: - return linereq_create(gdev, ip); -- } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { -+ case GPIO_GET_LINEINFO_UNWATCH_IOCTL: - return lineinfo_unwatch(cdev, ip); -+ default: -+ return -EINVAL; - } -- return -EINVAL; - } - - #ifdef CONFIG_COMPAT -@@ -2176,12 +2303,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb, - return NOTIFY_OK; - } - --static __poll_t lineinfo_watch_poll(struct file *file, -- struct poll_table_struct *pollt) -+static __poll_t lineinfo_watch_poll_unlocked(struct file *file, -+ struct poll_table_struct *pollt) - { - struct gpio_chardev_data *cdev = file->private_data; - __poll_t events = 0; - 
-+ if (!cdev->gdev->chip) -+ return EPOLLHUP | EPOLLERR; -+ - poll_wait(file, &cdev->wait, pollt); - - if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, -@@ -2191,8 +2321,17 @@ static __poll_t lineinfo_watch_poll(struct file *file, - return events; - } - --static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, -- size_t count, loff_t *off) -+static __poll_t lineinfo_watch_poll(struct file *file, -+ struct poll_table_struct *pollt) -+{ -+ struct gpio_chardev_data *cdev = file->private_data; -+ -+ return call_poll_locked(file, pollt, cdev->gdev, -+ lineinfo_watch_poll_unlocked); -+} -+ -+static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, -+ size_t count, loff_t *off) - { - struct gpio_chardev_data *cdev = file->private_data; - struct gpio_v2_line_info_changed event; -@@ -2200,6 +2339,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, - int ret; - size_t event_size; - -+ if (!cdev->gdev->chip) -+ return -ENODEV; -+ - #ifndef CONFIG_GPIO_CDEV_V1 - event_size = sizeof(struct gpio_v2_line_info_changed); - if (count < event_size) -@@ -2267,6 +2409,15 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, - return bytes_read; - } - -+static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, -+ size_t count, loff_t *off) -+{ -+ struct gpio_chardev_data *cdev = file->private_data; -+ -+ return call_read_locked(file, buf, count, off, cdev->gdev, -+ lineinfo_watch_read_unlocked); -+} -+ - /** - * gpio_chrdev_open() - open the chardev for ioctl operations - * @inode: inode for this chardev -@@ -2280,13 +2431,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) - struct gpio_chardev_data *cdev; - int ret = -ENOMEM; - -+ down_read(&gdev->sem); -+ - /* Fail on open if the backing gpiochip is gone */ -- if (!gdev->chip) -- return -ENODEV; -+ if (!gdev->chip) { -+ ret = -ENODEV; -+ goto out_unlock; -+ } - - cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); - if (!cdev) -- return -ENOMEM; -+ goto out_unlock; - - cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); - if (!cdev->watched_lines) -@@ -2309,6 +2464,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) - if (ret) - goto out_unregister_notifier; - -+ up_read(&gdev->sem); -+ - return ret; - - out_unregister_notifier: -@@ -2318,6 +2475,8 @@ out_free_bitmap: - bitmap_free(cdev->watched_lines); - out_free_cdev: - kfree(cdev); -+out_unlock: -+ up_read(&gdev->sem); - return ret; - } - -diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c -index 0ad288ab6262d..7a96eb626a08b 100644 ---- a/drivers/gpio/gpiolib-of.c -+++ b/drivers/gpio/gpiolib-of.c -@@ -863,7 +863,8 @@ int of_mm_gpiochip_add_data(struct device_node *np, - if (mm_gc->save_regs) - mm_gc->save_regs(mm_gc); - -- mm_gc->gc.of_node = np; -+ of_node_put(mm_gc->gc.of_node); -+ mm_gc->gc.of_node = of_node_get(np); - - ret = gpiochip_add_data(gc, data); - if (ret) -@@ -871,6 +872,7 @@ int of_mm_gpiochip_add_data(struct device_node *np, - - return 0; - err2: -+ of_node_put(np); - iounmap(mm_gc->regs); - err1: - kfree(gc->label); -@@ -912,7 +914,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip) - i, &start); - of_property_read_u32_index(np, "gpio-reserved-ranges", - i + 1, &count); -- if (start >= chip->ngpio || start + count >= chip->ngpio) -+ if (start >= chip->ngpio || start + count > chip->ngpio) - continue; - - bitmap_clear(chip->valid_mask, start, count); -@@ -933,6 +935,11 @@ static int 
of_gpiochip_add_pin_range(struct gpio_chip *chip) - if (!np) - return 0; - -+ if (!of_property_read_bool(np, "gpio-ranges") && -+ chip->of_gpio_ranges_fallback) { -+ return chip->of_gpio_ranges_fallback(chip, np); -+ } -+ - group_names = of_find_property(np, group_names_propname, NULL); - - for (;; index++) { -diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c -index 4098bc7f88b7e..44c1ad51b3fe9 100644 ---- a/drivers/gpio/gpiolib-sysfs.c -+++ b/drivers/gpio/gpiolib-sysfs.c -@@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class, - * they may be undone on its behalf too. - */ - -- status = gpiod_request(desc, "sysfs"); -- if (status) { -- if (status == -EPROBE_DEFER) -- status = -ENODEV; -+ status = gpiod_request_user(desc, "sysfs"); -+ if (status) - goto done; -- } - - status = gpiod_set_transitory(desc, false); - if (!status) { -diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c -index d1b9b721218f2..f9fdd117c654c 100644 ---- a/drivers/gpio/gpiolib.c -+++ b/drivers/gpio/gpiolib.c -@@ -189,9 +189,8 @@ static int gpiochip_find_base(int ngpio) - /* found a free space? */ - if (gdev->base + gdev->ngpio <= base) - break; -- else -- /* nope, check the space right before the chip */ -- base = gdev->base - ngpio; -+ /* nope, check the space right before the chip */ -+ base = gdev->base - ngpio; - } - - if (gpio_is_valid(base)) { -@@ -525,12 +524,13 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) - if (ret) - return ret; - -+ /* From this point, the .release() function cleans up gpio_device */ -+ gdev->dev.release = gpiodevice_release; -+ - ret = gpiochip_sysfs_register(gdev); - if (ret) - goto err_remove_device; - -- /* From this point, the .release() function cleans up gpio_device */ -- gdev->dev.release = gpiodevice_release; - dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base, - gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic"); - -@@ -594,11 +594,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - struct lock_class_key *request_key) - { - struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL; -- unsigned long flags; -- int ret = 0; -- unsigned i; -- int base = gc->base; - struct gpio_device *gdev; -+ unsigned long flags; -+ unsigned int i; -+ u32 ngpios = 0; -+ int base = 0; -+ int ret = 0; - - /* - * First: allocate and populate the internal stat container, and -@@ -640,22 +641,43 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - else - gdev->owner = THIS_MODULE; - -- gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); -- if (!gdev->descs) { -- ret = -ENOMEM; -- goto err_free_dev_name; -+ /* -+ * Try the device properties if the driver didn't supply the number -+ * of GPIO lines. -+ */ -+ ngpios = gc->ngpio; -+ if (ngpios == 0) { -+ ret = device_property_read_u32(&gdev->dev, "ngpios", &ngpios); -+ if (ret == -ENODATA) -+ /* -+ * -ENODATA means that there is no property found and -+ * we want to issue the error message to the user. -+ * Besides that, we want to return different error code -+ * to state that supplied value is not valid. 
-+ */ -+ ngpios = 0; -+ else if (ret) -+ goto err_free_dev_name; -+ -+ gc->ngpio = ngpios; - } - - if (gc->ngpio == 0) { - chip_err(gc, "tried to insert a GPIO chip with zero lines\n"); - ret = -EINVAL; -- goto err_free_descs; -+ goto err_free_dev_name; - } - - if (gc->ngpio > FASTPATH_NGPIO) - chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n", - gc->ngpio, FASTPATH_NGPIO); - -+ gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL); -+ if (!gdev->descs) { -+ ret = -ENOMEM; -+ goto err_free_dev_name; -+ } -+ - gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); - if (!gdev->label) { - ret = -ENOMEM; -@@ -674,11 +696,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - * it may be a pipe dream. It will not happen before we get rid - * of the sysfs interface anyways. - */ -+ base = gc->base; - if (base < 0) { - base = gpiochip_find_base(gc->ngpio); - if (base < 0) { -- ret = base; - spin_unlock_irqrestore(&gpio_lock, flags); -+ ret = base; -+ base = 0; - goto err_free_label; - } - /* -@@ -703,6 +727,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - spin_unlock_irqrestore(&gpio_lock, flags); - - BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier); -+ init_rwsem(&gdev->sem); - - #ifdef CONFIG_PINCTRL - INIT_LIST_HEAD(&gdev->pin_ranges); -@@ -786,6 +811,11 @@ err_remove_of_chip: - err_free_gpiochip_mask: - gpiochip_remove_pin_ranges(gc); - gpiochip_free_valid_mask(gc); -+ if (gdev->dev.release) { -+ /* release() has been registered by gpiochip_setup_dev() */ -+ put_device(&gdev->dev); -+ goto err_print_message; -+ } - err_remove_from_list: - spin_lock_irqsave(&gpio_lock, flags); - list_del(&gdev->list); -@@ -799,13 +829,14 @@ err_free_dev_name: - err_free_ida: - ida_free(&gpio_ida, gdev->id); - err_free_gdev: -+ kfree(gdev); -+err_print_message: - /* failures here can mean systems won't boot... */ - if (ret != -EPROBE_DEFER) { - pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, -- gdev->base, gdev->base + gdev->ngpio - 1, -+ base, base + (int)ngpios - 1, - gc->label ? : "generic", ret); - } -- kfree(gdev); - return ret; - } - EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key); -@@ -835,6 +866,8 @@ void gpiochip_remove(struct gpio_chip *gc) - unsigned long flags; - unsigned int i; - -+ down_write(&gdev->sem); -+ - /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ - gpiochip_sysfs_unregister(gdev); - gpiochip_free_hogs(gc); -@@ -869,6 +902,7 @@ void gpiochip_remove(struct gpio_chip *gc) - * gone. - */ - gcdev_unregister(gdev); -+ up_write(&gdev->sem); - put_device(&gdev->dev); - } - EXPORT_SYMBOL_GPL(gpiochip_remove); -@@ -1368,6 +1402,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset) - { - struct irq_domain *domain = gc->irq.domain; - -+#ifdef CONFIG_GPIOLIB_IRQCHIP -+ /* -+ * Avoid race condition with other code, which tries to lookup -+ * an IRQ before the irqchip has been properly registered, -+ * i.e. while gpiochip is still being brought up. 
-+ */ -+ if (!gc->irq.initialized) -+ return -EPROBE_DEFER; -+#endif -+ - if (!gpiochip_irqchip_irq_valid(gc, offset)) - return -ENXIO; - -@@ -1534,9 +1578,14 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, - } - - if (gc->irq.parent_handler) { -- void *data = gc->irq.parent_handler_data ?: gc; -- - for (i = 0; i < gc->irq.num_parents; i++) { -+ void *data; -+ -+ if (gc->irq.per_parent_data) -+ data = gc->irq.parent_handler_data_array[i]; -+ else -+ data = gc->irq.parent_handler_data ?: gc; -+ - /* - * The parent IRQ chip is already using the chip_data - * for this IRQ chip, so our callbacks simply use the -@@ -1550,6 +1599,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, - - gpiochip_set_irq_hooks(gc); - -+ /* -+ * Using barrier() here to prevent compiler from reordering -+ * gc->irq.initialized before initialization of above -+ * GPIO chip irq members. -+ */ -+ barrier(); -+ -+ gc->irq.initialized = true; -+ - acpi_gpiochip_request_interrupts(gc); - - return 0; -@@ -1625,6 +1683,14 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc, - gc->to_irq = gpiochip_to_irq; - gc->irq.domain = domain; - -+ /* -+ * Using barrier() here to prevent compiler from reordering -+ * gc->irq.initialized before adding irqdomain. -+ */ -+ barrier(); -+ -+ gc->irq.initialized = true; -+ - return 0; - } - EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain); -@@ -2186,6 +2252,16 @@ static int gpio_set_bias(struct gpio_desc *desc) - return gpio_set_config_with_argument_optional(desc, bias, arg); - } - -+/** -+ * gpio_set_debounce_timeout() - Set debounce timeout -+ * @desc: GPIO descriptor to set the debounce timeout -+ * @debounce: Debounce timeout in microseconds -+ * -+ * The function calls the certain GPIO driver to set debounce timeout -+ * in the hardware. -+ * -+ * Returns 0 on success, or negative error code otherwise. -+ */ - int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce) - { - return gpio_set_config_with_argument_optional(desc, -@@ -2350,8 +2426,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) - ret = gpiod_direction_input(desc); - goto set_output_flag; - } -- } -- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { -+ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { - ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); - if (!ret) - goto set_output_value; -@@ -2508,9 +2583,9 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) - static int gpio_chip_get_multiple(struct gpio_chip *gc, - unsigned long *mask, unsigned long *bits) - { -- if (gc->get_multiple) { -+ if (gc->get_multiple) - return gc->get_multiple(gc, mask, bits); -- } else if (gc->get) { -+ if (gc->get) { - int i, value; - - for_each_set_bit(i, mask, gc->ngpio) { -@@ -3106,6 +3181,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) - - return retirq; - } -+#ifdef CONFIG_GPIOLIB_IRQCHIP -+ if (gc->irq.chip) { -+ /* -+ * Avoid race condition with other code, which tries to lookup -+ * an IRQ before the irqchip has been properly registered, -+ * i.e. while gpiochip is still being brought up. -+ */ -+ return -EPROBE_DEFER; -+ } -+#endif - return -ENXIO; - } - EXPORT_SYMBOL_GPL(gpiod_to_irq); -diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h -index 30bc3f80f83e6..73b732a1d9c94 100644 ---- a/drivers/gpio/gpiolib.h -+++ b/drivers/gpio/gpiolib.h -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #define GPIOCHIP_NAME "gpiochip" - -@@ -37,6 +38,12 @@ - * or name of the IP component in a System on Chip. 
- * @data: per-instance data assigned by the driver - * @list: links gpio_device:s together for traversal -+ * @notifier: used to notify subscribers about lines being requested, released -+ * or reconfigured -+ * @sem: protects the structure from a NULL-pointer dereference of @chip by -+ * user-space operations when the device gets unregistered during -+ * a hot-unplug event -+ * @pin_ranges: range of pins served by the GPIO driver - * - * This state container holds most of the runtime variable data - * for a GPIO device and can hold references and live on after the -@@ -57,6 +64,7 @@ struct gpio_device { - void *data; - struct list_head list; - struct blocking_notifier_head notifier; -+ struct rw_semaphore sem; - - #ifdef CONFIG_PINCTRL - /* -@@ -72,6 +80,20 @@ struct gpio_device { - /* gpio suffixes used for ACPI and device tree lookup */ - static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" }; - -+/** -+ * struct gpio_array - Opaque descriptor for a structure of GPIO array attributes -+ * -+ * @desc: Array of pointers to the GPIO descriptors -+ * @size: Number of elements in desc -+ * @chip: Parent GPIO chip -+ * @get_mask: Get mask used in fastpath -+ * @set_mask: Set mask used in fastpath -+ * @invert_mask: Invert mask used in fastpath -+ * -+ * This structure is attached to struct gpiod_descs obtained from -+ * gpiod_get_array() and can be passed back to get/set array functions in order -+ * to activate fast processing path if applicable. -+ */ - struct gpio_array { - struct gpio_desc **desc; - unsigned int size; -@@ -96,6 +118,23 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, - extern spinlock_t gpio_lock; - extern struct list_head gpio_devices; - -+ -+/** -+ * struct gpio_desc - Opaque descriptor for a GPIO -+ * -+ * @gdev: Pointer to the parent GPIO device -+ * @flags: Binary descriptor flags -+ * @label: Name of the consumer -+ * @name: Line name -+ * @hog: Pointer to the device node that hogs this line (if any) -+ * @debounce_period_us: Debounce period in microseconds -+ * -+ * These are obtained using gpiod_get() and are preferable to the old -+ * integer-based handles. -+ * -+ * Contrary to integers, a pointer to a &struct gpio_desc is guaranteed to be -+ * valid until the GPIO is released. 
-+ */ - struct gpio_desc { - struct gpio_device *gdev; - unsigned long flags; -@@ -135,6 +174,18 @@ struct gpio_desc { - - int gpiod_request(struct gpio_desc *desc, const char *label); - void gpiod_free(struct gpio_desc *desc); -+ -+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label) -+{ -+ int ret; -+ -+ ret = gpiod_request(desc, label); -+ if (ret == -EPROBE_DEFER) -+ ret = -ENODEV; -+ -+ return ret; -+} -+ - int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, - unsigned long lflags, enum gpiod_flags dflags); - int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce); -diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h -index 5b393622f5920..a0f0a17e224fe 100644 ---- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h -+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h -@@ -119,6 +119,7 @@ - #define CONNECTOR_OBJECT_ID_eDP 0x14 - #define CONNECTOR_OBJECT_ID_MXM 0x15 - #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 -+#define CONNECTOR_OBJECT_ID_USBC 0x17 - - /* deleted */ - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h -index 269437b013280..d90da384d1851 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h -@@ -312,7 +312,7 @@ enum amdgpu_kiq_irq { - AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, - AMDGPU_CP_KIQ_IRQ_LAST - }; -- -+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */ - #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ - #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ - #define MAX_KIQ_REG_TRY 1000 -@@ -1069,6 +1069,7 @@ struct amdgpu_device { - bool runpm; - bool in_runpm; - bool has_pr3; -+ bool is_fw_fb; - - bool pm_sysfs_en; - bool ucode_sysfs_en; -@@ -1078,8 +1079,6 @@ struct amdgpu_device { - char product_name[32]; - char serial[20]; - -- struct amdgpu_autodump autodump; -- - atomic_t throttling_logging_enabled; - struct ratelimit_state throttling_logging_rs; - uint32_t ras_hw_enabled; -@@ -1286,6 +1285,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); - int amdgpu_device_pci_reset(struct amdgpu_device *adev); - bool amdgpu_device_need_post(struct amdgpu_device *adev); -+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); -+bool amdgpu_device_aspm_support_quirk(void); - - void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, - u64 num_vis_bytes); -@@ -1398,12 +1399,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta - int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); - - void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); --bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); - void amdgpu_acpi_detect(void); - #else - static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } - static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } --static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } - static inline void amdgpu_acpi_detect(void) { } - static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } - static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, -@@ -1412,6 +1411,16 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, - enum amdgpu_ss ss_state) { return 0; } - #endif - -+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) -+bool 
amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); -+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); -+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); -+#else -+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } -+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } -+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } -+#endif -+ - int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, - uint64_t addr, struct amdgpu_bo **bo, - struct amdgpu_bo_va_mapping **mapping); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c -index 4811b0faafd9a..6cded09d5878a 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c -@@ -1031,6 +1031,38 @@ void amdgpu_acpi_detect(void) - } - } - -+#if IS_ENABLED(CONFIG_SUSPEND) -+/** -+ * amdgpu_acpi_is_s3_active -+ * -+ * @adev: amdgpu_device_pointer -+ * -+ * returns true if supported, false if not. -+ */ -+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) -+{ -+ return !(adev->flags & AMD_IS_APU) || -+ (pm_suspend_target_state == PM_SUSPEND_MEM); -+} -+ -+/** -+ * amdgpu_acpi_should_gpu_reset -+ * -+ * @adev: amdgpu_device_pointer -+ * -+ * returns true if should reset GPU, false if not -+ */ -+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) -+{ -+ if (adev->flags & AMD_IS_APU) -+ return false; -+ -+ if (amdgpu_sriov_vf(adev)) -+ return false; -+ -+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE; -+} -+ - /** - * amdgpu_acpi_is_s0ix_active - * -@@ -1040,11 +1072,24 @@ void amdgpu_acpi_detect(void) - */ - bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) - { --#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) -- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { -- if (adev->flags & AMD_IS_APU) -- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; -+ if (!(adev->flags & AMD_IS_APU) || -+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) -+ return false; -+ -+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { -+ dev_warn_once(adev->dev, -+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" -+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); -+ return false; - } --#endif -+ -+#if !IS_ENABLED(CONFIG_AMD_PMC) -+ dev_warn_once(adev->dev, -+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); - return false; -+#else -+ return true; -+#endif /* CONFIG_AMD_PMC */ - } -+ -+#endif /* CONFIG_SUSPEND */ -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c -index 1d41c2c00623b..5690cb6d27fed 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c -@@ -768,7 +768,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - bool all_hub = false; - -- if (adev->family == AMDGPU_FAMILY_AI) -+ if (adev->family == AMDGPU_FAMILY_AI || -+ adev->family == AMDGPU_FAMILY_RV) - all_hub = true; - - return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c -index 46cd4ee6bafb7..f3743089a1c99 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c -+++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c -@@ -44,5 +44,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { - .get_atc_vmid_pasid_mapping_info = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, -+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, - .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings - }; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -index 054c1a224defb..00a8aef48a696 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -@@ -476,13 +476,13 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem, - struct ttm_tt *ttm = bo->tbo.ttm; - int ret; - -+ if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) -+ return -EINVAL; -+ - ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); - if (unlikely(!ttm->sg)) - return -ENOMEM; - -- if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) -- return -EINVAL; -- - /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ - ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, - ttm->num_pages, 0, -@@ -1318,16 +1318,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, - struct amdgpu_vm *vm) - { - struct amdkfd_process_info *process_info = vm->process_info; -- struct amdgpu_bo *pd = vm->root.bo; - - if (!process_info) - return; - -- /* Release eviction fence from PD */ -- amdgpu_bo_reserve(pd, false); -- amdgpu_bo_fence(pd, NULL, false); -- amdgpu_bo_unreserve(pd); -- - /* Update process info */ - mutex_lock(&process_info->lock); - process_info->n_vms--; -@@ -1393,7 +1387,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct sg_table *sg = NULL; - uint64_t user_addr = 0; - struct amdgpu_bo *bo; -- struct drm_gem_object *gobj; -+ struct drm_gem_object *gobj = NULL; - u32 domain, alloc_domain; - u64 alloc_flags; - int ret; -@@ -1503,14 +1497,16 @@ allocate_init_user_pages_failed: - remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); - drm_vma_node_revoke(&gobj->vma_node, drm_priv); - err_node_allow: -- amdgpu_bo_unref(&bo); - /* Don't unreserve system mem limit twice */ - goto err_reserve_limit; - err_bo_create: - unreserve_mem_limit(adev, size, alloc_domain, !!sg); - err_reserve_limit: - mutex_destroy(&(*mem)->lock); -- kfree(*mem); -+ if (gobj) -+ drm_gem_object_put(gobj); -+ else -+ kfree(*mem); - err: - if (sg) { - sg_free_table(sg); -@@ -1826,9 +1822,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, - return -EINVAL; - } - -- /* delete kgd_mem from kfd_bo_list to avoid re-validating -- * this BO in BO's restoring after eviction. 
-- */ - mutex_lock(&mem->process_info->lock); - - ret = amdgpu_bo_reserve(bo, true); -@@ -1851,7 +1844,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, - - amdgpu_amdkfd_remove_eviction_fence( - bo, mem->process_info->eviction_fence); -- list_del_init(&mem->validate_list.head); - - if (size) - *size = amdgpu_bo_size(bo); -@@ -1918,7 +1910,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, - - ret = drm_vma_node_allow(&obj->vma_node, drm_priv); - if (ret) { -- kfree(mem); -+ kfree(*mem); - return ret; - } - -@@ -2358,6 +2350,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) - if (!attachment->is_mapped) - continue; - -+ if (attachment->bo_va->base.bo->tbo.pin_count) -+ continue; -+ - kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); - if (ret) { -@@ -2397,12 +2392,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) - process_info->eviction_fence = new_fence; - *ef = dma_fence_get(&new_fence->base); - -- /* Attach new eviction fence to all BOs */ -+ /* Attach new eviction fence to all BOs except pinned ones */ - list_for_each_entry(mem, &process_info->kfd_bo_list, -- validate_list.head) -+ validate_list.head) { -+ if (mem->bo->tbo.pin_count) -+ continue; -+ - amdgpu_bo_fence(mem->bo, - &process_info->eviction_fence->base, true); -- -+ } - /* Attach eviction fence to PD / PT BOs */ - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c -index 27b19503773b9..71354f505b84b 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c -@@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) - - if (!found) - return false; -+ pci_dev_put(pdev); - - adev->bios = kmalloc(size, GFP_KERNEL); - if (!adev->bios) { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c -index 15c45b2a39835..714178f1b6c6e 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c -@@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref) - - int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, - struct drm_amdgpu_bo_list_entry *info, -- unsigned num_entries, struct amdgpu_bo_list **result) -+ size_t num_entries, struct amdgpu_bo_list **result) - { - unsigned last_entry = 0, first_userptr = num_entries; - struct amdgpu_bo_list_entry *array; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h -index c905a4cfc173d..044b41f0bfd9c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h -@@ -61,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, - int amdgpu_bo_list_create(struct amdgpu_device *adev, - struct drm_file *filp, - struct drm_amdgpu_bo_list_entry *info, -- unsigned num_entries, -+ size_t num_entries, - struct amdgpu_bo_list **list); - - static inline struct amdgpu_bo_list_entry * -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c -index b9c11c2b2885a..c777aff164b76 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c -@@ -175,7 +175,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector 
*connector) - - /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */ - if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) { -- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) && -+ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) && - (mode_clock * 5/4 <= max_tmds_clock)) - bpc = 10; - else -@@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector) - if (!amdgpu_connector->edid) { - /* some laptops provide a hardcoded edid in rom for LCDs */ - if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || -- (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) -+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) { - amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); -+ drm_connector_update_edid_property(connector, amdgpu_connector->edid); -+ } - } - } - -@@ -387,6 +389,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) - native_mode->vdisplay != 0 && - native_mode->clock != 0) { - mode = drm_mode_duplicate(dev, native_mode); -+ if (!mode) -+ return NULL; -+ - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; - drm_mode_set_name(mode); - -@@ -401,6 +406,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) - * simpler. - */ - mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); -+ if (!mode) -+ return NULL; -+ - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; - DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); - } -@@ -827,6 +835,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) - - amdgpu_connector_get_edid(connector); - ret = amdgpu_connector_ddc_get_modes(connector); -+ amdgpu_get_native_mode(connector); - - return ret; - } -@@ -1664,10 +1673,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, - adev->mode_info.dither_property, - AMDGPU_FMT_DITHER_DISABLE); - -- if (amdgpu_audio != 0) -+ if (amdgpu_audio != 0) { - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.audio_property, - AMDGPU_AUDIO_AUTO); -+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; -+ } - - subpixel_order = SubPixelHorizontalRGB; - connector->interlace_allowed = true; -@@ -1789,6 +1800,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.audio_property, - AMDGPU_AUDIO_AUTO); -+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; - } - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.dither_property, -@@ -1842,6 +1854,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.audio_property, - AMDGPU_AUDIO_AUTO); -+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; - } - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.dither_property, -@@ -1892,6 +1905,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.audio_property, - AMDGPU_AUDIO_AUTO); -+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO; - } - drm_object_attach_property(&amdgpu_connector->base.base, - adev->mode_info.dither_property, -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c -index 913f9eaa9cd65..4b01188385b28 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c -@@ -115,7 +115,7 @@ static 
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs - int ret; - - if (cs->in.num_chunks == 0) -- return 0; -+ return -EINVAL; - - chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); - if (!chunk_array) -@@ -1508,6 +1508,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, - return 0; - - default: -+ dma_fence_put(fence); - return -EINVAL; - } - } -@@ -1540,15 +1541,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, - continue; - - r = dma_fence_wait_timeout(fence, true, timeout); -+ if (r > 0 && fence->error) -+ r = fence->error; -+ - dma_fence_put(fence); - if (r < 0) - return r; - - if (r == 0) - break; -- -- if (fence->error) -- return fence->error; - } - - memset(wait, 0, sizeof(*wait)); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c -index 463b9c0283f7e..348629ea0e153 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c -@@ -27,7 +27,6 @@ - #include - #include - #include --#include - - #include "amdgpu.h" - #include "amdgpu_pm.h" -@@ -37,85 +36,7 @@ - #include "amdgpu_securedisplay.h" - #include "amdgpu_fw_attestation.h" - --int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev) --{ - #if defined(CONFIG_DEBUG_FS) -- unsigned long timeout = 600 * HZ; -- int ret; -- -- wake_up_interruptible(&adev->autodump.gpu_hang); -- -- ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout); -- if (ret == 0) { -- pr_err("autodump: timeout, move on to gpu recovery\n"); -- return -ETIMEDOUT; -- } --#endif -- return 0; --} -- --#if defined(CONFIG_DEBUG_FS) -- --static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file) --{ -- struct amdgpu_device *adev = inode->i_private; -- int ret; -- -- file->private_data = adev; -- -- ret = down_read_killable(&adev->reset_sem); -- if (ret) -- return ret; -- -- if (adev->autodump.dumping.done) { -- reinit_completion(&adev->autodump.dumping); -- ret = 0; -- } else { -- ret = -EBUSY; -- } -- -- up_read(&adev->reset_sem); -- -- return ret; --} -- --static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file) --{ -- struct amdgpu_device *adev = file->private_data; -- -- complete_all(&adev->autodump.dumping); -- return 0; --} -- --static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table) --{ -- struct amdgpu_device *adev = file->private_data; -- -- poll_wait(file, &adev->autodump.gpu_hang, poll_table); -- -- if (amdgpu_in_reset(adev)) -- return POLLIN | POLLRDNORM | POLLWRNORM; -- -- return 0; --} -- --static const struct file_operations autodump_debug_fops = { -- .owner = THIS_MODULE, -- .open = amdgpu_debugfs_autodump_open, -- .poll = amdgpu_debugfs_autodump_poll, -- .release = amdgpu_debugfs_autodump_release, --}; -- --static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev) --{ -- init_completion(&adev->autodump.dumping); -- complete_all(&adev->autodump.dumping); -- init_waitqueue_head(&adev->autodump.gpu_hang); -- -- debugfs_create_file("amdgpu_autodump", 0600, -- adev_to_drm(adev)->primary->debugfs_root, -- adev, &autodump_debug_fops); --} - - /** - * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes -@@ -1255,7 +1176,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) - return r; - } - -- *val = amdgpu_bo_evict_vram(adev); -+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - - 
pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); -@@ -1268,17 +1189,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) - { - struct amdgpu_device *adev = (struct amdgpu_device *)data; - struct drm_device *dev = adev_to_drm(adev); -- struct ttm_resource_manager *man; - int r; - - r = pm_runtime_get_sync(dev->dev); - if (r < 0) { -- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); -+ pm_runtime_put_autosuspend(dev->dev); - return r; - } - -- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); -- *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man); -+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); - - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); -@@ -1588,7 +1507,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) - } - - amdgpu_ras_debugfs_create_all(adev); -- amdgpu_debugfs_autodump_init(adev); - amdgpu_rap_debugfs_init(adev); - amdgpu_securedisplay_debugfs_init(adev); - amdgpu_fw_attestation_debugfs_init(adev); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h -index 141a8474e24f2..8b641f40fdf66 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h -@@ -26,10 +26,6 @@ - /* - * Debugfs - */ --struct amdgpu_autodump { -- struct completion dumping; -- struct wait_queue_head gpu_hang; --}; - - int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); - int amdgpu_debugfs_init(struct amdgpu_device *adev); -@@ -37,4 +33,3 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev); - void amdgpu_debugfs_fence_init(struct amdgpu_device *adev); - void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); - void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); --int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index af9bdf16eefd4..8b6b47fd9b880 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -30,7 +30,9 @@ - #include - #include - #include -+#include - -+#include - #include - #include - #include -@@ -73,6 +75,10 @@ - - #include - -+#if IS_ENABLED(CONFIG_X86) -+#include -+#endif -+ - MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); - MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); - MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); -@@ -88,6 +94,8 @@ MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin"); - - #define AMDGPU_RESUME_MS 2000 - -+static const struct drm_driver amdgpu_kms_driver; -+ - const char *amdgpu_asic_name[] = { - "TAHITI", - "PITCAIRN", -@@ -1187,6 +1195,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) - u16 cmd; - int r; - -+ if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) -+ return 0; -+ - /* Bypass for VF */ - if (amdgpu_sriov_vf(adev)) - return 0; -@@ -1308,6 +1319,42 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) - return true; - } - -+/** -+ * amdgpu_device_should_use_aspm - check if the device should program ASPM -+ * -+ * @adev: amdgpu_device pointer -+ * -+ * Confirm whether the module parameter and pcie bridge agree that ASPM should -+ * be set for this device. -+ * -+ * Returns true if it should be used or false if not. 
-+ */ -+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) -+{ -+ switch (amdgpu_aspm) { -+ case -1: -+ break; -+ case 0: -+ return false; -+ case 1: -+ return true; -+ default: -+ return false; -+ } -+ return pcie_aspm_enabled(adev->pdev); -+} -+ -+bool amdgpu_device_aspm_support_quirk(void) -+{ -+#if IS_ENABLED(CONFIG_X86) -+ struct cpuinfo_x86 *c = &cpu_data(0); -+ -+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE); -+#else -+ return true; -+#endif -+} -+ - /* if we get transitioned to only one device, take VGA back */ - /** - * amdgpu_device_vga_set_decode - enable/disable vga decode -@@ -2069,6 +2116,8 @@ out: - */ - static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) - { -+ struct drm_device *dev = adev_to_drm(adev); -+ struct pci_dev *parent; - int i, r; - - amdgpu_device_enable_virtual_display(adev); -@@ -2168,6 +2217,18 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) - return -EINVAL; - } - -+ if (amdgpu_has_atpx() && -+ (amdgpu_is_atpx_hybrid() || -+ amdgpu_has_atpx_dgpu_power_cntl()) && -+ ((adev->flags & AMD_IS_APU) == 0) && -+ !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) -+ adev->flags |= AMD_IS_PX; -+ -+ if (!(adev->flags & AMD_IS_APU)) { -+ parent = pci_upstream_bridge(adev->pdev); -+ adev->has_pr3 = parent ? pci_pr3_present(parent) : false; -+ } -+ - amdgpu_amdkfd_device_probe(adev); - - adev->pm.pp_feature = amdgpu_pp_feature_mask; -@@ -2348,8 +2409,20 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) - } - adev->ip_blocks[i].status.sw = true; - -- /* need to do gmc hw init early so we can allocate gpu mem */ -- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { -+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { -+ /* need to do common hw init early so everything is set up for gmc */ -+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); -+ if (r) { -+ DRM_ERROR("hw_init %d failed %d\n", i, r); -+ goto init_failed; -+ } -+ adev->ip_blocks[i].status.hw = true; -+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { -+ /* need to do gmc hw init early so we can allocate gpu mem */ -+ /* Try to reserve bad pages early */ -+ if (amdgpu_sriov_vf(adev)) -+ amdgpu_virt_exchange_data(adev); -+ - r = amdgpu_device_vram_scratch_init(adev); - if (r) { - DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); -@@ -2394,6 +2467,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) - if (r) - goto init_failed; - -+ r = amdgpu_amdkfd_resume_iommu(adev); -+ if (r) -+ goto init_failed; -+ - r = amdgpu_device_ip_hw_init_phase1(adev); - if (r) - goto init_failed; -@@ -2432,15 +2509,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) - if (!adev->gmc.xgmi.pending_reset) - amdgpu_amdkfd_device_init(adev); - -- r = amdgpu_amdkfd_resume_iommu(adev); -- if (r) -- goto init_failed; -- - amdgpu_fru_get_product_info(adev); - - init_failed: -- if (amdgpu_sriov_vf(adev)) -- amdgpu_virt_release_full_gpu(adev, true); - - return r; - } -@@ -2745,6 +2816,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) - adev->ip_blocks[i].status.hw = false; - } - -+ if (amdgpu_sriov_vf(adev)) { -+ if (amdgpu_virt_release_full_gpu(adev, false)) -+ DRM_ERROR("failed to release exclusive mode on fini\n"); -+ } -+ - return 0; - } - -@@ -2805,10 +2881,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) - - amdgpu_ras_fini(adev); - -- if (amdgpu_sriov_vf(adev)) -- if (amdgpu_virt_release_full_gpu(adev, false)) -- 
DRM_ERROR("failed to release exclusive mode on fini\n"); -- - return 0; - } - -@@ -2992,8 +3064,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) - int i, r; - - static enum amd_ip_block_type ip_order[] = { -- AMD_IP_BLOCK_TYPE_GMC, - AMD_IP_BLOCK_TYPE_COMMON, -+ AMD_IP_BLOCK_TYPE_GMC, - AMD_IP_BLOCK_TYPE_PSP, - AMD_IP_BLOCK_TYPE_IH, - }; -@@ -3084,7 +3156,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) - continue; - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || -- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { -+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || -+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { - - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { -@@ -3131,6 +3204,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) - return r; - } - adev->ip_blocks[i].status.hw = true; -+ -+ if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { -+ /* disable gfxoff for IP resume. The gfxoff will be re-enabled in -+ * amdgpu_device_resume() after IP resume. -+ */ -+ amdgpu_gfx_off_ctrl(adev, false); -+ DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); -+ } -+ - } - - return 0; -@@ -3415,6 +3497,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, - int r, i; - bool px = false; - u32 max_MBps; -+ int tmp; - - adev->shutdown = false; - adev->flags = flags; -@@ -3531,6 +3614,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, - adev->rmmio_size = pci_resource_len(adev->pdev, 2); - } - -+ for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) -+ atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); -+ - adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); - if (adev->rmmio == NULL) { - return -ENOMEM; -@@ -3571,6 +3657,11 @@ int amdgpu_device_init(struct amdgpu_device *adev, - if (r) - return r; - -+ /* Get rid of things like offb */ -+ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); -+ if (r) -+ return r; -+ - /* doorbell bar mapping and doorbell index init*/ - amdgpu_device_doorbell_init(adev); - -@@ -3606,7 +3697,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, - } - } - } else { -+ tmp = amdgpu_reset_method; -+ /* It should do a default reset when loading or reloading the driver, -+ * regardless of the module parameter reset_method. -+ */ -+ amdgpu_reset_method = AMD_RESET_METHOD_NONE; - r = amdgpu_asic_reset(adev); -+ amdgpu_reset_method = tmp; - if (r) { - dev_err(adev->dev, "asic reset on init failed\n"); - goto failed; -@@ -3666,18 +3763,6 @@ fence_driver_init: - - r = amdgpu_device_ip_init(adev); - if (r) { -- /* failed in exclusive mode due to timeout */ -- if (amdgpu_sriov_vf(adev) && -- !amdgpu_sriov_runtime(adev) && -- amdgpu_virt_mmio_blocked(adev) && -- !amdgpu_virt_wait_reset(adev)) { -- dev_err(adev->dev, "VF exclusive mode timeout\n"); -- /* Don't send request since VF is inactive. 
*/ -- adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; -- adev->virt.ops = NULL; -- r = -EAGAIN; -- goto release_ras_con; -- } - dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - goto release_ras_con; -@@ -3756,8 +3841,10 @@ fence_driver_init: - msecs_to_jiffies(AMDGPU_RESUME_MS)); - } - -- if (amdgpu_sriov_vf(adev)) -+ if (amdgpu_sriov_vf(adev)) { -+ amdgpu_virt_release_full_gpu(adev, true); - flush_delayed_work(&adev->delayed_init_work); -+ } - - r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); - if (r) -@@ -3792,6 +3879,20 @@ fence_driver_init: - return 0; - - release_ras_con: -+ if (amdgpu_sriov_vf(adev)) -+ amdgpu_virt_release_full_gpu(adev, true); -+ -+ /* failed in exclusive mode due to timeout */ -+ if (amdgpu_sriov_vf(adev) && -+ !amdgpu_sriov_runtime(adev) && -+ amdgpu_virt_mmio_blocked(adev) && -+ !amdgpu_virt_wait_reset(adev)) { -+ dev_err(adev->dev, "VF exclusive mode timeout\n"); -+ /* Don't send request since VF is inactive. */ -+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; -+ adev->virt.ops = NULL; -+ r = -EAGAIN; -+ } - amdgpu_release_ras_context(adev); - - failed: -@@ -3850,7 +3951,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) - /* disable all interrupts */ - amdgpu_irq_disable_all(adev); - if (adev->mode_info.mode_config_initialized){ -- if (!amdgpu_device_has_dc_support(adev)) -+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) - drm_helper_force_disable_all(adev_to_drm(adev)); - else - drm_atomic_helper_shutdown(adev_to_drm(adev)); -@@ -3876,8 +3977,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) - - void amdgpu_device_fini_sw(struct amdgpu_device *adev) - { -- amdgpu_device_ip_fini(adev); - amdgpu_fence_driver_sw_fini(adev); -+ amdgpu_device_ip_fini(adev); - release_firmware(adev->firmware.gpu_info_fw); - adev->firmware.gpu_info_fw = NULL; - adev->accel_working = false; -@@ -3909,6 +4010,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) - - } - -+/** -+ * amdgpu_device_evict_resources - evict device resources -+ * @adev: amdgpu device object -+ * -+ * Evicts all ttm device resources(vram BOs, gart table) from the lru list -+ * of the vram memory type. Mainly used for evicting device resources -+ * at suspend time. -+ * -+ */ -+static void amdgpu_device_evict_resources(struct amdgpu_device *adev) -+{ -+ /* No need to evict vram on APUs for suspend to ram or s2idle */ -+ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) -+ return; -+ -+ if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) -+ DRM_WARN("evicting device resources failed\n"); -+ -+} - - /* - * Suspend & resume. 
-@@ -3926,12 +4046,20 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) - int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) - { - struct amdgpu_device *adev = drm_to_adev(dev); -+ int r = 0; - - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - return 0; - - adev->in_suspend = true; - -+ if (amdgpu_sriov_vf(adev)) { -+ amdgpu_virt_fini_data_exchange(adev); -+ r = amdgpu_virt_request_full_gpu(adev, false); -+ if (r) -+ return r; -+ } -+ - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) - DRM_WARN("smart shift update failed\n"); - -@@ -3941,6 +4069,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) - amdgpu_fbdev_set_suspend(adev, 1); - - cancel_delayed_work_sync(&adev->delayed_init_work); -+ flush_delayed_work(&adev->gfx.gfx_off_delay_work); - - amdgpu_ras_suspend(adev); - -@@ -3949,17 +4078,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) - if (!adev->in_s0ix) - amdgpu_amdkfd_suspend(adev, adev->in_runpm); - -- /* evict vram memory */ -- amdgpu_bo_evict_vram(adev); -+ /* First evict vram memory */ -+ amdgpu_device_evict_resources(adev); - - amdgpu_fence_driver_hw_fini(adev); - - amdgpu_device_ip_suspend_phase2(adev); -- /* evict remaining vram memory -- * This second call to evict vram is to evict the gart page table -- * using the CPU. -+ /* This second call to evict device resources is to evict -+ * the gart page table using the CPU. - */ -- amdgpu_bo_evict_vram(adev); -+ amdgpu_device_evict_resources(adev); -+ -+ if (amdgpu_sriov_vf(adev)) -+ amdgpu_virt_release_full_gpu(adev, false); - - return 0; - } -@@ -3979,6 +4110,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) - struct amdgpu_device *adev = drm_to_adev(dev); - int r = 0; - -+ if (amdgpu_sriov_vf(adev)) { -+ r = amdgpu_virt_request_full_gpu(adev, true); -+ if (r) -+ return r; -+ } -+ - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) - return 0; - -@@ -3993,6 +4130,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) - } - - r = amdgpu_device_ip_resume(adev); -+ -+ /* no matter what r is, always need to properly release full GPU */ -+ if (amdgpu_sriov_vf(adev)) { -+ amdgpu_virt_init_data_exchange(adev); -+ amdgpu_virt_release_full_gpu(adev, true); -+ } -+ - if (r) { - dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); - return r; -@@ -4015,6 +4159,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) - /* Make sure IB tests flushed */ - flush_delayed_work(&adev->delayed_init_work); - -+ if (adev->in_s0ix) { -+ /* re-enable gfxoff after IP resume. This re-enables gfxoff after -+ * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). 
-+ */ -+ amdgpu_gfx_off_ctrl(adev, true); -+ DRM_DEBUG("will enable gfxoff for the mission mode\n"); -+ } - if (fbcon) - amdgpu_fbdev_set_suspend(adev, 0); - -@@ -4230,7 +4381,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) - dev_info(adev->dev, "recover vram bo from shadow start\n"); - mutex_lock(&adev->shadow_list_lock); - list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { -- shadow = &vmbo->bo; -+ /* If vm is compute context or adev is APU, shadow will be NULL */ -+ if (!vmbo->shadow) -+ continue; -+ shadow = vmbo->shadow; -+ - /* No need to recover an evicted BO */ - if (shadow->tbo.resource->mem_type != TTM_PL_TT || - shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || -@@ -4466,10 +4621,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, - if (reset_context->reset_req_dev == adev) - job = reset_context->job; - -- /* no need to dump if device is not in good state during probe period */ -- if (!adev->gmc.xgmi.pending_reset) -- amdgpu_debugfs_wait_dump(adev); -- - if (amdgpu_sriov_vf(adev)) { - /* stop the data exchange thread */ - amdgpu_virt_fini_data_exchange(adev); -@@ -4791,6 +4942,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) - pm_runtime_enable(&(p->dev)); - pm_runtime_resume(&(p->dev)); - } -+ -+ pci_dev_put(p); - } - - static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) -@@ -4829,6 +4982,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) - - if (expires < ktime_get_mono_fast_ns()) { - dev_warn(adev->dev, "failed to suspend display audio\n"); -+ pci_dev_put(p); - /* TODO: abort the succeeding gpu reset? */ - return -ETIMEDOUT; - } -@@ -4836,6 +4990,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) - - pm_runtime_disable(&(p->dev)); - -+ pci_dev_put(p); - return 0; - } - -@@ -5130,7 +5285,7 @@ skip_hw_reset: - drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); - } - -- if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { -+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { - drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); - } - -@@ -5610,7 +5765,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) - { - #ifdef CONFIG_X86_64 -- if (adev->flags & AMD_IS_APU) -+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) - return; - #endif - if (adev->gmc.xgmi.connected_to_cpu) -@@ -5626,7 +5781,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, - struct amdgpu_ring *ring) - { - #ifdef CONFIG_X86_64 -- if (adev->flags & AMD_IS_APU) -+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) - return; - #endif - if (adev->gmc.xgmi.connected_to_cpu) -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c -index ada7bc19118ac..a919f5daacd91 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c -@@ -415,10 +415,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) - } - } - -+union gc_info { -+ struct gc_info_v1_0 v1; -+ struct gc_info_v2_0 v2; -+}; -+ - int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) - { - struct binary_header *bhdr; -- struct gc_info_v1_0 *gc_info; -+ union gc_info *gc_info; - - if (!adev->mman.discovery_bin) { - DRM_ERROR("ip discovery uninitialized\n"); -@@ -426,27 +431,54 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) - } - - bhdr = 
(struct binary_header *)adev->mman.discovery_bin; -- gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin + -+ gc_info = (union gc_info *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[GC].offset)); -- -- adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se); -- adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) + -- le32_to_cpu(gc_info->gc_num_wgp1_per_sa)); -- adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se); -- adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se); -- adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c); -- adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs); -- adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds); -- adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth); -- adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth); -- adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer); -- adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size); -- adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd); -- adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu); -- adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size); -- adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) / -- le32_to_cpu(gc_info->gc_num_sa_per_se); -- adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc); -- -+ switch (gc_info->v1.header.version_major) { -+ case 1: -+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); -+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + -+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); -+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); -+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); -+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); -+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); -+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); -+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); -+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); -+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); -+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); -+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); -+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); -+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); -+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / -+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se); -+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); -+ break; -+ case 2: -+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); -+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); -+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); -+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); -+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); -+ 
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); -+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); -+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); -+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); -+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); -+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); -+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); -+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); -+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); -+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / -+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se); -+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); -+ break; -+ default: -+ dev_err(adev->dev, -+ "Unhandled GC info table %d.%d\n", -+ gc_info->v1.header.version_major, -+ gc_info->v1.header.version_minor); -+ return -EINVAL; -+ } - return 0; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -index dc50c05f23fc2..d2286a83e302f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -@@ -1110,6 +1110,7 @@ int amdgpu_display_gem_fb_verify_and_init( - goto err; - - ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); -+ - if (ret) - goto err; - -@@ -1145,7 +1146,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, - if (ret) - return ret; - -- if (!dev->mode_config.allow_fb_modifiers) { -+ if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) { - drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, - "GFX9+ requires FB check based on format modifier\n"); - ret = check_tiling_flags_gfx6(rfb); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c -index ae6ab93c868b8..7444484a12bf8 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c -@@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) - struct amdgpu_vm_bo_base *bo_base; - int r; - -- if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM) -+ if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM) - return; - - r = ttm_bo_validate(&bo->tbo, &placement, &ctx); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index f18240f873878..deae92fde3b88 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -23,7 +23,6 @@ - */ - - #include --#include - #include - #include - #include -@@ -38,6 +37,7 @@ - #include - #include - #include -+#include - - #include "amdgpu.h" - #include "amdgpu_irq.h" -@@ -679,7 +679,7 @@ MODULE_PARM_DESC(sched_policy, - * Maximum number of processes that HWS can schedule concurrently. The maximum is the - * number of VMIDs assigned to the HWS, which is also the default. 
- */ --int hws_max_conc_proc = 8; -+int hws_max_conc_proc = -1; - module_param(hws_max_conc_proc, int, 0444); - MODULE_PARM_DESC(hws_max_conc_proc, - "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); -@@ -890,6 +890,718 @@ MODULE_PARM_DESC(smu_pptable_id, - "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); - module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); - -+/* These devices are not supported by amdgpu. -+ * They are supported by the mach64, r128, radeon drivers -+ */ -+static const u16 amdgpu_unsupported_pciidlist[] = { -+ /* mach64 */ -+ 0x4354, -+ 0x4358, -+ 0x4554, -+ 0x4742, -+ 0x4744, -+ 0x4749, -+ 0x474C, -+ 0x474D, -+ 0x474E, -+ 0x474F, -+ 0x4750, -+ 0x4751, -+ 0x4752, -+ 0x4753, -+ 0x4754, -+ 0x4755, -+ 0x4756, -+ 0x4757, -+ 0x4758, -+ 0x4759, -+ 0x475A, -+ 0x4C42, -+ 0x4C44, -+ 0x4C47, -+ 0x4C49, -+ 0x4C4D, -+ 0x4C4E, -+ 0x4C50, -+ 0x4C51, -+ 0x4C52, -+ 0x4C53, -+ 0x5654, -+ 0x5655, -+ 0x5656, -+ /* r128 */ -+ 0x4c45, -+ 0x4c46, -+ 0x4d46, -+ 0x4d4c, -+ 0x5041, -+ 0x5042, -+ 0x5043, -+ 0x5044, -+ 0x5045, -+ 0x5046, -+ 0x5047, -+ 0x5048, -+ 0x5049, -+ 0x504A, -+ 0x504B, -+ 0x504C, -+ 0x504D, -+ 0x504E, -+ 0x504F, -+ 0x5050, -+ 0x5051, -+ 0x5052, -+ 0x5053, -+ 0x5054, -+ 0x5055, -+ 0x5056, -+ 0x5057, -+ 0x5058, -+ 0x5245, -+ 0x5246, -+ 0x5247, -+ 0x524b, -+ 0x524c, -+ 0x534d, -+ 0x5446, -+ 0x544C, -+ 0x5452, -+ /* radeon */ -+ 0x3150, -+ 0x3151, -+ 0x3152, -+ 0x3154, -+ 0x3155, -+ 0x3E50, -+ 0x3E54, -+ 0x4136, -+ 0x4137, -+ 0x4144, -+ 0x4145, -+ 0x4146, -+ 0x4147, -+ 0x4148, -+ 0x4149, -+ 0x414A, -+ 0x414B, -+ 0x4150, -+ 0x4151, -+ 0x4152, -+ 0x4153, -+ 0x4154, -+ 0x4155, -+ 0x4156, -+ 0x4237, -+ 0x4242, -+ 0x4336, -+ 0x4337, -+ 0x4437, -+ 0x4966, -+ 0x4967, -+ 0x4A48, -+ 0x4A49, -+ 0x4A4A, -+ 0x4A4B, -+ 0x4A4C, -+ 0x4A4D, -+ 0x4A4E, -+ 0x4A4F, -+ 0x4A50, -+ 0x4A54, -+ 0x4B48, -+ 0x4B49, -+ 0x4B4A, -+ 0x4B4B, -+ 0x4B4C, -+ 0x4C57, -+ 0x4C58, -+ 0x4C59, -+ 0x4C5A, -+ 0x4C64, -+ 0x4C66, -+ 0x4C67, -+ 0x4E44, -+ 0x4E45, -+ 0x4E46, -+ 0x4E47, -+ 0x4E48, -+ 0x4E49, -+ 0x4E4A, -+ 0x4E4B, -+ 0x4E50, -+ 0x4E51, -+ 0x4E52, -+ 0x4E53, -+ 0x4E54, -+ 0x4E56, -+ 0x5144, -+ 0x5145, -+ 0x5146, -+ 0x5147, -+ 0x5148, -+ 0x514C, -+ 0x514D, -+ 0x5157, -+ 0x5158, -+ 0x5159, -+ 0x515A, -+ 0x515E, -+ 0x5460, -+ 0x5462, -+ 0x5464, -+ 0x5548, -+ 0x5549, -+ 0x554A, -+ 0x554B, -+ 0x554C, -+ 0x554D, -+ 0x554E, -+ 0x554F, -+ 0x5550, -+ 0x5551, -+ 0x5552, -+ 0x5554, -+ 0x564A, -+ 0x564B, -+ 0x564F, -+ 0x5652, -+ 0x5653, -+ 0x5657, -+ 0x5834, -+ 0x5835, -+ 0x5954, -+ 0x5955, -+ 0x5974, -+ 0x5975, -+ 0x5960, -+ 0x5961, -+ 0x5962, -+ 0x5964, -+ 0x5965, -+ 0x5969, -+ 0x5a41, -+ 0x5a42, -+ 0x5a61, -+ 0x5a62, -+ 0x5b60, -+ 0x5b62, -+ 0x5b63, -+ 0x5b64, -+ 0x5b65, -+ 0x5c61, -+ 0x5c63, -+ 0x5d48, -+ 0x5d49, -+ 0x5d4a, -+ 0x5d4c, -+ 0x5d4d, -+ 0x5d4e, -+ 0x5d4f, -+ 0x5d50, -+ 0x5d52, -+ 0x5d57, -+ 0x5e48, -+ 0x5e4a, -+ 0x5e4b, -+ 0x5e4c, -+ 0x5e4d, -+ 0x5e4f, -+ 0x6700, -+ 0x6701, -+ 0x6702, -+ 0x6703, -+ 0x6704, -+ 0x6705, -+ 0x6706, -+ 0x6707, -+ 0x6708, -+ 0x6709, -+ 0x6718, -+ 0x6719, -+ 0x671c, -+ 0x671d, -+ 0x671f, -+ 0x6720, -+ 0x6721, -+ 0x6722, -+ 0x6723, -+ 0x6724, -+ 0x6725, -+ 0x6726, -+ 0x6727, -+ 0x6728, -+ 0x6729, -+ 0x6738, -+ 0x6739, -+ 0x673e, -+ 0x6740, -+ 0x6741, -+ 0x6742, -+ 0x6743, -+ 0x6744, -+ 0x6745, -+ 0x6746, -+ 0x6747, -+ 0x6748, -+ 0x6749, -+ 0x674A, -+ 0x6750, -+ 0x6751, -+ 0x6758, -+ 0x6759, -+ 0x675B, -+ 0x675D, -+ 0x675F, -+ 
0x6760, -+ 0x6761, -+ 0x6762, -+ 0x6763, -+ 0x6764, -+ 0x6765, -+ 0x6766, -+ 0x6767, -+ 0x6768, -+ 0x6770, -+ 0x6771, -+ 0x6772, -+ 0x6778, -+ 0x6779, -+ 0x677B, -+ 0x6840, -+ 0x6841, -+ 0x6842, -+ 0x6843, -+ 0x6849, -+ 0x684C, -+ 0x6850, -+ 0x6858, -+ 0x6859, -+ 0x6880, -+ 0x6888, -+ 0x6889, -+ 0x688A, -+ 0x688C, -+ 0x688D, -+ 0x6898, -+ 0x6899, -+ 0x689b, -+ 0x689c, -+ 0x689d, -+ 0x689e, -+ 0x68a0, -+ 0x68a1, -+ 0x68a8, -+ 0x68a9, -+ 0x68b0, -+ 0x68b8, -+ 0x68b9, -+ 0x68ba, -+ 0x68be, -+ 0x68bf, -+ 0x68c0, -+ 0x68c1, -+ 0x68c7, -+ 0x68c8, -+ 0x68c9, -+ 0x68d8, -+ 0x68d9, -+ 0x68da, -+ 0x68de, -+ 0x68e0, -+ 0x68e1, -+ 0x68e4, -+ 0x68e5, -+ 0x68e8, -+ 0x68e9, -+ 0x68f1, -+ 0x68f2, -+ 0x68f8, -+ 0x68f9, -+ 0x68fa, -+ 0x68fe, -+ 0x7100, -+ 0x7101, -+ 0x7102, -+ 0x7103, -+ 0x7104, -+ 0x7105, -+ 0x7106, -+ 0x7108, -+ 0x7109, -+ 0x710A, -+ 0x710B, -+ 0x710C, -+ 0x710E, -+ 0x710F, -+ 0x7140, -+ 0x7141, -+ 0x7142, -+ 0x7143, -+ 0x7144, -+ 0x7145, -+ 0x7146, -+ 0x7147, -+ 0x7149, -+ 0x714A, -+ 0x714B, -+ 0x714C, -+ 0x714D, -+ 0x714E, -+ 0x714F, -+ 0x7151, -+ 0x7152, -+ 0x7153, -+ 0x715E, -+ 0x715F, -+ 0x7180, -+ 0x7181, -+ 0x7183, -+ 0x7186, -+ 0x7187, -+ 0x7188, -+ 0x718A, -+ 0x718B, -+ 0x718C, -+ 0x718D, -+ 0x718F, -+ 0x7193, -+ 0x7196, -+ 0x719B, -+ 0x719F, -+ 0x71C0, -+ 0x71C1, -+ 0x71C2, -+ 0x71C3, -+ 0x71C4, -+ 0x71C5, -+ 0x71C6, -+ 0x71C7, -+ 0x71CD, -+ 0x71CE, -+ 0x71D2, -+ 0x71D4, -+ 0x71D5, -+ 0x71D6, -+ 0x71DA, -+ 0x71DE, -+ 0x7200, -+ 0x7210, -+ 0x7211, -+ 0x7240, -+ 0x7243, -+ 0x7244, -+ 0x7245, -+ 0x7246, -+ 0x7247, -+ 0x7248, -+ 0x7249, -+ 0x724A, -+ 0x724B, -+ 0x724C, -+ 0x724D, -+ 0x724E, -+ 0x724F, -+ 0x7280, -+ 0x7281, -+ 0x7283, -+ 0x7284, -+ 0x7287, -+ 0x7288, -+ 0x7289, -+ 0x728B, -+ 0x728C, -+ 0x7290, -+ 0x7291, -+ 0x7293, -+ 0x7297, -+ 0x7834, -+ 0x7835, -+ 0x791e, -+ 0x791f, -+ 0x793f, -+ 0x7941, -+ 0x7942, -+ 0x796c, -+ 0x796d, -+ 0x796e, -+ 0x796f, -+ 0x9400, -+ 0x9401, -+ 0x9402, -+ 0x9403, -+ 0x9405, -+ 0x940A, -+ 0x940B, -+ 0x940F, -+ 0x94A0, -+ 0x94A1, -+ 0x94A3, -+ 0x94B1, -+ 0x94B3, -+ 0x94B4, -+ 0x94B5, -+ 0x94B9, -+ 0x9440, -+ 0x9441, -+ 0x9442, -+ 0x9443, -+ 0x9444, -+ 0x9446, -+ 0x944A, -+ 0x944B, -+ 0x944C, -+ 0x944E, -+ 0x9450, -+ 0x9452, -+ 0x9456, -+ 0x945A, -+ 0x945B, -+ 0x945E, -+ 0x9460, -+ 0x9462, -+ 0x946A, -+ 0x946B, -+ 0x947A, -+ 0x947B, -+ 0x9480, -+ 0x9487, -+ 0x9488, -+ 0x9489, -+ 0x948A, -+ 0x948F, -+ 0x9490, -+ 0x9491, -+ 0x9495, -+ 0x9498, -+ 0x949C, -+ 0x949E, -+ 0x949F, -+ 0x94C0, -+ 0x94C1, -+ 0x94C3, -+ 0x94C4, -+ 0x94C5, -+ 0x94C6, -+ 0x94C7, -+ 0x94C8, -+ 0x94C9, -+ 0x94CB, -+ 0x94CC, -+ 0x94CD, -+ 0x9500, -+ 0x9501, -+ 0x9504, -+ 0x9505, -+ 0x9506, -+ 0x9507, -+ 0x9508, -+ 0x9509, -+ 0x950F, -+ 0x9511, -+ 0x9515, -+ 0x9517, -+ 0x9519, -+ 0x9540, -+ 0x9541, -+ 0x9542, -+ 0x954E, -+ 0x954F, -+ 0x9552, -+ 0x9553, -+ 0x9555, -+ 0x9557, -+ 0x955f, -+ 0x9580, -+ 0x9581, -+ 0x9583, -+ 0x9586, -+ 0x9587, -+ 0x9588, -+ 0x9589, -+ 0x958A, -+ 0x958B, -+ 0x958C, -+ 0x958D, -+ 0x958E, -+ 0x958F, -+ 0x9590, -+ 0x9591, -+ 0x9593, -+ 0x9595, -+ 0x9596, -+ 0x9597, -+ 0x9598, -+ 0x9599, -+ 0x959B, -+ 0x95C0, -+ 0x95C2, -+ 0x95C4, -+ 0x95C5, -+ 0x95C6, -+ 0x95C7, -+ 0x95C9, -+ 0x95CC, -+ 0x95CD, -+ 0x95CE, -+ 0x95CF, -+ 0x9610, -+ 0x9611, -+ 0x9612, -+ 0x9613, -+ 0x9614, -+ 0x9615, -+ 0x9616, -+ 0x9640, -+ 0x9641, -+ 0x9642, -+ 0x9643, -+ 0x9644, -+ 0x9645, -+ 0x9647, -+ 0x9648, -+ 0x9649, -+ 0x964a, -+ 0x964b, -+ 0x964c, -+ 0x964e, -+ 0x964f, -+ 0x9710, -+ 0x9711, -+ 0x9712, -+ 0x9713, -+ 0x9714, -+ 0x9715, -+ 0x9802, -+ 0x9803, -+ 0x9804, -+ 0x9805, -+ 
0x9806, -+ 0x9807, -+ 0x9808, -+ 0x9809, -+ 0x980A, -+ 0x9900, -+ 0x9901, -+ 0x9903, -+ 0x9904, -+ 0x9905, -+ 0x9906, -+ 0x9907, -+ 0x9908, -+ 0x9909, -+ 0x990A, -+ 0x990B, -+ 0x990C, -+ 0x990D, -+ 0x990E, -+ 0x990F, -+ 0x9910, -+ 0x9913, -+ 0x9917, -+ 0x9918, -+ 0x9919, -+ 0x9990, -+ 0x9991, -+ 0x9992, -+ 0x9993, -+ 0x9994, -+ 0x9995, -+ 0x9996, -+ 0x9997, -+ 0x9998, -+ 0x9999, -+ 0x999A, -+ 0x999B, -+ 0x999C, -+ 0x999D, -+ 0x99A0, -+ 0x99A2, -+ 0x99A4, -+ /* radeon secondary ids */ -+ 0x3171, -+ 0x3e70, -+ 0x4164, -+ 0x4165, -+ 0x4166, -+ 0x4168, -+ 0x4170, -+ 0x4171, -+ 0x4172, -+ 0x4173, -+ 0x496e, -+ 0x4a69, -+ 0x4a6a, -+ 0x4a6b, -+ 0x4a70, -+ 0x4a74, -+ 0x4b69, -+ 0x4b6b, -+ 0x4b6c, -+ 0x4c6e, -+ 0x4e64, -+ 0x4e65, -+ 0x4e66, -+ 0x4e67, -+ 0x4e68, -+ 0x4e69, -+ 0x4e6a, -+ 0x4e71, -+ 0x4f73, -+ 0x5569, -+ 0x556b, -+ 0x556d, -+ 0x556f, -+ 0x5571, -+ 0x5854, -+ 0x5874, -+ 0x5940, -+ 0x5941, -+ 0x5b70, -+ 0x5b72, -+ 0x5b73, -+ 0x5b74, -+ 0x5b75, -+ 0x5d44, -+ 0x5d45, -+ 0x5d6d, -+ 0x5d6f, -+ 0x5d72, -+ 0x5d77, -+ 0x5e6b, -+ 0x5e6d, -+ 0x7120, -+ 0x7124, -+ 0x7129, -+ 0x712e, -+ 0x712f, -+ 0x7162, -+ 0x7163, -+ 0x7166, -+ 0x7167, -+ 0x7172, -+ 0x7173, -+ 0x71a0, -+ 0x71a1, -+ 0x71a3, -+ 0x71a7, -+ 0x71bb, -+ 0x71e0, -+ 0x71e1, -+ 0x71e2, -+ 0x71e6, -+ 0x71e7, -+ 0x71f2, -+ 0x7269, -+ 0x726b, -+ 0x726e, -+ 0x72a0, -+ 0x72a8, -+ 0x72b1, -+ 0x72b3, -+ 0x793f, -+}; -+ - static const struct pci_device_id pciidlist[] = { - #ifdef CONFIG_DRM_AMDGPU_SI - {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, -@@ -1224,10 +1936,10 @@ static const struct pci_device_id pciidlist[] = { - {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH}, - - /* Aldebaran */ -- {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, -- {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, -+ {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, -+ {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, -+ {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, -+ {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN}, - - /* CYAN_SKILLFISH */ - {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU}, -@@ -1237,6 +1949,7 @@ static const struct pci_device_id pciidlist[] = { - {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, - {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, - {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, -+ {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, - {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, - - {0, 0, 0} -@@ -1246,14 +1959,45 @@ MODULE_DEVICE_TABLE(pci, pciidlist); - - static const struct drm_driver amdgpu_kms_driver; - -+static bool amdgpu_is_fw_framebuffer(resource_size_t base, -+ resource_size_t size) -+{ -+ bool found = false; -+#if IS_REACHABLE(CONFIG_FB) -+ struct apertures_struct *a; -+ -+ a = alloc_apertures(1); -+ if (!a) -+ return false; -+ -+ a->ranges[0].base = base; -+ a->ranges[0].size = size; -+ -+ found = is_firmware_framebuffer(a); -+ kfree(a); -+#endif -+ return found; -+} -+ - static int amdgpu_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) - { - struct drm_device *ddev; - struct amdgpu_device *adev; - unsigned long flags = ent->driver_data; -- int ret, retry = 0; 
-+ int ret, retry = 0, i; - bool supports_atomic = false; -+ bool is_fw_fb; -+ resource_size_t base, size; -+ -+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) -+ amdgpu_aspm = 0; -+ -+ /* skip devices which are owned by radeon */ -+ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { -+ if (amdgpu_unsupported_pciidlist[i] == pdev->device) -+ return -ENODEV; -+ } - - if (amdgpu_virtual_display || - amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) -@@ -1264,6 +2008,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, - "See modparam exp_hw_support\n"); - return -ENODEV; - } -+ /* differentiate between P10 and P11 asics with the same DID */ -+ if (pdev->device == 0x67FF && -+ (pdev->revision == 0xE3 || -+ pdev->revision == 0xE7 || -+ pdev->revision == 0xF3 || -+ pdev->revision == 0xF7)) { -+ flags &= ~AMD_ASIC_MASK; -+ flags |= CHIP_POLARIS10; -+ } - - /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, - * however, SME requires an indirect IOMMU mapping because the encryption -@@ -1310,10 +2063,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, - } - #endif - -- /* Get rid of things like offb */ -- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); -- if (ret) -- return ret; -+ base = pci_resource_start(pdev, 0); -+ size = pci_resource_len(pdev, 0); -+ is_fw_fb = amdgpu_is_fw_framebuffer(base, size); - - adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); - if (IS_ERR(adev)) -@@ -1322,6 +2074,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, - adev->dev = &pdev->dev; - adev->pdev = pdev; - ddev = adev_to_drm(adev); -+ adev->is_fw_fb = is_fw_fb; - - if (!supports_atomic) - ddev->driver_features &= ~DRIVER_ATOMIC; -@@ -1332,12 +2085,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, - - pci_set_drvdata(pdev, ddev); - -- ret = amdgpu_driver_load_kms(adev, ent->driver_data); -+ ret = amdgpu_driver_load_kms(adev, flags); - if (ret) - goto err_pci; - - retry_init: -- ret = drm_dev_register(ddev, ent->driver_data); -+ ret = drm_dev_register(ddev, flags); - if (ret == -EAGAIN && ++retry <= 3) { - DRM_INFO("retry init %d\n", retry); - /* Don't request EX mode too frequently which is attacking */ -@@ -1471,13 +2224,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) - static int amdgpu_pmops_prepare(struct device *dev) - { - struct drm_device *drm_dev = dev_get_drvdata(dev); -+ struct amdgpu_device *adev = drm_to_adev(drm_dev); - - /* Return a positive number here so - * DPM_FLAG_SMART_SUSPEND works properly - */ - if (amdgpu_device_supports_boco(drm_dev)) -- return pm_runtime_suspended(dev) && -- pm_suspend_via_firmware(); -+ return pm_runtime_suspended(dev); -+ -+ /* if we will not support s3 or s2i for the device -+ * then skip suspend -+ */ -+ if (!amdgpu_acpi_is_s0ix_active(adev) && -+ !amdgpu_acpi_is_s3_active(adev)) -+ return 1; - - return 0; - } -@@ -1491,15 +2251,23 @@ static int amdgpu_pmops_suspend(struct device *dev) - { - struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); -- int r; - - if (amdgpu_acpi_is_s0ix_active(adev)) - adev->in_s0ix = true; -- adev->in_s3 = true; -- r = amdgpu_device_suspend(drm_dev, true); -- adev->in_s3 = false; -+ else -+ adev->in_s3 = true; -+ return amdgpu_device_suspend(drm_dev, true); -+} - -- return r; -+static int amdgpu_pmops_suspend_noirq(struct device *dev) -+{ -+ struct drm_device *drm_dev = dev_get_drvdata(dev); -+ struct amdgpu_device *adev = 
drm_to_adev(drm_dev); -+ -+ if (amdgpu_acpi_should_gpu_reset(adev)) -+ return amdgpu_asic_reset(adev); -+ -+ return 0; - } - - static int amdgpu_pmops_resume(struct device *dev) -@@ -1511,6 +2279,8 @@ static int amdgpu_pmops_resume(struct device *dev) - r = amdgpu_device_resume(drm_dev, true); - if (amdgpu_acpi_is_s0ix_active(adev)) - adev->in_s0ix = false; -+ else -+ adev->in_s3 = false; - return r; - } - -@@ -1575,12 +2345,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) - if (amdgpu_device_supports_px(drm_dev)) - drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - -+ /* -+ * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some -+ * proper cleanups and put itself into a state ready for PNP. That -+ * can address some random resuming failure observed on BOCO capable -+ * platforms. -+ * TODO: this may be also needed for PX capable platform. -+ */ -+ if (amdgpu_device_supports_boco(drm_dev)) -+ adev->mp1_state = PP_MP1_STATE_UNLOAD; -+ - ret = amdgpu_device_suspend(drm_dev, false); - if (ret) { - adev->in_runpm = false; -+ if (amdgpu_device_supports_boco(drm_dev)) -+ adev->mp1_state = PP_MP1_STATE_NONE; - return ret; - } - -+ if (amdgpu_device_supports_boco(drm_dev)) -+ adev->mp1_state = PP_MP1_STATE_NONE; -+ - if (amdgpu_device_supports_px(drm_dev)) { - /* Only need to handle PCI state in the driver for ATPX - * PCI core handles it for _PR3. -@@ -1634,8 +2419,11 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) - amdgpu_device_baco_exit(drm_dev); - } - ret = amdgpu_device_resume(drm_dev, false); -- if (ret) -+ if (ret) { -+ if (amdgpu_device_supports_px(drm_dev)) -+ pci_disable_device(pdev); - return ret; -+ } - - if (amdgpu_device_supports_px(drm_dev)) - drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; -@@ -1719,6 +2507,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = { - .prepare = amdgpu_pmops_prepare, - .complete = amdgpu_pmops_complete, - .suspend = amdgpu_pmops_suspend, -+ .suspend_noirq = amdgpu_pmops_suspend_noirq, - .resume = amdgpu_pmops_resume, - .freeze = amdgpu_pmops_freeze, - .thaw = amdgpu_pmops_thaw, -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c -index cd0acbea75da6..d58ab9deb0280 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c -@@ -341,7 +341,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) - } - - /* disable all the possible outputs/crtcs before entering KMS mode */ -- if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) -+ if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display && -+ !amdgpu_sriov_vf(adev)) - drm_helper_disable_unused_functions(adev_to_drm(adev)); - - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c -index 8d682befe0d68..8599e0ffa8292 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c -@@ -552,9 +552,6 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) - if (!ring || !ring->fence_drv.initialized) - continue; - -- if (!ring->no_scheduler) -- drm_sched_stop(&ring->sched, NULL); -- - /* You can't wait for HW to signal if it's gone */ - if (!drm_dev_is_unplugged(&adev->ddev)) - r = amdgpu_fence_wait_empty(ring); -@@ -564,7 +561,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) - if (r) - amdgpu_fence_driver_force_completion(ring); - -- if (ring->fence_drv.irq_src) -+ if 
(!drm_dev_is_unplugged(adev_to_drm(adev)) && -+ ring->fence_drv.irq_src) - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); - -@@ -582,7 +580,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev) - if (!ring || !ring->fence_drv.initialized) - continue; - -- if (!ring->no_scheduler) -+ /* -+ * Notice we check for sched.ops since there's some -+ * override on the meaning of sched.ready by amdgpu. -+ * The natural check would be sched.ready, which is -+ * set as drm_sched_init() finishes... -+ */ -+ if (ring->sched.ops) - drm_sched_fini(&ring->sched); - - for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) -@@ -614,11 +618,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) - if (!ring || !ring->fence_drv.initialized) - continue; - -- if (!ring->no_scheduler) { -- drm_sched_resubmit_jobs(&ring->sched); -- drm_sched_start(&ring->sched, true); -- } -- - /* enable the interrupt */ - if (ring->fence_drv.irq_src) - amdgpu_irq_get(adev, ring->fence_drv.irq_src, -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c -index d6aa032890ee8..13ca51ff8bd0b 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c -@@ -61,7 +61,7 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) - } - - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, -- TTM_BO_VM_NUM_PREFAULT, 1); -+ TTM_BO_VM_NUM_PREFAULT); - - drm_dev_exit(idx); - } else { -@@ -419,11 +419,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, - if (r) - goto release_object; - -- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { -- r = amdgpu_mn_register(bo, args->addr); -- if (r) -- goto release_object; -- } -+ r = amdgpu_mn_register(bo, args->addr); -+ if (r) -+ goto release_object; - - if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c -index 1916ec84dd71f..252712f930f4e 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c -@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, - * adev->gfx.mec.num_pipe_per_mec - * adev->gfx.mec.num_queue_per_pipe; - -- while (queue_bit-- >= 0) { -+ while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) - continue; - -@@ -579,9 +579,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) - - if (adev->gfx.gfx_off_req_count == 0 && - !adev->gfx.gfx_off_state) { -- /* If going to s2idle, no need to wait */ -- if (adev->in_s0ix) -- delay = GFX_OFF_NO_DELAY; - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, - delay); - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c -index f3d62e196901a..0c7963dfacad1 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c -@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, - */ - int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) - { -- unsigned int count = AMDGPU_IH_MAX_NUM_IVS; -+ unsigned int count; - u32 wptr; - - if (!ih->enabled || adev->shutdown) -@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) - wptr = amdgpu_ih_get_wptr(adev, ih); - - restart_ih: -+ count = AMDGPU_IH_MAX_NUM_IVS; - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, 
ih->rptr, wptr); - - /* Order reading of wptr vs. reading of IH ring data */ -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c -index 7e45640fbee02..e8485b1f02ed6 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c -@@ -43,6 +43,17 @@ - #include "amdgpu_display.h" - #include "amdgpu_ras.h" - -+static void amdgpu_runtime_pm_quirk(struct amdgpu_device *adev) -+{ -+ /* -+ * Add below quirk on several sienna_cichlid cards to disable -+ * runtime pm to fix EMI failures. -+ */ -+ if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) || -+ ((adev->pdev->device == 0x73BF) && (adev->pdev->revision == 0xCF))) -+ adev->runpm = false; -+} -+ - void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) - { - struct amdgpu_gpu_instance *gpu_instance; -@@ -152,21 +163,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev) - int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) - { - struct drm_device *dev; -- struct pci_dev *parent; - int r, acpi_status; - - dev = adev_to_drm(adev); - -- if (amdgpu_has_atpx() && -- (amdgpu_is_atpx_hybrid() || -- amdgpu_has_atpx_dgpu_power_cntl()) && -- ((flags & AMD_IS_APU) == 0) && -- !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) -- flags |= AMD_IS_PX; -- -- parent = pci_upstream_bridge(adev->pdev); -- adev->has_pr3 = parent ? pci_pr3_present(parent) : false; -- - /* amdgpu_device_init should report only fatal error - * like memory allocation failure or iomapping failure, - * or memory manager initialization failure, it must -@@ -206,6 +206,15 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) - adev->runpm = true; - break; - } -+ /* XXX: disable runtime pm if we are the primary adapter -+ * to avoid displays being re-enabled after DPMS. -+ * This needs to be sorted out and fixed properly. -+ */ -+ if (adev->is_fw_fb) -+ adev->runpm = false; -+ -+ amdgpu_runtime_pm_quirk(adev); -+ - if (adev->runpm) - dev_info(adev->dev, "Using BACO for runtime pm\n"); - } -@@ -573,6 +582,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - crtc = (struct drm_crtc *)minfo->crtcs[i]; - if (crtc && crtc->base.id == info->mode_crtc.id) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); -+ - ui32 = amdgpu_crtc->crtc_id; - found = 1; - break; -@@ -591,7 +601,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - if (ret) - return ret; - -- ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip))); -+ ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip))); - return ret ? -EFAULT : 0; - } - case AMDGPU_INFO_HW_IP_COUNT: { -@@ -739,17 +749,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - ? 
-EFAULT : 0; - } - case AMDGPU_INFO_READ_MMR_REG: { -- unsigned n, alloc_size; -+ unsigned int n, alloc_size; - uint32_t *regs; -- unsigned se_num = (info->read_mmr_reg.instance >> -+ unsigned int se_num = (info->read_mmr_reg.instance >> - AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & - AMDGPU_INFO_MMR_SE_INDEX_MASK; -- unsigned sh_num = (info->read_mmr_reg.instance >> -+ unsigned int sh_num = (info->read_mmr_reg.instance >> - AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & - AMDGPU_INFO_MMR_SH_INDEX_MASK; - - /* set full masks if the userspace set all bits -- * in the bitfields */ -+ * in the bitfields -+ */ - if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) - se_num = 0xffffffff; - else if (se_num >= AMDGPU_GFX_MAX_SE) -@@ -873,7 +884,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - return ret; - } - case AMDGPU_INFO_VCE_CLOCK_TABLE: { -- unsigned i; -+ unsigned int i; - struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; - struct amd_vce_state *vce_state; - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -index 01a78c7865367..8a0b652da4f4b 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c -@@ -78,9 +78,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) - static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) - { - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); -- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); -+ struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; - struct amdgpu_bo_vm *vmbo; - -+ bo = shadow_bo->parent; - vmbo = to_amdgpu_bo_vm(bo); - /* in case amdgpu_device_recover_vram got NULL of bo->parent */ - if (!list_empty(&vmbo->shadow_list)) { -@@ -684,13 +685,11 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, - * num of amdgpu_vm_pt entries. - */ - BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm)); -- bp->destroy = &amdgpu_bo_vm_destroy; - r = amdgpu_bo_create(adev, bp, &bo_ptr); - if (r) - return r; - - *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); -- INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); - return r; - } - -@@ -741,6 +740,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) - - mutex_lock(&adev->shadow_list_lock); - list_add_tail(&vmbo->shadow_list, &adev->shadow_list); -+ vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); -+ vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; - mutex_unlock(&adev->shadow_list_lock); - } - -@@ -912,6 +913,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - if (WARN_ON_ONCE(min_offset > max_offset)) - return -EINVAL; - -+ /* Check domain to be pinned to against preferred domains */ -+ if (bo->preferred_domains & domain) -+ domain = bo->preferred_domains & domain; -+ - /* A shared bo cannot be migrated to VRAM */ - if (bo->tbo.base.import_attach) { - if (domain & AMDGPU_GEM_DOMAIN_GTT) -@@ -1038,29 +1043,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) - } - } - --/** -- * amdgpu_bo_evict_vram - evict VRAM buffers -- * @adev: amdgpu device object -- * -- * Evicts all VRAM buffers on the lru list of the memory type. -- * Mainly used for evicting vram at suspend time. -- * -- * Returns: -- * 0 for success or a negative error code on failure. 
-- */ --int amdgpu_bo_evict_vram(struct amdgpu_device *adev) --{ -- struct ttm_resource_manager *man; -- -- if (adev->in_s3 && (adev->flags & AMD_IS_APU)) { -- /* No need to evict vram on APUs for suspend to ram */ -- return 0; -- } -- -- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); -- return ttm_resource_manager_evict_all(&adev->mman.bdev, man); --} -- - static const char *amdgpu_vram_names[] = { - "UNKNOWN", - "GDDR1", -@@ -1343,7 +1325,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) - !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) - return; - -- dma_resv_lock(bo->base.resv, NULL); -+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) -+ return; - - r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); - if (!WARN_ON(r)) { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h -index 9d6c001c15f89..d8ef8a53a562d 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h -@@ -304,7 +304,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); - int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - u64 min_offset, u64 max_offset); - void amdgpu_bo_unpin(struct amdgpu_bo *bo); --int amdgpu_bo_evict_vram(struct amdgpu_device *adev); - int amdgpu_bo_init(struct amdgpu_device *adev); - void amdgpu_bo_fini(struct amdgpu_device *adev); - int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -index 9b41cb8c3de54..f305a0f8e9b9a 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -@@ -314,7 +314,39 @@ static int psp_sw_init(void *handle) - } - } - -+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, -+ amdgpu_sriov_vf(adev) ? 
-+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, -+ &psp->fw_pri_bo, -+ &psp->fw_pri_mc_addr, -+ &psp->fw_pri_buf); -+ if (ret) -+ return ret; -+ -+ ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, -+ AMDGPU_GEM_DOMAIN_VRAM, -+ &psp->fence_buf_bo, -+ &psp->fence_buf_mc_addr, -+ &psp->fence_buf); -+ if (ret) -+ goto failed1; -+ -+ ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, -+ AMDGPU_GEM_DOMAIN_VRAM, -+ &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, -+ (void **)&psp->cmd_buf_mem); -+ if (ret) -+ goto failed2; -+ - return 0; -+ -+failed2: -+ amdgpu_bo_free_kernel(&psp->fence_buf_bo, -+ &psp->fence_buf_mc_addr, &psp->fence_buf); -+failed1: -+ amdgpu_bo_free_kernel(&psp->fw_pri_bo, -+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -+ return ret; - } - - static int psp_sw_fini(void *handle) -@@ -344,6 +376,13 @@ static int psp_sw_fini(void *handle) - kfree(cmd); - cmd = NULL; - -+ amdgpu_bo_free_kernel(&psp->fw_pri_bo, -+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -+ amdgpu_bo_free_kernel(&psp->fence_buf_bo, -+ &psp->fence_buf_mc_addr, &psp->fence_buf); -+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, -+ (void **)&psp->cmd_buf_mem); -+ - return 0; - } - -@@ -2207,12 +2246,16 @@ static int psp_hw_start(struct psp_context *psp) - return ret; - } - -+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) -+ goto skip_pin_bo; -+ - ret = psp_tmr_init(psp); - if (ret) { - DRM_ERROR("PSP tmr init failed!\n"); - return ret; - } - -+skip_pin_bo: - /* - * For ASICs with DF Cstate management centralized - * to PMFW, TMR setup should be performed after PMFW -@@ -2462,7 +2505,7 @@ static int psp_load_smu_fw(struct psp_context *psp) - static bool fw_load_skip_check(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) - { -- if (!ucode->fw) -+ if (!ucode->fw || !ucode->ucode_size) - return true; - - if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && -@@ -2576,51 +2619,18 @@ static int psp_load_fw(struct amdgpu_device *adev) - struct psp_context *psp = &adev->psp; - - if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { -- psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ -- goto skip_memalloc; -- } -- -- if (amdgpu_sriov_vf(adev)) { -- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, -- AMDGPU_GEM_DOMAIN_VRAM, -- &psp->fw_pri_bo, -- &psp->fw_pri_mc_addr, -- &psp->fw_pri_buf); -+ /* should not destroy ring, only stop */ -+ psp_ring_stop(psp, PSP_RING_TYPE__KM); - } else { -- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, -- AMDGPU_GEM_DOMAIN_GTT, -- &psp->fw_pri_bo, -- &psp->fw_pri_mc_addr, -- &psp->fw_pri_buf); -- } -- -- if (ret) -- goto failed; -+ memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); - -- ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, -- AMDGPU_GEM_DOMAIN_VRAM, -- &psp->fence_buf_bo, -- &psp->fence_buf_mc_addr, -- &psp->fence_buf); -- if (ret) -- goto failed; -- -- ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, -- AMDGPU_GEM_DOMAIN_VRAM, -- &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, -- (void **)&psp->cmd_buf_mem); -- if (ret) -- goto failed; -- -- memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); -- -- ret = psp_ring_init(psp, PSP_RING_TYPE__KM); -- if (ret) { -- DRM_ERROR("PSP ring init failed!\n"); -- goto failed; -+ ret = psp_ring_init(psp, PSP_RING_TYPE__KM); -+ if (ret) { -+ DRM_ERROR("PSP ring init failed!\n"); -+ goto failed; -+ } - } - --skip_memalloc: - ret = psp_hw_start(psp); - if (ret) - goto failed; -@@ -2719,6 +2729,9 @@ static int 
psp_hw_fini(void *handle) - psp_rap_terminate(psp); - psp_dtm_terminate(psp); - psp_hdcp_terminate(psp); -+ -+ if (adev->gmc.xgmi.num_physical_nodes > 1) -+ psp_xgmi_terminate(psp); - } - - psp_asd_unload(psp); -@@ -2726,13 +2739,6 @@ static int psp_hw_fini(void *handle) - psp_tmr_terminate(psp); - psp_ring_destroy(psp, PSP_RING_TYPE__KM); - -- amdgpu_bo_free_kernel(&psp->fw_pri_bo, -- &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -- amdgpu_bo_free_kernel(&psp->fence_buf_bo, -- &psp->fence_buf_mc_addr, &psp->fence_buf); -- amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, -- (void **)&psp->cmd_buf_mem); -- - return 0; - } - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c -index b7d861ed52849..88f986a61c93a 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c -@@ -66,6 +66,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, - { - struct fd f = fdget(fd); - struct amdgpu_fpriv *fpriv; -+ struct amdgpu_ctx_mgr *mgr; - struct amdgpu_ctx *ctx; - uint32_t id; - int r; -@@ -79,8 +80,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, - return r; - } - -- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id) -+ mgr = &fpriv->ctx_mgr; -+ mutex_lock(&mgr->lock); -+ idr_for_each_entry(&mgr->ctx_handles, ctx, id) - amdgpu_ctx_priority_override(ctx, priority); -+ mutex_unlock(&mgr->lock); - - fdput(f); - return 0; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -index 94126dc396888..51c76d6322c94 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c -@@ -1892,7 +1892,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, - unsigned i; - int r; - -- if (direct_submit && !ring->sched.ready) { -+ if (!direct_submit && !ring->sched.ready) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); - return -EINVAL; - } -@@ -2036,6 +2036,36 @@ error_free: - return r; - } - -+/** -+ * amdgpu_ttm_evict_resources - evict memory buffers -+ * @adev: amdgpu device object -+ * @mem_type: evicted BO's memory type -+ * -+ * Evicts all @mem_type buffers on the lru list of the memory type. -+ * -+ * Returns: -+ * 0 for success or a negative error code on failure. 
-+ */ -+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) -+{ -+ struct ttm_resource_manager *man; -+ -+ switch (mem_type) { -+ case TTM_PL_VRAM: -+ case TTM_PL_TT: -+ case AMDGPU_PL_GWS: -+ case AMDGPU_PL_GDS: -+ case AMDGPU_PL_OA: -+ man = ttm_manager_type(&adev->mman.bdev, mem_type); -+ break; -+ default: -+ DRM_ERROR("Trying to evict invalid memory type\n"); -+ return -EINVAL; -+ } -+ -+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man); -+} -+ - #if defined(CONFIG_DEBUG_FS) - - static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h -index 3205fd5200601..639c7b41e30b9 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h -@@ -190,6 +190,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); - uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem); - uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_resource *mem); -+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type); - - void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c -index abd8469380e51..0ed0736d515aa 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c -@@ -723,8 +723,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev) - - void amdgpu_ucode_free_bo(struct amdgpu_device *adev) - { -- if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) -- amdgpu_bo_free_kernel(&adev->firmware.fw_buf, -+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf, - &adev->firmware.fw_buf_mc, - &adev->firmware.fw_buf_ptr); - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c -index 008a308a4ecaf..0c10222707902 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c -@@ -149,6 +149,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) - break; - case CHIP_VANGOGH: - fw_name = FIRMWARE_VANGOGH; -+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && -+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) -+ adev->vcn.indirect_sram = true; - break; - case CHIP_DIMGREY_CAVEFISH: - fw_name = FIRMWARE_DIMGREY_CAVEFISH; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c -index ca058fbcccd43..b508126a9738f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c -@@ -24,6 +24,7 @@ - #include - - #include -+#include - - #include "amdgpu.h" - #include "amdgpu_ras.h" -@@ -613,16 +614,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) - - void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) - { -- uint64_t bp_block_offset = 0; -- uint32_t bp_block_size = 0; -- struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; -- - adev->virt.fw_reserve.p_pf2vf = NULL; - adev->virt.fw_reserve.p_vf2pf = NULL; - adev->virt.vf2pf_update_interval_ms = 0; - - if (adev->mman.fw_vram_usage_va != NULL) { -- adev->virt.vf2pf_update_interval_ms = 2000; -+ /* go through this logic in ip_init and reset to init workqueue*/ -+ amdgpu_virt_exchange_data(adev); -+ -+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); -+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); -+ } 
else if (adev->bios != NULL) { -+ /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/ -+ adev->virt.fw_reserve.p_pf2vf = -+ (struct amd_sriov_msg_pf2vf_info_header *) -+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); -+ -+ amdgpu_virt_read_pf2vf_data(adev); -+ } -+} -+ -+ -+void amdgpu_virt_exchange_data(struct amdgpu_device *adev) -+{ -+ uint64_t bp_block_offset = 0; -+ uint32_t bp_block_size = 0; -+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; -+ -+ if (adev->mman.fw_vram_usage_va != NULL) { - - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) -@@ -648,22 +667,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) - if (adev->virt.ras_init_done) - amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); - } -- } else if (adev->bios != NULL) { -- adev->virt.fw_reserve.p_pf2vf = -- (struct amd_sriov_msg_pf2vf_info_header *) -- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); -- -- amdgpu_virt_read_pf2vf_data(adev); -- -- return; -- } -- -- if (adev->virt.vf2pf_update_interval_ms != 0) { -- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); -- schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); - } - } - -+ - void amdgpu_detect_virtualization(struct amdgpu_device *adev) - { - uint32_t reg; -@@ -694,10 +701,17 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; - - if (!reg) { -- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ -+ /* passthrough mode exclus sriov mod */ -+ if (is_virtual_machine() && !xen_initial_domain()) - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; - } - -+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) -+ /* VF MMIO access (except mailbox range) from CPU -+ * will be blocked during sriov runtime -+ */ -+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; -+ - /* we have the ability to check now */ - if (amdgpu_sriov_vf(adev)) { - switch (adev->asic_type) { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h -index 8d4c20bb71c59..4af3610f4a827 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h -@@ -31,6 +31,7 @@ - #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ - #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ - #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ -+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ - - /* all asic after AI use this offset */ - #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 -@@ -61,6 +62,8 @@ struct amdgpu_vf_error_buffer { - uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; - }; - -+enum idh_request; -+ - /** - * struct amdgpu_virt_ops - amdgpu device virt operations - */ -@@ -70,7 +73,8 @@ struct amdgpu_virt_ops { - int (*req_init_data)(struct amdgpu_device *adev); - int (*reset_gpu)(struct amdgpu_device *adev); - int (*wait_reset)(struct amdgpu_device *adev); -- void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); -+ void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, -+ u32 data1, u32 data2, u32 data3); - }; - - /* -@@ -278,6 +282,9 @@ struct amdgpu_video_codec_info; - #define amdgpu_passthrough(adev) \ - ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) - -+#define 
amdgpu_sriov_vf_mmio_access_protection(adev) \ -+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) -+ - static inline bool is_virtual_machine(void) - { - #ifdef CONFIG_X86 -@@ -308,6 +315,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); - void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); - void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); - void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); -+void amdgpu_virt_exchange_data(struct amdgpu_device *adev); - void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); - void amdgpu_detect_virtualization(struct amdgpu_device *adev); - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c -index ce982afeff913..4e8274de8fc0c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c -@@ -16,6 +16,8 @@ - #include "ivsrcid/ivsrcid_vislands30.h" - #include "amdgpu_vkms.h" - #include "amdgpu_display.h" -+#include "atom.h" -+#include "amdgpu_irq.h" - - /** - * DOC: amdgpu_vkms -@@ -41,20 +43,20 @@ static const u32 amdgpu_vkms_formats[] = { - - static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) - { -- struct amdgpu_vkms_output *output = container_of(timer, -- struct amdgpu_vkms_output, -- vblank_hrtimer); -- struct drm_crtc *crtc = &output->crtc; -+ struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer); -+ struct drm_crtc *crtc = &amdgpu_crtc->base; -+ struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); - u64 ret_overrun; - bool ret; - -- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, -+ ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer, - output->period_ns); - WARN_ON(ret_overrun != 1); - - ret = drm_crtc_handle_vblank(crtc); -+ /* Don't queue timer again when vblank is disabled. 
*/ - if (!ret) -- DRM_ERROR("amdgpu_vkms failure on handling vblank"); -+ return HRTIMER_NORESTART; - - return HRTIMER_RESTART; - } -@@ -65,22 +67,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc) - unsigned int pipe = drm_crtc_index(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); -+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - drm_calc_timestamping_constants(crtc, &crtc->mode); - -- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -- out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; - out->period_ns = ktime_set(0, vblank->framedur_ns); -- hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); -+ hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL); - - return 0; - } - - static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) - { -- struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); -+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - -- hrtimer_cancel(&out->vblank_hrtimer); -+ hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer); - } - - static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, -@@ -92,13 +93,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, - unsigned int pipe = crtc->index; - struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; -+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - if (!READ_ONCE(vblank->enabled)) { - *vblank_time = ktime_get(); - return true; - } - -- *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); -+ *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires); - - if (WARN_ON(*vblank_time == vblank->time)) - return true; -@@ -142,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, - static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state) - { -+ unsigned long flags; - if (crtc->state->event) { -- spin_lock(&crtc->dev->event_lock); -+ spin_lock_irqsave(&crtc->dev->event_lock, flags); - - if (drm_crtc_vblank_get(crtc) != 0) - drm_crtc_send_vblank_event(crtc, crtc->state->event); - else - drm_crtc_arm_vblank_event(crtc, crtc->state->event); - -- spin_unlock(&crtc->dev->event_lock); -+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - crtc->state->event = NULL; - } -@@ -165,6 +168,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { - static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, - struct drm_plane *primary, struct drm_plane *cursor) - { -+ struct amdgpu_device *adev = drm_to_adev(dev); -+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int ret; - - ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, -@@ -176,6 +181,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, - - drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); - -+ amdgpu_crtc->crtc_id = drm_crtc_index(crtc); -+ adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc; -+ -+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; -+ amdgpu_crtc->encoder = NULL; -+ amdgpu_crtc->connector = NULL; -+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; -+ -+ hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate; -+ - return ret; - } - -@@ -401,7 +417,7 @@ int 
amdgpu_vkms_output_init(struct drm_device *dev, - { - struct drm_connector *connector = &output->connector; - struct drm_encoder *encoder = &output->encoder; -- struct drm_crtc *crtc = &output->crtc; -+ struct drm_crtc *crtc = &output->crtc.base; - struct drm_plane *primary, *cursor = NULL; - int ret; - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h -index 97f1b79c0724e..4f8722ff37c25 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h -@@ -10,15 +10,14 @@ - #define YRES_MAX 16384 - - #define drm_crtc_to_amdgpu_vkms_output(target) \ -- container_of(target, struct amdgpu_vkms_output, crtc) -+ container_of(target, struct amdgpu_vkms_output, crtc.base) - - extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; - - struct amdgpu_vkms_output { -- struct drm_crtc crtc; -+ struct amdgpu_crtc crtc; - struct drm_encoder encoder; - struct drm_connector connector; -- struct hrtimer vblank_hrtimer; - ktime_t period_ns; - struct drm_pending_vblank_event *event; - }; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -index 6b15cad78de9d..0e4554950e072 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c -@@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, - * Check if all VM PDs/PTs are ready for updates - * - * Returns: -- * True if eviction list is empty. -+ * True if VM is not evicting. - */ - bool amdgpu_vm_ready(struct amdgpu_vm *vm) - { -- return list_empty(&vm->evicted); -+ bool ret; -+ -+ amdgpu_vm_eviction_lock(vm); -+ ret = !vm->evicting; -+ amdgpu_vm_eviction_unlock(vm); -+ -+ return ret && list_empty(&vm->evicted); - } - - /** -@@ -977,7 +983,6 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev, - return r; - } - -- (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); - amdgpu_bo_add_to_shadow_list(*vmbo); - - return 0; -@@ -2329,14 +2334,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, - uint64_t eaddr; - - /* validate the parameters */ -- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || -- size == 0 || size & ~PAGE_MASK) -+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) -+ return -EINVAL; -+ if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; -- if (saddr >= eaddr || -- (bo && offset + size > amdgpu_bo_size(bo)) || -+ if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; - -@@ -2395,14 +2400,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, - int r; - - /* validate the parameters */ -- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || -- size == 0 || size & ~PAGE_MASK) -+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) -+ return -EINVAL; -+ if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; -- if (saddr >= eaddr || -- (bo && offset + size > amdgpu_bo_size(bo)) || -+ if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; - -@@ -2570,18 +2575,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, - - /* Insert partial mapping before the range */ - if (!list_empty(&before->list)) { -+ struct amdgpu_bo *bo = before->bo_va->base.bo; -+ - 
amdgpu_vm_it_insert(before, &vm->va); - if (before->flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); -+ -+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && -+ !before->bo_va->base.moved) -+ amdgpu_vm_bo_moved(&before->bo_va->base); - } else { - kfree(before); - } - - /* Insert partial mapping after the range */ - if (!list_empty(&after->list)) { -+ struct amdgpu_bo *bo = after->bo_va->base.bo; -+ - amdgpu_vm_it_insert(after, &vm->va); - if (after->flags & AMDGPU_PTE_PRT) - amdgpu_vm_prt_get(adev); -+ -+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && -+ !after->bo_va->base.moved) -+ amdgpu_vm_bo_moved(&after->bo_va->base); - } else { - kfree(after); - } -@@ -3218,7 +3235,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) - */ - #ifdef CONFIG_X86_64 - if (amdgpu_vm_update_mode == -1) { -- if (amdgpu_gmc_vram_full_visible(&adev->gmc)) -+ /* For asic with VF MMIO access protection -+ * avoid using CPU for VM table updates -+ */ -+ if (amdgpu_gmc_vram_full_visible(&adev->gmc) && -+ !amdgpu_sriov_vf_mmio_access_protection(adev)) - adev->vm_manager.vm_update_mode = - AMDGPU_VM_USE_CPU_FOR_COMPUTE; - else -@@ -3265,6 +3286,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) - long timeout = msecs_to_jiffies(2000); - int r; - -+ /* No valid flags defined yet */ -+ if (args->in.flags) -+ return -EINVAL; -+ - switch (args->in.op) { - case AMDGPU_VM_OP_RESERVE_VMID: - /* We only have requirement to reserve vmid from gfxhub */ -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -index 978ac927ac11d..ce0b9cb61f582 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) - "%s", "xgmi_hive_info"); - if (ret) { - dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); -+ kobject_put(&hive->kobj); - kfree(hive); - hive = NULL; - goto pro_end; -@@ -722,7 +723,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) - amdgpu_put_xgmi_hive(hive); - } - -- return psp_xgmi_terminate(&adev->psp); -+ return 0; - } - - static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) -diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c -index 54f28c075f214..9be6da37032a7 100644 ---- a/drivers/gpu/drm/amd/amdgpu/cik.c -+++ b/drivers/gpu/drm/amd/amdgpu/cik.c -@@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) - { - int r; - -+ /* APUs don't have full asic reset */ -+ if (adev->flags & AMD_IS_APU) -+ return 0; -+ - if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - dev_info(adev->dev, "BACO reset\n"); - r = amdgpu_dpm_baco_reset(adev); -@@ -1570,17 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) - u16 bridge_cfg2, gpu_cfg2; - u32 max_lw, current_lw, tmp; - -- pcie_capability_read_word(root, PCI_EXP_LNKCTL, -- &bridge_cfg); -- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL, -- &gpu_cfg); -- -- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; -- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); -- -- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; -- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL, -- tmp16); -+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); -+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); - - tmp = RREG32_PCIE(ixPCIE_LC_STATUS1); - max_lw = (tmp & 
PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> -@@ -1633,21 +1628,14 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) - msleep(100); - - /* linkctl */ -- pcie_capability_read_word(root, PCI_EXP_LNKCTL, -- &tmp16); -- tmp16 &= ~PCI_EXP_LNKCTL_HAWD; -- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); -- pcie_capability_write_word(root, PCI_EXP_LNKCTL, -- tmp16); -- -- pcie_capability_read_word(adev->pdev, -- PCI_EXP_LNKCTL, -- &tmp16); -- tmp16 &= ~PCI_EXP_LNKCTL_HAWD; -- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); -- pcie_capability_write_word(adev->pdev, -- PCI_EXP_LNKCTL, -- tmp16); -+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, -+ PCI_EXP_LNKCTL_HAWD, -+ bridge_cfg & -+ PCI_EXP_LNKCTL_HAWD); -+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL, -+ PCI_EXP_LNKCTL_HAWD, -+ gpu_cfg & -+ PCI_EXP_LNKCTL_HAWD); - - /* linkctl2 */ - pcie_capability_read_word(root, PCI_EXP_LNKCTL2, -@@ -1715,7 +1703,7 @@ static void cik_program_aspm(struct amdgpu_device *adev) - bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; - bool disable_clkreq = false; - -- if (amdgpu_aspm == 0) -+ if (!amdgpu_device_should_use_aspm(adev)) - return; - - if (pci_is_root_bus(adev->pdev->bus)) -diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c -index 16dbe593cba2e..938f13956aeef 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c -@@ -7197,8 +7197,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); -- if (unlikely(r != 0)) -+ if (unlikely(r != 0)) { -+ amdgpu_bo_unreserve(ring->mqd_obj); - return r; -+ } - - gfx_v10_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); -@@ -7729,8 +7731,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) - switch (adev->asic_type) { - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: -- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | -- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); -+ preempt_disable(); -+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); -+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); -+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); -+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over -+ * roughly every 42 seconds. 
-+ */ -+ if (hi_check != clock_hi) { -+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); -+ clock_hi = hi_check; -+ } -+ preempt_enable(); -+ clock = clock_lo | (clock_hi << 32ULL); - break; - default: - preempt_disable(); -@@ -8411,8 +8424,14 @@ static int gfx_v10_0_set_powergating_state(void *handle, - break; - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: -+ if (!enable) -+ amdgpu_gfx_off_ctrl(adev, false); -+ - gfx_v10_cntl_pg(adev, enable); -- amdgpu_gfx_off_ctrl(adev, enable); -+ -+ if (enable) -+ amdgpu_gfx_off_ctrl(adev, true); -+ - break; - default: - break; -diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -index 025184a556ee6..de1fab165041f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -@@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); - #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c - #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 - -+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 -+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 -+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 -+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 -+ - enum ta_ras_gfx_subblock { - /*CPC*/ - TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, -@@ -1267,6 +1272,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { - { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, - /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ - { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, -+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ -+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, - { 0, 0, 0, 0, 0 }, - }; - -@@ -2617,7 +2624,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) - - gfx_v9_0_tiling_mode_table_init(adev); - -- gfx_v9_0_setup_rb(adev); -+ if (adev->gfx.num_gfx_rings) -+ gfx_v9_0_setup_rb(adev); - gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); - adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); - -@@ -3056,8 +3064,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_GDS | - AMD_PG_SUPPORT_RLC_SMU_HS)) { -- WREG32(mmRLC_JUMP_TABLE_RESTORE, -- adev->gfx.rlc.cp_table_gpu_addr >> 8); -+ WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE, -+ adev->gfx.rlc.cp_table_gpu_addr >> 8); - gfx_v9_0_init_gfx_power_gating(adev); - } - } -@@ -3863,8 +3871,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) - return r; - - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); -- if (unlikely(r != 0)) -+ if (unlikely(r != 0)) { -+ amdgpu_bo_unreserve(ring->mqd_obj); - return r; -+ } - - gfx_v9_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); -@@ -4010,7 +4020,8 @@ static int gfx_v9_0_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); -+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) -+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); - amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); - amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - -@@ -4228,19 +4239,38 @@ failed_kiq_read: - - static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) - { -- uint64_t clock; -+ uint64_t clock, clock_lo, clock_hi, hi_check; - -- amdgpu_gfx_off_ctrl(adev, false); -- mutex_lock(&adev->gfx.gpu_clock_mutex); -- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { -- clock = gfx_v9_0_kiq_read_clock(adev); -- } else { -- WREG32_SOC15(GC, 0, 
mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); -- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | -- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); -+ switch (adev->asic_type) { -+ case CHIP_RENOIR: -+ preempt_disable(); -+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); -+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); -+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); -+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over -+ * roughly every 42 seconds. -+ */ -+ if (hi_check != clock_hi) { -+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); -+ clock_hi = hi_check; -+ } -+ preempt_enable(); -+ clock = clock_lo | (clock_hi << 32ULL); -+ break; -+ default: -+ amdgpu_gfx_off_ctrl(adev, false); -+ mutex_lock(&adev->gfx.gpu_clock_mutex); -+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { -+ clock = gfx_v9_0_kiq_read_clock(adev); -+ } else { -+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); -+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | -+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); -+ } -+ mutex_unlock(&adev->gfx.gpu_clock_mutex); -+ amdgpu_gfx_off_ctrl(adev, true); -+ break; - } -- mutex_unlock(&adev->gfx.gpu_clock_mutex); -- amdgpu_gfx_off_ctrl(adev, true); - return clock; - } - -diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c -index bda1542ef1ddf..f51fd0688eca7 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c -@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); -diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c -index 14c1c1a297dd3..6e0ace2fbfab1 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c -@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC); /* UC, uncached */ - -diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c -index 1a374ec0514a5..9328991e8807f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c -@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC); /* UC, uncached */ - -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c -index e47104a1f5596..9c07ec8b97327 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c -@@ -414,6 +414,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint32_t seq; - uint16_t queried_pasid; - bool ret; -+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - -@@ -432,7 +433,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); -- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); -+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - return -ETIME; -@@ -788,7 +789,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) - adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); - - #ifdef CONFIG_X86_64 -- if (adev->flags & AMD_IS_APU) { -+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { - adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); - adev->gmc.aper_size = adev->gmc.real_vram_size; - } -@@ -1021,10 +1022,14 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) - return -EINVAL; - } - -+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) -+ goto skip_pin_bo; -+ - r = amdgpu_gart_table_vram_pin(adev); - if (r) - return r; - -+skip_pin_bo: - r = adev->gfxhub.funcs->gart_enable(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c -index 0e81e03e9b498..0fe714f54cca9 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c -@@ -841,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle) - - adev->gmc.mc_mask = 0xffffffffffULL; - -- r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); -+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); - if (r) { - dev_warn(adev->dev, "No suitable DMA available.\n"); - return r; - } -- adev->need_swiotlb = drm_need_swiotlb(44); -+ adev->need_swiotlb = 
drm_need_swiotlb(40); - - r = gmc_v6_0_init_microcode(adev); - if (r) { -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -index 0a50fdaced7e5..63c47f61d0dfd 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c -@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) - adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); - - #ifdef CONFIG_X86_64 -- if (adev->flags & AMD_IS_APU && -- adev->gmc.real_vram_size > adev->gmc.aper_size) { -+ if ((adev->flags & AMD_IS_APU) && -+ adev->gmc.real_vram_size > adev->gmc.aper_size && -+ !amdgpu_passthrough(adev)) { - adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; - adev->gmc.aper_size = adev->gmc.real_vram_size; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -index 492ebed2915be..bef9610084f10 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c -@@ -515,10 +515,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) - static int gmc_v8_0_mc_init(struct amdgpu_device *adev) - { - int r; -+ u32 tmp; - - adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); - if (!adev->gmc.vram_width) { -- u32 tmp; - int chansize, numchan; - - /* Get VRAM informations */ -@@ -562,8 +562,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) - adev->gmc.vram_width = numchan * chansize; - } - /* size in MB on si */ -- adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; -- adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; -+ tmp = RREG32(mmCONFIG_MEMSIZE); -+ /* some boards may have garbage in the upper 16 bits */ -+ if (tmp & 0xffff0000) { -+ DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); -+ if (tmp & 0xffff) -+ tmp &= 0xffff; -+ } -+ adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL; -+ adev->gmc.real_vram_size = adev->gmc.mc_vram_size; - - if (!(adev->flags & AMD_IS_APU)) { - r = amdgpu_device_resize_fb_bar(adev); -@@ -574,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) - adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); - - #ifdef CONFIG_X86_64 -- if (adev->flags & AMD_IS_APU) { -+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { - adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; - adev->gmc.aper_size = adev->gmc.real_vram_size; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c -index 5551359d5dfdc..342e540410b18 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c -@@ -72,6 +72,9 @@ - #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d - #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 - -+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea -+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 -+ - - static const char *gfxhub_client_ids[] = { - "CB", -@@ -860,6 +863,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint32_t seq; - uint16_t queried_pasid; - bool ret; -+ u32 usec_timeout = amdgpu_sriov_vf(adev) ? 
SRIOV_USEC_TIMEOUT : adev->usec_timeout; - struct amdgpu_ring *ring = &adev->gfx.kiq.ring; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - -@@ -899,7 +903,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq.ring_lock); -- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); -+ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - up_read(&adev->reset_sem); -@@ -1103,6 +1107,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) - u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); - unsigned size; - -+ /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ -+ - if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { - size = AMDGPU_VBIOS_VGA_ALLOCATION; - } else { -@@ -1110,7 +1116,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) - - switch (adev->asic_type) { - case CHIP_RAVEN: -- case CHIP_RENOIR: - viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); - size = (REG_GET_FIELD(viewport, - HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * -@@ -1118,6 +1123,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) - HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * - 4); - break; -+ case CHIP_RENOIR: -+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2); -+ size = (REG_GET_FIELD(viewport, -+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * -+ REG_GET_FIELD(viewport, -+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * -+ 4); -+ break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: -@@ -1375,7 +1388,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) - */ - - /* check whether both host-gpu and gpu-gpu xgmi links exist */ -- if ((adev->flags & AMD_IS_APU) || -+ if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || - (adev->gmc.xgmi.supported && - adev->gmc.xgmi.connected_to_cpu)) { - adev->gmc.aper_base = -@@ -1640,7 +1653,7 @@ static int gmc_v9_0_sw_fini(void *handle) - amdgpu_gem_force_release(adev); - amdgpu_vm_manager_fini(adev); - amdgpu_gart_table_vram_free(adev); -- amdgpu_bo_unref(&adev->gmc.pdb0_bo); -+ amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); - amdgpu_bo_fini(adev); - - return 0; -@@ -1708,10 +1721,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) - return -EINVAL; - } - -+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) -+ goto skip_pin_bo; -+ - r = amdgpu_gart_table_vram_pin(adev); - if (r) - return r; - -+skip_pin_bo: - r = adev->gfxhub.funcs->gart_enable(adev); - if (r) - return r; -@@ -1802,7 +1819,6 @@ static int gmc_v9_0_hw_fini(void *handle) - return 0; - } - -- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); - amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - - return 0; -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -index a99953833820e..4259f623a9d7a 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for 
emulation. */ - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); -@@ -177,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); - WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - -+ tmp = mmVM_L2_CNTL3_DEFAULT; - if (adev->gmc.translate_further) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c -index f80a14a1b82dc..f5f7181f9af5f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c -@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for emulation. */ - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c -index 7ded6b2f058ef..2e58ed2caa485 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c -@@ -269,7 +269,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC); /* UC, uncached */ - -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c -index 88e457a150e02..c63b6b9349350 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c -@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC); /* UC, uncached */ - -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c -index c4ef822bbe8c5..ff49eeaf78824 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c -@@ -189,8 +189,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) - ENABLE_ADVANCED_DRIVER_MODEL, 1); - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, - SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); -- tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, -- ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, - MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -index b184b656b9b6b..6f21154d4891f 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -@@ -366,6 +366,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, - WREG32_PCIE(smnPCIE_LC_CNTL, data); - } - -+#ifdef CONFIG_PCIEASPM - static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) - { - uint32_t def, data; -@@ -387,9 +388,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - def = data = RREG32_PCIE(smnPCIE_LC_CNTL); -@@ -445,7 +448,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v2_3_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v2_3_program_ltr(adev); - - def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -469,6 +475,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev) -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -index 0d2d629e2d6a2..be3f6c52c3ffd 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -@@ -278,6 +278,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) - WREG32_PCIE(smnPCIE_CI_CNTL, data); - } - -+#ifdef CONFIG_PCIEASPM - static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) - { - uint32_t def, data; -@@ -299,9 +300,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - def = data = RREG32_PCIE(smnPCIE_LC_CNTL); -@@ -357,7 +360,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v6_1_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v6_1_program_ltr(adev); - - def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -381,6 +387,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -index f50045cebd44c..74cd7543729be 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -@@ -630,6 +630,7 @@ const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = { - .ras_fini = amdgpu_nbio_ras_fini, - }; - -+#ifdef CONFIG_PCIEASPM - static void nbio_v7_4_program_ltr(struct 
amdgpu_device *adev) - { - uint32_t def, data; -@@ -651,9 +652,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - def = data = RREG32_PCIE(smnPCIE_LC_CNTL); -@@ -709,7 +712,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v7_4_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v7_4_program_ltr(adev); - - def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -733,6 +739,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { -diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c -index 01efda4398e56..947e8c09493dc 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nv.c -+++ b/drivers/gpu/drm/amd/amdgpu/nv.c -@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, -+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, - }; - - static const struct amdgpu_video_codecs yc_video_codecs_decode = { -@@ -583,7 +584,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) - - static void nv_program_aspm(struct amdgpu_device *adev) - { -- if (!amdgpu_aspm) -+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) - return; - - if (!(adev->flags & AMD_IS_APU) && -diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c -index 47a500f64db20..bcf356df1ef33 100644 ---- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c -@@ -101,14 +101,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) - int ret; - int retry_loop; - -+ /* Wait for bootloader to signify that it is ready having bit 31 of -+ * C2PMSG_35 set to 1. All other bits are expected to be cleared. -+ * If there is an error in processing command, bits[7:0] will be set. -+ * This is applicable for PSP v13.0.6 and newer. 
-+ */ - for (retry_loop = 0; retry_loop < 10; retry_loop++) { -- /* Wait for bootloader to signify that is -- ready having bit 31 of C2PMSG_35 set to 1 */ -- ret = psp_wait_for(psp, -- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), -- 0x80000000, -- 0x80000000, -- false); -+ ret = psp_wait_for( -+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), -+ 0x80000000, 0xffffffff, false); - - if (ret == 0) - return 0; -diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -index 8931000dcd418..0fad9258e0960 100644 ---- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -@@ -770,8 +770,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) - - DRM_DEBUG("Using doorbell -- " - "wptr_offs == 0x%08x " -- "lower_32_bits(ring->wptr) << 2 == 0x%08x " -- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", -+ "lower_32_bits(ring->wptr << 2) == 0x%08x " -+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n", - ring->wptr_offs, - lower_32_bits(ring->wptr << 2), - upper_32_bits(ring->wptr << 2)); -@@ -978,13 +978,13 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se - - - /** -- * sdma_v4_0_gfx_stop - stop the gfx async dma engines -+ * sdma_v4_0_gfx_enable - enable the gfx async dma engines - * - * @adev: amdgpu_device pointer -- * -- * Stop the gfx async dma ring buffers (VEGA10). -+ * @enable: enable SDMA RB/IB -+ * control the gfx async dma ring buffers (VEGA10). - */ --static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) -+static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable) - { - struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; - u32 rb_cntl, ib_cntl; -@@ -999,10 +999,10 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) - } - - rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); -- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); -+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0); - WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); - ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); -- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); -+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 
1 : 0); - WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - } - } -@@ -1129,7 +1129,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) - int i; - - if (!enable) { -- sdma_v4_0_gfx_stop(adev); -+ sdma_v4_0_gfx_enable(adev, enable); - sdma_v4_0_rlc_stop(adev); - if (adev->sdma.has_page_queue) - sdma_v4_0_page_stop(adev); -@@ -2044,9 +2044,11 @@ static int sdma_v4_0_hw_fini(void *handle) - if (amdgpu_sriov_vf(adev)) - return 0; - -- for (i = 0; i < adev->sdma.num_instances; i++) { -- amdgpu_irq_put(adev, &adev->sdma.ecc_irq, -- AMDGPU_SDMA_IRQ_INSTANCE0 + i); -+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { -+ for (i = 0; i < adev->sdma.num_instances; i++) { -+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, -+ AMDGPU_SDMA_IRQ_INSTANCE0 + i); -+ } - } - - sdma_v4_0_ctx_switch_enable(adev, false); -@@ -2062,6 +2064,12 @@ static int sdma_v4_0_suspend(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ /* SMU saves SDMA state for us */ -+ if (adev->in_s0ix) { -+ sdma_v4_0_gfx_enable(adev, false); -+ return 0; -+ } -+ - return sdma_v4_0_hw_fini(adev); - } - -@@ -2069,6 +2077,14 @@ static int sdma_v4_0_resume(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ /* SMU restores SDMA state for us */ -+ if (adev->in_s0ix) { -+ sdma_v4_0_enable(adev, true); -+ sdma_v4_0_gfx_enable(adev, true); -+ amdgpu_ttm_set_buffer_funcs_status(adev, true); -+ return 0; -+ } -+ - return sdma_v4_0_hw_init(adev); - } - -diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c -index 50bf3b71bc93c..0f75864365d61 100644 ---- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c -@@ -400,8 +400,8 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring) - if (ring->use_doorbell) { - DRM_DEBUG("Using doorbell -- " - "wptr_offs == 0x%08x " -- "lower_32_bits(ring->wptr) << 2 == 0x%08x " -- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", -+ "lower_32_bits(ring->wptr << 2) == 0x%08x " -+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n", - ring->wptr_offs, - lower_32_bits(ring->wptr << 2), - upper_32_bits(ring->wptr << 2)); -@@ -782,9 +782,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) - - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), -- lower_32_bits(ring->wptr) << 2); -+ lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), -- upper_32_bits(ring->wptr) << 2); -+ upper_32_bits(ring->wptr << 2)); - } - - doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); -diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c -index e32efcfb0c8b1..f643b977b5f4c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c -+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c -@@ -287,8 +287,8 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring) - if (ring->use_doorbell) { - DRM_DEBUG("Using doorbell -- " - "wptr_offs == 0x%08x " -- "lower_32_bits(ring->wptr) << 2 == 0x%08x " -- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", -+ "lower_32_bits(ring->wptr << 2) == 0x%08x " -+ "upper_32_bits(ring->wptr << 2) == 0x%08x\n", - ring->wptr_offs, - lower_32_bits(ring->wptr << 2), - upper_32_bits(ring->wptr << 2)); -@@ -660,8 +660,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, 
mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); - - if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ -- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); -- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); -+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); -+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); - } - - doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); -diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c -index e6d2f74a79765..fd34c2100bd96 100644 ---- a/drivers/gpu/drm/amd/amdgpu/si.c -+++ b/drivers/gpu/drm/amd/amdgpu/si.c -@@ -2276,17 +2276,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) - u16 bridge_cfg2, gpu_cfg2; - u32 max_lw, current_lw, tmp; - -- pcie_capability_read_word(root, PCI_EXP_LNKCTL, -- &bridge_cfg); -- pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL, -- &gpu_cfg); -- -- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; -- pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16); -- -- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; -- pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL, -- tmp16); -+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); -+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD); - - tmp = RREG32_PCIE(PCIE_LC_STATUS1); - max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; -@@ -2331,21 +2322,14 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) - - mdelay(100); - -- pcie_capability_read_word(root, PCI_EXP_LNKCTL, -- &tmp16); -- tmp16 &= ~PCI_EXP_LNKCTL_HAWD; -- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); -- pcie_capability_write_word(root, PCI_EXP_LNKCTL, -- tmp16); -- -- pcie_capability_read_word(adev->pdev, -- PCI_EXP_LNKCTL, -- &tmp16); -- tmp16 &= ~PCI_EXP_LNKCTL_HAWD; -- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); -- pcie_capability_write_word(adev->pdev, -- PCI_EXP_LNKCTL, -- tmp16); -+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL, -+ PCI_EXP_LNKCTL_HAWD, -+ bridge_cfg & -+ PCI_EXP_LNKCTL_HAWD); -+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL, -+ PCI_EXP_LNKCTL_HAWD, -+ gpu_cfg & -+ PCI_EXP_LNKCTL_HAWD); - - pcie_capability_read_word(root, PCI_EXP_LNKCTL2, - &tmp16); -@@ -2453,7 +2437,7 @@ static void si_program_aspm(struct amdgpu_device *adev) - bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; - bool disable_clkreq = false; - -- if (amdgpu_aspm == 0) -+ if (!amdgpu_device_should_use_aspm(adev)) - return; - - if (adev->flags & AMD_IS_APU) -diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c -index 0fc97c364fd76..529bb6c6ac6f5 100644 ---- a/drivers/gpu/drm/amd/amdgpu/soc15.c -+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c -@@ -461,8 +461,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, - *value = 0; - for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { - en = &soc15_allowed_read_registers[i]; -- if (adev->reg_offset[en->hwip][en->inst] && -- reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] -+ if (!adev->reg_offset[en->hwip][en->inst]) -+ continue; -+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) - continue; - -@@ -607,8 +608,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) - static int 
soc15_asic_reset(struct amdgpu_device *adev) - { - /* original raven doesn't have full asic reset */ -- if ((adev->apu_flags & AMD_APU_IS_RAVEN) && -- !(adev->apu_flags & AMD_APU_IS_RAVEN2)) -+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) || -+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) - return 0; - - switch (soc15_asic_reset_method(adev)) { -@@ -689,7 +690,7 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) - - static void soc15_program_aspm(struct amdgpu_device *adev) - { -- if (!amdgpu_aspm) -+ if (!amdgpu_device_should_use_aspm(adev)) - return; - - if (!(adev->flags & AMD_IS_APU) && -@@ -1273,8 +1274,11 @@ static int soc15_common_early_init(void *handle) - AMD_CG_SUPPORT_SDMA_LS | - AMD_CG_SUPPORT_VCN_MGCG; - -+ /* -+ * MMHUB PG needs to be disabled for Picasso for -+ * stability reasons. -+ */ - adev->pg_flags = AMD_PG_SUPPORT_SDMA | -- AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN; - } else { - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | -@@ -1413,22 +1417,17 @@ static int soc15_common_sw_fini(void *handle) - return 0; - } - --static void soc15_doorbell_range_init(struct amdgpu_device *adev) -+static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) - { - int i; -- struct amdgpu_ring *ring; - -- /* sdma/ih doorbell range are programed by hypervisor */ -+ /* sdma doorbell range is programed by hypervisor */ - if (!amdgpu_sriov_vf(adev)) { - for (i = 0; i < adev->sdma.num_instances; i++) { -- ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, -- ring->use_doorbell, ring->doorbell_index, -+ true, adev->doorbell_index.sdma_engine[i] << 1, - adev->doorbell_index.sdma_doorbell_range); - } -- -- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -- adev->irq.ih.doorbell_index); - } - } - -@@ -1453,10 +1452,11 @@ static int soc15_common_hw_init(void *handle) - soc15_enable_doorbell_aperture(adev, true); - /* HW doorbell routing policy: doorbell writing not - * in SDMA/IH/MM/ACV range will be routed to CP. So -- * we need to init SDMA/IH/MM/ACV doorbell range prior -- * to CP ip block init and ring test. -+ * we need to init SDMA doorbell range prior -+ * to CP ip block init and ring test. IH already -+ * happens before CP. 
- */ -- soc15_doorbell_range_init(adev); -+ soc15_sdma_doorbell_range_init(adev); - - return 0; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c -index 7232241e3bfb2..0fef925b66024 100644 ---- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c -+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c -@@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->uvd.idle_work); -+ -+ if (RREG32(mmUVD_STATUS) != 0) -+ uvd_v3_1_stop(adev); -+ -+ return 0; -+} -+ -+static int uvd_v3_1_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- if (RREG32(mmUVD_STATUS) != 0) -- uvd_v3_1_stop(adev); -- -- return 0; --} -- --static int uvd_v3_1_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = uvd_v3_1_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c -index 52d6de969f462..c108b83817951 100644 ---- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c -+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c -@@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->uvd.idle_work); -+ -+ if (RREG32(mmUVD_STATUS) != 0) -+ uvd_v4_2_stop(adev); -+ -+ return 0; -+} -+ -+static int uvd_v4_2_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -236,17 +249,6 @@ static int uvd_v4_2_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- if (RREG32(mmUVD_STATUS) != 0) -- uvd_v4_2_stop(adev); -- -- return 0; --} -- --static int uvd_v4_2_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = uvd_v4_2_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c -index db6d06758e4d4..563493d1f8306 100644 ---- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c -@@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->uvd.idle_work); -+ -+ if (RREG32(mmUVD_STATUS) != 0) -+ uvd_v5_0_stop(adev); -+ -+ return 0; -+} -+ -+static int uvd_v5_0_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- if (RREG32(mmUVD_STATUS) != 0) -- uvd_v5_0_stop(adev); -- -- return 0; --} -- --static int uvd_v5_0_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = uvd_v5_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c -index bc571833632ea..72f8762907681 100644 ---- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c -@@ -543,6 +543,19 @@ static int uvd_v6_0_hw_fini(void 
*handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->uvd.idle_work); -+ -+ if (RREG32(mmUVD_STATUS) != 0) -+ uvd_v6_0_stop(adev); -+ -+ return 0; -+} -+ -+static int uvd_v6_0_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -567,17 +580,6 @@ static int uvd_v6_0_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- if (RREG32(mmUVD_STATUS) != 0) -- uvd_v6_0_stop(adev); -- -- return 0; --} -- --static int uvd_v6_0_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = uvd_v6_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c -index b6e82d75561f6..1fd9ca21a091b 100644 ---- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c -@@ -606,6 +606,23 @@ static int uvd_v7_0_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->uvd.idle_work); -+ -+ if (!amdgpu_sriov_vf(adev)) -+ uvd_v7_0_stop(adev); -+ else { -+ /* full access mode, so don't touch any UVD register */ -+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); -+ } -+ -+ return 0; -+} -+ -+static int uvd_v7_0_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -630,21 +647,6 @@ static int uvd_v7_0_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- if (!amdgpu_sriov_vf(adev)) -- uvd_v7_0_stop(adev); -- else { -- /* full access mode, so don't touch any UVD register */ -- DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); -- } -- -- return 0; --} -- --static int uvd_v7_0_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = uvd_v7_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c -index b70c17f0c52e8..98952fd387e73 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c -@@ -479,6 +479,17 @@ static int vce_v2_0_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->vce.idle_work); -+ -+ return 0; -+} -+ -+static int vce_v2_0_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -502,14 +513,6 @@ static int vce_v2_0_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- return 0; --} -- --static int vce_v2_0_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = vce_v2_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c -index 9de66893ccd6d..8fb5df7181e09 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c -@@ -490,6 +490,21 @@ static int vce_v3_0_hw_fini(void *handle) - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -+ cancel_delayed_work_sync(&adev->vce.idle_work); -+ -+ r = vce_v3_0_wait_for_idle(handle); -+ if (r) -+ return r; -+ -+ vce_v3_0_stop(adev); -+ return 
vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); -+} -+ -+static int vce_v3_0_suspend(void *handle) -+{ -+ int r; -+ struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work -@@ -513,19 +528,6 @@ static int vce_v3_0_hw_fini(void *handle) - AMD_CG_STATE_GATE); - } - -- r = vce_v3_0_wait_for_idle(handle); -- if (r) -- return r; -- -- vce_v3_0_stop(adev); -- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); --} -- --static int vce_v3_0_suspend(void *handle) --{ -- int r; -- struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- - r = vce_v3_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c -index fec902b800c28..70b8c88d30513 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c -@@ -542,29 +542,8 @@ static int vce_v4_0_hw_fini(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - -- /* -- * Proper cleanups before halting the HW engine: -- * - cancel the delayed idle work -- * - enable powergating -- * - enable clockgating -- * - disable dpm -- * -- * TODO: to align with the VCN implementation, move the -- * jobs for clockgating/powergating/dpm setting to -- * ->set_powergating_state(). -- */ - cancel_delayed_work_sync(&adev->vce.idle_work); - -- if (adev->pm.dpm_enabled) { -- amdgpu_dpm_enable_vce(adev, false); -- } else { -- amdgpu_asic_set_vce_clocks(adev, 0, 0); -- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, -- AMD_PG_STATE_GATE); -- amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, -- AMD_CG_STATE_GATE); -- } -- - if (!amdgpu_sriov_vf(adev)) { - /* vce_v4_0_wait_for_idle(handle); */ - vce_v4_0_stop(adev); -@@ -594,6 +573,29 @@ static int vce_v4_0_suspend(void *handle) - drm_dev_exit(idx); - } - -+ /* -+ * Proper cleanups before halting the HW engine: -+ * - cancel the delayed idle work -+ * - enable powergating -+ * - enable clockgating -+ * - disable dpm -+ * -+ * TODO: to align with the VCN implementation, move the -+ * jobs for clockgating/powergating/dpm setting to -+ * ->set_powergating_state(). 
-+ */ -+ cancel_delayed_work_sync(&adev->vce.idle_work); -+ -+ if (adev->pm.dpm_enabled) { -+ amdgpu_dpm_enable_vce(adev, false); -+ } else { -+ amdgpu_asic_set_vce_clocks(adev, 0, 0); -+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, -+ AMD_PG_STATE_GATE); -+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, -+ AMD_CG_STATE_GATE); -+ } -+ - r = vce_v4_0_hw_fini(adev); - if (r) - return r; -diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c -index 121ee9f2b8d16..462008d506904 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c -@@ -253,6 +253,13 @@ static int vcn_v1_0_suspend(void *handle) - { - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ bool idle_work_unexecuted; -+ -+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work); -+ if (idle_work_unexecuted) { -+ if (adev->pm.dpm_enabled) -+ amdgpu_dpm_enable_uvd(adev, false); -+ } - - r = vcn_v1_0_hw_fini(adev); - if (r) -diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c -index f4686e918e0d1..c405075a572c1 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c -@@ -22,6 +22,7 @@ - */ - - #include -+#include - - #include "amdgpu.h" - #include "amdgpu_vcn.h" -@@ -192,11 +193,14 @@ static int vcn_v2_0_sw_init(void *handle) - */ - static int vcn_v2_0_sw_fini(void *handle) - { -- int r; -+ int r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; - -- fw_shared->present_flag_0 = 0; -+ if (drm_dev_enter(&adev->ddev, &idx)) { -+ fw_shared->present_flag_0 = 0; -+ drm_dev_exit(idx); -+ } - - amdgpu_virt_free_mm_table(adev); - -diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c -index e0c0c3734432e..a0956d8623770 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c -+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c -@@ -22,6 +22,7 @@ - */ - - #include -+#include - - #include "amdgpu.h" - #include "amdgpu_vcn.h" -@@ -233,17 +234,21 @@ static int vcn_v2_5_sw_init(void *handle) - */ - static int vcn_v2_5_sw_fini(void *handle) - { -- int i, r; -+ int i, r, idx; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - volatile struct amdgpu_fw_shared *fw_shared; - -- for (i = 0; i < adev->vcn.num_vcn_inst; i++) { -- if (adev->vcn.harvest_config & (1 << i)) -- continue; -- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; -- fw_shared->present_flag_0 = 0; -+ if (drm_dev_enter(&adev->ddev, &idx)) { -+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) { -+ if (adev->vcn.harvest_config & (1 << i)) -+ continue; -+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; -+ fw_shared->present_flag_0 = 0; -+ } -+ drm_dev_exit(idx); - } - -+ - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_free_mm_table(adev); - -diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c -index 3d18aab88b4e2..1310617f030f7 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c -@@ -601,8 +601,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); - - /* VCN global tiling registers */ -- WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( -- UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); -+ 
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( -+ UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); - } - - static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) -@@ -1508,8 +1508,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) - - static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) - { -+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; - uint32_t tmp; - -+ vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state); -+ - /* Wait for power status to be 1 */ - SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); -diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -index a9ca6988009e3..73728fa859970 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) - } - } - -+ if (!amdgpu_sriov_vf(adev)) -+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -+ adev->irq.ih.doorbell_index); -+ - pci_set_master(adev->pdev); - - /* enable interrupts */ -diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -index f51dfc38ac656..ac34af4cb178c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) - } - } - -+ if (!amdgpu_sriov_vf(adev)) -+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -+ adev->irq.ih.doorbell_index); -+ - pci_set_master(adev->pdev); - - /* enable interrupts */ -diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c -index fe9a7cc8d9eb0..b9555ba6d32fb 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vi.c -+++ b/drivers/gpu/drm/amd/amdgpu/vi.c -@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev) - u32 reference_clock = adev->clock.spll.reference_freq; - u32 tmp; - -- if (adev->flags & AMD_IS_APU) -- return reference_clock; -+ if (adev->flags & AMD_IS_APU) { -+ switch (adev->asic_type) { -+ case CHIP_STONEY: -+ /* vbios says 48Mhz, but the actual freq is 100Mhz */ -+ return 10000; -+ default: -+ return reference_clock; -+ } -+ } - - tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); - if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) -@@ -956,6 +963,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) - { - int r; - -+ /* APUs don't have full asic reset */ -+ if (adev->flags & AMD_IS_APU) -+ return 0; -+ - if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - dev_info(adev->dev, "BACO reset\n"); - r = amdgpu_dpm_baco_reset(adev); -@@ -1136,7 +1147,7 @@ static void vi_program_aspm(struct amdgpu_device *adev) - bool bL1SS = false; - bool bClkReqSupport = true; - -- if (!amdgpu_aspm) -+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) - return; - - if (adev->flags & AMD_IS_APU || -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c -index 86afd37b098d6..6688129df240e 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c -@@ -1807,13 +1807,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) - if (!args->start_addr || !args->size) - return -EINVAL; - -- mutex_lock(&p->mutex); -- - r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, - 
args->attrs); - -- mutex_unlock(&p->mutex); -- - return r; - } - #else -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c -index cfedfb1e8596c..e574aa32a111d 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c -@@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, - return -ENODEV; - /* same everything but the other direction */ - props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); -+ if (!props2) -+ return -ENOMEM; -+ - props2->node_from = id_to; - props2->node_to = id_from; - props2->kobj = NULL; -@@ -1560,7 +1563,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) - /* Fetch the CRAT table from ACPI */ - status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); - if (status == AE_NOT_FOUND) { -- pr_warn("CRAT table not found\n"); -+ pr_info("CRAT table not found\n"); - return -ENODATA; - } else if (ACPI_FAILURE(status)) { - const char *err = acpi_format_exception(status); -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c -index 4a416231b24c8..660eb7097cfc0 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c -@@ -834,15 +834,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, - } - - /* Verify module parameters regarding mapped process number*/ -- if ((hws_max_conc_proc < 0) -- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { -- dev_err(kfd_device, -- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", -- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, -- kfd->vm_info.vmid_num_kfd); -+ if (hws_max_conc_proc >= 0) -+ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); -+ else - kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; -- } else -- kfd->max_proc_per_quantum = hws_max_conc_proc; - - /* calculate max size of mqds needed for queues */ - size = max_num_of_queues_per_device * -@@ -916,6 +911,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, - kfd_double_confirm_iommu_support(kfd); - - if (kfd_iommu_device_init(kfd)) { -+ kfd->use_iommu_v2 = false; - dev_err(kfd_device, "Error initializing iommuv2\n"); - goto device_iommu_error; - } -@@ -924,6 +920,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, - - svm_migrate_init((struct amdgpu_device *)kfd->kgd); - -+ if(kgd2kfd_resume_iommu(kfd)) -+ goto device_iommu_error; -+ - if (kfd_resume(kfd)) - goto kfd_resume_error; - -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c -index f8fce9d05f50c..442857f3bde77 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c -@@ -138,19 +138,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, - } - - static void increment_queue_count(struct device_queue_manager *dqm, -- enum kfd_queue_type type) -+ struct qcm_process_device *qpd, -+ struct queue *q) - { - dqm->active_queue_count++; -- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) -+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || -+ q->properties.type == KFD_QUEUE_TYPE_DIQ) - dqm->active_cp_queue_count++; -+ -+ if (q->properties.is_gws) { -+ dqm->gws_queue_count++; -+ qpd->mapped_gws_queue = true; -+ } - } - - static void decrement_queue_count(struct device_queue_manager *dqm, -- enum kfd_queue_type type) -+ struct qcm_process_device *qpd, -+ struct queue *q) - { - 
dqm->active_queue_count--; -- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) -+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || -+ q->properties.type == KFD_QUEUE_TYPE_DIQ) - dqm->active_cp_queue_count--; -+ -+ if (q->properties.is_gws) { -+ dqm->gws_queue_count--; -+ qpd->mapped_gws_queue = false; -+ } - } - - static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) -@@ -390,7 +404,7 @@ add_queue_to_list: - list_add(&q->list, &qpd->queues_list); - qpd->queue_count++; - if (q->properties.is_active) -- increment_queue_count(dqm, q->properties.type); -+ increment_queue_count(dqm, qpd, q); - - /* - * Unconditionally increment this counter, regardless of the queue's -@@ -515,13 +529,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, - deallocate_vmid(dqm, qpd, q); - } - qpd->queue_count--; -- if (q->properties.is_active) { -- decrement_queue_count(dqm, q->properties.type); -- if (q->properties.is_gws) { -- dqm->gws_queue_count--; -- qpd->mapped_gws_queue = false; -- } -- } -+ if (q->properties.is_active) -+ decrement_queue_count(dqm, qpd, q); - - return retval; - } -@@ -613,12 +622,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) - * dqm->active_queue_count to determine whether a new runlist must be - * uploaded. - */ -- if (q->properties.is_active && !prev_active) -- increment_queue_count(dqm, q->properties.type); -- else if (!q->properties.is_active && prev_active) -- decrement_queue_count(dqm, q->properties.type); -- -- if (q->gws && !q->properties.is_gws) { -+ if (q->properties.is_active && !prev_active) { -+ increment_queue_count(dqm, &pdd->qpd, q); -+ } else if (!q->properties.is_active && prev_active) { -+ decrement_queue_count(dqm, &pdd->qpd, q); -+ } else if (q->gws && !q->properties.is_gws) { - if (q->properties.is_active) { - dqm->gws_queue_count++; - pdd->qpd.mapped_gws_queue = true; -@@ -680,11 +688,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, - mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( - q->properties.type)]; - q->properties.is_active = false; -- decrement_queue_count(dqm, q->properties.type); -- if (q->properties.is_gws) { -- dqm->gws_queue_count--; -- qpd->mapped_gws_queue = false; -- } -+ decrement_queue_count(dqm, qpd, q); - - if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) - continue; -@@ -730,7 +734,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, - continue; - - q->properties.is_active = false; -- decrement_queue_count(dqm, q->properties.type); -+ decrement_queue_count(dqm, qpd, q); - } - pdd->last_evict_timestamp = get_jiffies_64(); - retval = execute_queues_cpsch(dqm, -@@ -801,11 +805,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, - mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( - q->properties.type)]; - q->properties.is_active = true; -- increment_queue_count(dqm, q->properties.type); -- if (q->properties.is_gws) { -- dqm->gws_queue_count++; -- qpd->mapped_gws_queue = true; -- } -+ increment_queue_count(dqm, qpd, q); - - if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) - continue; -@@ -863,7 +863,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, - continue; - - q->properties.is_active = true; -- increment_queue_count(dqm, q->properties.type); -+ increment_queue_count(dqm, &pdd->qpd, q); - } - retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); -@@ -1225,6 +1225,11 @@ static 
int stop_cpsch(struct device_queue_manager *dqm) - bool hanging; - - dqm_lock(dqm); -+ if (!dqm->sched_running) { -+ dqm_unlock(dqm); -+ return 0; -+ } -+ - if (!dqm->is_hws_hang) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); - hanging = dqm->is_hws_hang || dqm->is_resetting; -@@ -1260,7 +1265,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, - dqm->total_queue_count); - - list_add(&kq->list, &qpd->priv_queue_list); -- increment_queue_count(dqm, kq->queue->properties.type); -+ increment_queue_count(dqm, qpd, kq->queue); - qpd->is_debug = true; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - dqm_unlock(dqm); -@@ -1274,7 +1279,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, - { - dqm_lock(dqm); - list_del(&kq->list); -- decrement_queue_count(dqm, kq->queue->properties.type); -+ decrement_queue_count(dqm, qpd, kq->queue); - qpd->is_debug = false; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); - /* -@@ -1341,7 +1346,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, - qpd->queue_count++; - - if (q->properties.is_active) { -- increment_queue_count(dqm, q->properties.type); -+ increment_queue_count(dqm, qpd, q); - - execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); -@@ -1543,15 +1548,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, - list_del(&q->list); - qpd->queue_count--; - if (q->properties.is_active) { -- decrement_queue_count(dqm, q->properties.type); -+ decrement_queue_count(dqm, qpd, q); - retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - if (retval == -ETIME) - qpd->reset_wavefronts = true; -- if (q->properties.is_gws) { -- dqm->gws_queue_count--; -- qpd->mapped_gws_queue = false; -- } - } - - /* -@@ -1742,7 +1743,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, - /* Clean all kernel queues */ - list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { - list_del(&kq->list); -- decrement_queue_count(dqm, kq->queue->properties.type); -+ decrement_queue_count(dqm, qpd, kq->queue); - qpd->is_debug = false; - dqm->total_queue_count--; - filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; -@@ -1755,13 +1756,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - deallocate_sdma_queue(dqm, q); - -- if (q->properties.is_active) { -- decrement_queue_count(dqm, q->properties.type); -- if (q->properties.is_gws) { -- dqm->gws_queue_count--; -- qpd->mapped_gws_queue = false; -- } -- } -+ if (q->properties.is_active) -+ decrement_queue_count(dqm, qpd, q); - - dqm->total_queue_count--; - } -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c -index 3eea4edee355d..8b5c82af2acd7 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c -@@ -528,14 +528,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) - struct kfd_event_waiter *event_waiters; - uint32_t i; - -- event_waiters = kmalloc_array(num_events, -- sizeof(struct kfd_event_waiter), -- GFP_KERNEL); -+ event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter), -+ GFP_KERNEL); -+ if (!event_waiters) -+ return NULL; - -- for (i = 0; (event_waiters) && (i < num_events) ; i++) { -+ for (i = 0; i < num_events; i++) - init_wait(&event_waiters[i].wait); -- event_waiters[i].activated = false; 
-- } - - return event_waiters; - } -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c -index 4a16e3c257b92..131d98c600eed 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c -@@ -780,7 +780,7 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, - static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) - { - unsigned long addr = vmf->address; -- struct vm_area_struct *vma; -+ struct svm_range_bo *svm_bo; - enum svm_work_list_ops op; - struct svm_range *parent; - struct svm_range *prange; -@@ -788,24 +788,42 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) - struct mm_struct *mm; - int r = 0; - -- vma = vmf->vma; -- mm = vma->vm_mm; -+ svm_bo = vmf->page->zone_device_data; -+ if (!svm_bo) { -+ pr_debug("failed get device page at addr 0x%lx\n", addr); -+ return VM_FAULT_SIGBUS; -+ } -+ if (!mmget_not_zero(svm_bo->eviction_fence->mm)) { -+ pr_debug("addr 0x%lx of process mm is detroyed\n", addr); -+ return VM_FAULT_SIGBUS; -+ } - -- p = kfd_lookup_process_by_mm(vma->vm_mm); -+ mm = svm_bo->eviction_fence->mm; -+ if (mm != vmf->vma->vm_mm) -+ pr_debug("addr 0x%lx is COW mapping in child process\n", addr); -+ -+ p = kfd_lookup_process_by_mm(mm); - if (!p) { - pr_debug("failed find process at fault address 0x%lx\n", addr); -- return VM_FAULT_SIGBUS; -+ r = VM_FAULT_SIGBUS; -+ goto out_mmput; - } -- addr >>= PAGE_SHIFT; -+ if (READ_ONCE(p->svms.faulting_task) == current) { -+ pr_debug("skipping ram migration\n"); -+ r = 0; -+ goto out_unref_process; -+ } -+ - pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); -+ addr >>= PAGE_SHIFT; - - mutex_lock(&p->svms.lock); - - prange = svm_range_from_addr(&p->svms, addr, &parent); - if (!prange) { -- pr_debug("cannot find svm range at 0x%lx\n", addr); -+ pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); - r = -EFAULT; -- goto out; -+ goto out_unlock_svms; - } - - mutex_lock(&parent->migrate_mutex); -@@ -827,10 +845,10 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) - goto out_unlock_prange; - } - -- r = svm_migrate_vram_to_ram(prange, mm); -+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm); - if (r) -- pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r, -- prange, prange->start, prange->last); -+ pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", -+ r, prange->svms, prange, prange->start, prange->last); - - /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ - if (p->xnack_enabled && parent == prange) -@@ -844,12 +862,13 @@ out_unlock_prange: - if (prange != parent) - mutex_unlock(&prange->migrate_mutex); - mutex_unlock(&parent->migrate_mutex); --out: -+out_unlock_svms: - mutex_unlock(&p->svms.lock); -- kfd_unref_process(p); -- -+out_unref_process: - pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); -- -+ kfd_unref_process(p); -+out_mmput: -+ mmput(mm); - return r ? 
VM_FAULT_SIGBUS : 0; - } - -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c -index 7f4e102ff4bd3..ddaafcd7b8256 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c -@@ -113,18 +113,19 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, - &(mqd_mem_obj->gtt_mem), - &(mqd_mem_obj->gpu_addr), - (void *)&(mqd_mem_obj->cpu_ptr), true); -+ -+ if (retval) { -+ kfree(mqd_mem_obj); -+ return NULL; -+ } - } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), - &mqd_mem_obj); -- } -- -- if (retval) { -- kfree(mqd_mem_obj); -- return NULL; -+ if (retval) -+ return NULL; - } - - return mqd_mem_obj; -- - } - - static void init_mqd(struct mqd_manager *mm, void **mqd, -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h -index 6d8f9bb2d9057..47ec820cae72b 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h -@@ -755,6 +755,7 @@ struct svm_range_list { - atomic_t evicted_ranges; - struct delayed_work restore_work; - DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); -+ struct task_struct *faulting_task; - }; - - /* Process data */ -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c -index ed4bc5f844ce7..766b3660c8c86 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c -@@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) - return ret; - } - -- ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, -- O_RDWR); -- if (ret < 0) { -- kfifo_free(&client->fifo); -- kfree(client); -- return ret; -- } -- *fd = ret; -- - init_waitqueue_head(&client->wait_queue); - spin_lock_init(&client->lock); - client->events = 0; -@@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) - list_add_rcu(&client->list, &dev->smi_clients); - spin_unlock(&dev->smi_lock); - -+ ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, -+ O_RDWR); -+ if (ret < 0) { -+ spin_lock(&dev->smi_lock); -+ list_del_rcu(&client->list); -+ spin_unlock(&dev->smi_lock); -+ -+ synchronize_rcu(); -+ -+ kfifo_free(&client->fifo); -+ kfree(client); -+ return ret; -+ } -+ *fd = ret; -+ - return 0; - } -diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c -index 9d0f65a90002d..22a70aaccf13c 100644 ---- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c -+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c -@@ -936,7 +936,7 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, - } - - static int --svm_range_split_tail(struct svm_range *prange, struct svm_range *new, -+svm_range_split_tail(struct svm_range *prange, - uint64_t new_last, struct list_head *insert_list) - { - struct svm_range *tail; -@@ -948,7 +948,7 @@ svm_range_split_tail(struct svm_range *prange, struct svm_range *new, - } - - static int --svm_range_split_head(struct svm_range *prange, struct svm_range *new, -+svm_range_split_head(struct svm_range *prange, - uint64_t new_start, struct list_head *insert_list) - { - struct svm_range *head; -@@ -1307,7 +1307,7 @@ struct svm_validate_context { - struct svm_range *prange; - bool intr; - unsigned long bitmap[MAX_GPU_INSTANCE]; -- struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1]; -+ struct ttm_validate_buffer tv[MAX_GPU_INSTANCE]; - struct list_head validate_list; - struct ww_acquire_ctx ticket; 
- }; -@@ -1334,11 +1334,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) - ctx->tv[gpuidx].num_shared = 4; - list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); - } -- if (ctx->prange->svm_bo && ctx->prange->ttm_res) { -- ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo; -- ctx->tv[MAX_GPU_INSTANCE].num_shared = 1; -- list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list); -- } - - r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, - ctx->intr, NULL); -@@ -1494,9 +1489,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, - - next = min(vma->vm_end, end); - npages = (next - addr) >> PAGE_SHIFT; -+ WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, - addr, npages, &hmm_range, - readonly, true, owner); -+ WRITE_ONCE(p->svms.faulting_task, NULL); - if (r) { - pr_debug("failed %d to get svm range pages\n", r); - goto unreserve_out; -@@ -1570,7 +1567,6 @@ retry_flush_work: - static void svm_range_restore_work(struct work_struct *work) - { - struct delayed_work *dwork = to_delayed_work(work); -- struct amdkfd_process_info *process_info; - struct svm_range_list *svms; - struct svm_range *prange; - struct kfd_process *p; -@@ -1590,12 +1586,10 @@ static void svm_range_restore_work(struct work_struct *work) - * the lifetime of this thread, kfd_process and mm will be valid. - */ - p = container_of(svms, struct kfd_process, svms); -- process_info = p->kgd_process_info; - mm = p->mm; - if (!mm) - return; - -- mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); - mutex_lock(&svms->lock); - -@@ -1648,7 +1642,6 @@ static void svm_range_restore_work(struct work_struct *work) - out_reschedule: - mutex_unlock(&svms->lock); - mmap_write_unlock(mm); -- mutex_unlock(&process_info->lock); - - /* If validation failed, reschedule another attempt */ - if (evicted_ranges) { -@@ -1764,49 +1757,54 @@ static struct svm_range *svm_range_clone(struct svm_range *old) - } - - /** -- * svm_range_handle_overlap - split overlap ranges -- * @svms: svm range list header -- * @new: range added with this attributes -- * @start: range added start address, in pages -- * @last: range last address, in pages -- * @update_list: output, the ranges attributes are updated. For set_attr, this -- * will do validation and map to GPUs. For unmap, this will be -- * removed and unmap from GPUs -- * @insert_list: output, the ranges will be inserted into svms, attributes are -- * not changes. For set_attr, this will add into svms. -- * @remove_list:output, the ranges will be removed from svms -- * @left: the remaining range after overlap, For set_attr, this will be added -- * as new range. -+ * svm_range_add - add svm range and handle overlap -+ * @p: the range add to this process svms -+ * @start: page size aligned -+ * @size: page size aligned -+ * @nattr: number of attributes -+ * @attrs: array of attributes -+ * @update_list: output, the ranges need validate and update GPU mapping -+ * @insert_list: output, the ranges need insert to svms -+ * @remove_list: output, the ranges are replaced and need remove from svms - * -- * Total have 5 overlap cases. -+ * Check if the virtual address range has overlap with any existing ranges, -+ * split partly overlapping ranges and add new ranges in the gaps. All changes -+ * should be applied to the range_list and interval tree transactionally. If -+ * any range split or allocation fails, the entire update fails. 
Therefore any -+ * existing overlapping svm_ranges are cloned and the original svm_ranges left -+ * unchanged. - * -- * This function handles overlap of an address interval with existing -- * struct svm_ranges for applying new attributes. This may require -- * splitting existing struct svm_ranges. All changes should be applied to -- * the range_list and interval tree transactionally. If any split operation -- * fails, the entire update fails. Therefore the existing overlapping -- * svm_ranges are cloned and the original svm_ranges left unchanged. If the -- * transaction succeeds, the modified clones are added and the originals -- * freed. Otherwise the clones are removed and the old svm_ranges remain. -+ * If the transaction succeeds, the caller can update and insert clones and -+ * new ranges, then free the originals. - * -- * Context: The caller must hold svms->lock -+ * Otherwise the caller can free the clones and new ranges, while the old -+ * svm_ranges remain unchanged. -+ * -+ * Context: Process context, caller must hold svms->lock -+ * -+ * Return: -+ * 0 - OK, otherwise error code - */ - static int --svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, -- unsigned long start, unsigned long last, -- struct list_head *update_list, -- struct list_head *insert_list, -- struct list_head *remove_list, -- unsigned long *left) -+svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, -+ uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, -+ struct list_head *update_list, struct list_head *insert_list, -+ struct list_head *remove_list) - { -+ unsigned long last = start + size - 1UL; -+ struct svm_range_list *svms = &p->svms; - struct interval_tree_node *node; -+ struct svm_range new = {0}; - struct svm_range *prange; - struct svm_range *tmp; - int r = 0; - -+ pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); -+ - INIT_LIST_HEAD(update_list); - INIT_LIST_HEAD(insert_list); - INIT_LIST_HEAD(remove_list); -+ svm_range_apply_attrs(p, &new, nattr, attrs); - - node = interval_tree_iter_first(&svms->objects, start, last); - while (node) { -@@ -1834,14 +1832,14 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, - - if (node->start < start) { - pr_debug("change old range start\n"); -- r = svm_range_split_head(prange, new, start, -+ r = svm_range_split_head(prange, start, - insert_list); - if (r) - goto out; - } - if (node->last > last) { - pr_debug("change old range last\n"); -- r = svm_range_split_tail(prange, new, last, -+ r = svm_range_split_tail(prange, last, - insert_list); - if (r) - goto out; -@@ -1853,7 +1851,7 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, - prange = old; - } - -- if (!svm_range_is_same_attrs(prange, new)) -+ if (!svm_range_is_same_attrs(prange, &new)) - list_add(&prange->update_list, update_list); - - /* insert a new node if needed */ -@@ -1873,8 +1871,16 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, - start = next_start; - } - -- if (left && start <= last) -- *left = last - start + 1; -+ /* add a final range at the end if needed */ -+ if (start <= last) { -+ prange = svm_range_new(svms, start, last); -+ if (!prange) { -+ r = -ENOMEM; -+ goto out; -+ } -+ list_add(&prange->insert_list, insert_list); -+ list_add(&prange->update_list, update_list); -+ } - - out: - if (r) -@@ -2177,6 +2183,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, - - if (range->event == MMU_NOTIFY_RELEASE) - return true; -+ if 
(!mmget_not_zero(mni->mm)) -+ return true; - - start = mni->interval_tree.start; - last = mni->interval_tree.last; -@@ -2203,6 +2211,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, - } - - svm_range_unlock(prange); -+ mmput(mni->mm); - - return true; - } -@@ -2702,59 +2711,6 @@ svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size) - return true; - } - --/** -- * svm_range_add - add svm range and handle overlap -- * @p: the range add to this process svms -- * @start: page size aligned -- * @size: page size aligned -- * @nattr: number of attributes -- * @attrs: array of attributes -- * @update_list: output, the ranges need validate and update GPU mapping -- * @insert_list: output, the ranges need insert to svms -- * @remove_list: output, the ranges are replaced and need remove from svms -- * -- * Check if the virtual address range has overlap with the registered ranges, -- * split the overlapped range, copy and adjust pages address and vram nodes in -- * old and new ranges. -- * -- * Context: Process context, caller must hold svms->lock -- * -- * Return: -- * 0 - OK, otherwise error code -- */ --static int --svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, -- uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, -- struct list_head *update_list, struct list_head *insert_list, -- struct list_head *remove_list) --{ -- uint64_t last = start + size - 1UL; -- struct svm_range_list *svms; -- struct svm_range new = {0}; -- struct svm_range *prange; -- unsigned long left = 0; -- int r = 0; -- -- pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last); -- -- svm_range_apply_attrs(p, &new, nattr, attrs); -- -- svms = &p->svms; -- -- r = svm_range_handle_overlap(svms, &new, start, last, update_list, -- insert_list, remove_list, &left); -- if (r) -- return r; -- -- if (left) { -- prange = svm_range_new(svms, last - left + 1, last); -- list_add(&prange->insert_list, insert_list); -- list_add(&prange->update_list, update_list); -- } -- -- return 0; --} -- - /** - * svm_range_best_prefetch_location - decide the best prefetch location - * @prange: svm range structure -@@ -2979,7 +2935,6 @@ static int - svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, - uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) - { -- struct amdkfd_process_info *process_info = p->kgd_process_info; - struct mm_struct *mm = current->mm; - struct list_head update_list; - struct list_head insert_list; -@@ -2998,8 +2953,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, - - svms = &p->svms; - -- mutex_lock(&process_info->lock); -- - svm_range_list_lock_and_flush_work(svms, mm); - - if (!svm_range_is_valid(mm, start, size)) { -@@ -3075,8 +3028,6 @@ out_unlock_range: - mutex_unlock(&svms->lock); - mmap_read_unlock(mm); - out: -- mutex_unlock(&process_info->lock); -- - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, - &p->svms, start, start + size - 1, r); - -diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig -index 127667e549c19..f25a2c80afcfd 100644 ---- a/drivers/gpu/drm/amd/display/Kconfig -+++ b/drivers/gpu/drm/amd/display/Kconfig -@@ -5,6 +5,7 @@ menu "Display Engine Configuration" - config DRM_AMD_DC - bool "AMD DC - Enable new display engine" - default y -+ depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64 - select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && 
KCOV_ENABLE_COMPARISONS) - help -@@ -12,6 +13,12 @@ config DRM_AMD_DC - support for AMDGPU. This adds required support for Vega and - Raven ASICs. - -+ calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64) -+ architectures built with Clang (all released versions), whereby the stack -+ frame gets blown up to well over 5k. This would cause an immediate kernel -+ panic on most architectures. We'll revert this when the following bug report -+ has been resolved: https://github.com/llvm/llvm-project/issues/41896. -+ - config DRM_AMD_DC_DCN - def_bool n - help -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -index 1ea31dcc7a8b0..4cf33abfb7cca 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -@@ -70,6 +70,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -215,6 +216,8 @@ static void handle_cursor_update(struct drm_plane *plane, - static const struct drm_format_info * - amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); - -+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); -+ - static bool - is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state); -@@ -350,6 +353,35 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, - return false; - } - -+/** -+ * update_planes_and_stream_adapter() - Send planes to be updated in DC -+ * -+ * DC has a generic way to update planes and stream via -+ * dc_update_planes_and_stream function; however, DM might need some -+ * adjustments and preparation before calling it. This function is a wrapper -+ * for the dc_update_planes_and_stream that does any required configuration -+ * before passing control to DC. -+ */ -+static inline bool update_planes_and_stream_adapter(struct dc *dc, -+ int update_type, -+ int planes_count, -+ struct dc_stream_state *stream, -+ struct dc_stream_update *stream_update, -+ struct dc_surface_update *array_of_surface_update) -+{ -+ /* -+ * Previous frame finished and HW is ready for optimization. -+ */ -+ if (update_type == UPDATE_TYPE_FAST) -+ dc_post_update_surfaces_to_stream(dc); -+ -+ return dc_update_planes_and_stream(dc, -+ array_of_surface_update, -+ planes_count, -+ stream, -+ stream_update); -+} -+ - /** - * dm_pflip_high_irq() - Handle pageflip interrupt - * @interrupt_params: ignored -@@ -618,6 +650,113 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) - } - #endif - -+/** -+ * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. -+ * @adev: amdgpu_device pointer -+ * @notify: dmub notification structure -+ * -+ * Dmub AUX or SET_CONFIG command completion processing callback -+ * Copies dmub notification to DM which is to be read by AUX command. -+ * issuing thread and also signals the event to wake up the thread. -+ */ -+void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) -+{ -+ if (adev->dm.dmub_notify) -+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); -+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) -+ complete(&adev->dm.dmub_aux_transfer_done); -+} -+ -+/** -+ * dmub_hpd_callback - DMUB HPD interrupt processing callback. -+ * @adev: amdgpu_device pointer -+ * @notify: dmub notification structure -+ * -+ * Dmub Hpd interrupt processing callback. 
Gets displayindex through the -+ * ink index and calls helper to do the processing. -+ */ -+void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) -+{ -+ struct amdgpu_dm_connector *aconnector; -+ struct drm_connector *connector; -+ struct drm_connector_list_iter iter; -+ struct dc_link *link; -+ uint8_t link_index = 0; -+ struct drm_device *dev; -+ -+ if (adev == NULL) -+ return; -+ -+ if (notify == NULL) { -+ DRM_ERROR("DMUB HPD callback notification was NULL"); -+ return; -+ } -+ -+ if (notify->link_index > adev->dm.dc->link_count) { -+ DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); -+ return; -+ } -+ -+ link_index = notify->link_index; -+ link = adev->dm.dc->links[link_index]; -+ dev = adev->dm.ddev; -+ -+ drm_connector_list_iter_begin(dev, &iter); -+ drm_for_each_connector_iter(connector, &iter) { -+ aconnector = to_amdgpu_dm_connector(connector); -+ if (link && aconnector->dc_link == link) { -+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); -+ handle_hpd_irq_helper(aconnector); -+ break; -+ } -+ } -+ drm_connector_list_iter_end(&iter); -+ -+} -+ -+/** -+ * register_dmub_notify_callback - Sets callback for DMUB notify -+ * @adev: amdgpu_device pointer -+ * @type: Type of dmub notification -+ * @callback: Dmub interrupt callback function -+ * @dmub_int_thread_offload: offload indicator -+ * -+ * API to register a dmub callback handler for a dmub notification -+ * Also sets indicator whether callback processing to be offloaded. -+ * to dmub interrupt handling thread -+ * Return: true if successfully registered, false if there is existing registration -+ */ -+bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, -+dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) -+{ -+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { -+ adev->dm.dmub_callback[type] = callback; -+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; -+ } else -+ return false; -+ -+ return true; -+} -+ -+static void dm_handle_hpd_work(struct work_struct *work) -+{ -+ struct dmub_hpd_work *dmub_hpd_wrk; -+ -+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); -+ -+ if (!dmub_hpd_wrk->dmub_notify) { -+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); -+ return; -+ } -+ -+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { -+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, -+ dmub_hpd_wrk->dmub_notify); -+ } -+ kfree(dmub_hpd_wrk); -+ -+} -+ - #define DMUB_TRACE_MAX_READ 64 - /** - * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt -@@ -634,18 +773,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) - struct amdgpu_display_manager *dm = &adev->dm; - struct dmcub_trace_buf_entry entry = { 0 }; - uint32_t count = 0; -+ struct dmub_hpd_work *dmub_hpd_wrk; - - if (dc_enable_dmub_notifications(adev->dm.dc)) { -+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); -+ if (!dmub_hpd_wrk) { -+ DRM_ERROR("Failed to allocate dmub_hpd_wrk"); -+ return; -+ } -+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); -+ - if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { - do { - dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); -- } while (notify.pending_notification); -+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { -+ DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type, -+ 
ARRAY_SIZE(dm->dmub_thread_offload)); -+ continue; -+ } -+ if (dm->dmub_thread_offload[notify.type] == true) { -+ dmub_hpd_wrk->dmub_notify = ¬ify; -+ dmub_hpd_wrk->adev = adev; -+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); -+ } else { -+ dm->dmub_callback[notify.type](adev, ¬ify); -+ } - -- if (adev->dm.dmub_notify) -- memcpy(adev->dm.dmub_notify, ¬ify, sizeof(struct dmub_notification)); -- if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) -- complete(&adev->dm.dmub_aux_transfer_done); -- // TODO : HPD Implementation -+ } while (notify.pending_notification); - - } else { - DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); -@@ -900,6 +1054,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) - return 0; - } - -+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */ -+ status = dmub_srv_hw_reset(dmub_srv); -+ if (status != DMUB_STATUS_OK) -+ DRM_WARN("Error resetting DMUB HW: %d\n", status); -+ - hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; - - fw_inst_const = dmub_fw->data + -@@ -989,6 +1148,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) - return 0; - } - -+static void dm_dmub_hw_resume(struct amdgpu_device *adev) -+{ -+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv; -+ enum dmub_status status; -+ bool init; -+ -+ if (!dmub_srv) { -+ /* DMUB isn't supported on the ASIC. */ -+ return; -+ } -+ -+ status = dmub_srv_is_hw_init(dmub_srv, &init); -+ if (status != DMUB_STATUS_OK) -+ DRM_WARN("DMUB hardware init check failed: %d\n", status); -+ -+ if (status == DMUB_STATUS_OK && init) { -+ /* Wait for firmware load to finish. */ -+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); -+ if (status != DMUB_STATUS_OK) -+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); -+ } else { -+ /* Perform the full hardware initialization. 
*/ -+ dm_dmub_hw_init(adev); -+ } -+} -+ - #if defined(CONFIG_DRM_AMD_DC_DCN) - static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) - { -@@ -1083,6 +1268,194 @@ static void vblank_control_worker(struct work_struct *work) - } - - #endif -+ -+static void dm_handle_hpd_rx_offload_work(struct work_struct *work) -+{ -+ struct hpd_rx_irq_offload_work *offload_work; -+ struct amdgpu_dm_connector *aconnector; -+ struct dc_link *dc_link; -+ struct amdgpu_device *adev; -+ enum dc_connection_type new_connection_type = dc_connection_none; -+ unsigned long flags; -+ -+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); -+ aconnector = offload_work->offload_wq->aconnector; -+ -+ if (!aconnector) { -+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); -+ goto skip; -+ } -+ -+ adev = drm_to_adev(aconnector->base.dev); -+ dc_link = aconnector->dc_link; -+ -+ mutex_lock(&aconnector->hpd_lock); -+ if (!dc_link_detect_sink(dc_link, &new_connection_type)) -+ DRM_ERROR("KMS: Failed to detect connector\n"); -+ mutex_unlock(&aconnector->hpd_lock); -+ -+ if (new_connection_type == dc_connection_none) -+ goto skip; -+ -+ if (amdgpu_in_reset(adev)) -+ goto skip; -+ -+ mutex_lock(&adev->dm.dc_lock); -+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) -+ dc_link_dp_handle_automated_test(dc_link); -+ else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && -+ hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && -+ dc_link_dp_allow_hpd_rx_irq(dc_link)) { -+ dc_link_dp_handle_link_loss(dc_link); -+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); -+ offload_work->offload_wq->is_handling_link_loss = false; -+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); -+ } -+ mutex_unlock(&adev->dm.dc_lock); -+ -+skip: -+ kfree(offload_work); -+ -+} -+ -+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) -+{ -+ int max_caps = dc->caps.max_links; -+ int i = 0; -+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; -+ -+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); -+ -+ if (!hpd_rx_offload_wq) -+ return NULL; -+ -+ -+ for (i = 0; i < max_caps; i++) { -+ hpd_rx_offload_wq[i].wq = -+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); -+ -+ if (hpd_rx_offload_wq[i].wq == NULL) { -+ DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); -+ goto out_err; -+ } -+ -+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); -+ } -+ -+ return hpd_rx_offload_wq; -+ -+out_err: -+ for (i = 0; i < max_caps; i++) { -+ if (hpd_rx_offload_wq[i].wq) -+ destroy_workqueue(hpd_rx_offload_wq[i].wq); -+ } -+ kfree(hpd_rx_offload_wq); -+ return NULL; -+} -+ -+struct amdgpu_stutter_quirk { -+ u16 chip_vendor; -+ u16 chip_device; -+ u16 subsys_vendor; -+ u16 subsys_device; -+ u8 revision; -+}; -+ -+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { -+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ -+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, -+ { 0, 0, 0, 0, 0 }, -+}; -+ -+static bool dm_should_disable_stutter(struct pci_dev *pdev) -+{ -+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; -+ -+ while (p && p->chip_device != 0) { -+ if (pdev->vendor == p->chip_vendor && -+ pdev->device == p->chip_device && -+ pdev->subsystem_vendor == p->subsys_vendor && -+ pdev->subsystem_device == p->subsys_device && -+ pdev->revision == p->revision) { -+ return true; -+ 
} -+ ++p; -+ } -+ return false; -+} -+ -+static const struct dmi_system_id hpd_disconnect_quirk_table[] = { -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), -+ }, -+ }, -+ { -+ .matches = { -+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), -+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), -+ }, -+ }, -+ {} -+ /* TODO: refactor this from a fixed table to a dynamic option */ -+}; -+ -+static void retrieve_dmi_info(struct amdgpu_display_manager *dm) -+{ -+ const struct dmi_system_id *dmi_id; -+ -+ dm->aux_hpd_discon_quirk = false; -+ -+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table); -+ if (dmi_id) { -+ dm->aux_hpd_discon_quirk = true; -+ DRM_INFO("aux_hpd_discon_quirk attached\n"); -+ } -+} -+ - static int amdgpu_dm_init(struct amdgpu_device *adev) - { - struct dc_init_data init_data; -@@ -1141,8 +1514,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) - case CHIP_RAVEN: - case CHIP_RENOIR: - init_data.flags.gpu_vm_support = true; -- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) -+ switch (adev->dm.dmcub_fw_version) { -+ case 0: /* development */ -+ case 0x1: /* linux-firmware.git hash 6d9f399 */ -+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ -+ init_data.flags.disable_dmcu = false; -+ break; -+ default: - init_data.flags.disable_dmcu = true; -+ } - break; - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: -@@ -1167,6 +1547,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) - init_data.flags.power_down_display_on_boot = true; - - INIT_LIST_HEAD(&adev->dm.da_list); -+ -+ retrieve_dmi_info(&adev->dm); -+ - /* Display Core create. */ - adev->dm.dc = dc_create(&init_data); - -@@ -1184,6 +1567,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) - - if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) - adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? 
false : true; -+ if (dm_should_disable_stutter(adev->pdev)) -+ adev->dm.dc->debug.disable_stutter = true; - - if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) - adev->dm.dc->debug.disable_stutter = true; -@@ -1202,6 +1587,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) - - dc_hardware_init(adev->dm.dc); - -+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); -+ if (!adev->dm.hpd_rx_offload_wq) { -+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); -+ goto error; -+ } -+ - #if defined(CONFIG_DRM_AMD_DC_DCN) - if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { - struct dc_phy_addr_space_config pa_config; -@@ -1254,7 +1645,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) - DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); - goto error; - } -+ -+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); -+ if (!adev->dm.delayed_hpd_wq) { -+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); -+ goto error; -+ } -+ - amdgpu_dm_outbox_init(adev); -+#if defined(CONFIG_DRM_AMD_DC_DCN) -+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, -+ dmub_aux_setconfig_callback, false)) { -+ DRM_ERROR("amdgpu: fail to register dmub aux callback"); -+ goto error; -+ } -+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { -+ DRM_ERROR("amdgpu: fail to register dmub hpd callback"); -+ goto error; -+ } -+#endif - } - - if (amdgpu_dm_initialize_drm_device(adev)) { -@@ -1308,10 +1717,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) - } - #endif - -- for (i = 0; i < adev->dm.display_indexes_num; i++) { -- drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); -- } -- - amdgpu_dm_destroy_drm_device(&adev->dm); - - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -@@ -1331,11 +1736,14 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) - dc_deinit_callbacks(adev->dm.dc); - #endif - -- dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); -+ if (adev->dm.dc) -+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); - - if (dc_enable_dmub_notifications(adev->dm.dc)) { - kfree(adev->dm.dmub_notify); - adev->dm.dmub_notify = NULL; -+ destroy_workqueue(adev->dm.delayed_hpd_wq); -+ adev->dm.delayed_hpd_wq = NULL; - } - - if (adev->dm.dmub_bo) -@@ -1361,6 +1769,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) - adev->dm.freesync_module = NULL; - } - -+ if (adev->dm.hpd_rx_offload_wq) { -+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) { -+ if (adev->dm.hpd_rx_offload_wq[i].wq) { -+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); -+ adev->dm.hpd_rx_offload_wq[i].wq = NULL; -+ } -+ } -+ -+ kfree(adev->dm.hpd_rx_offload_wq); -+ adev->dm.hpd_rx_offload_wq = NULL; -+ } -+ - mutex_destroy(&adev->dm.audio_lock); - mutex_destroy(&adev->dm.dc_lock); - -@@ -1980,6 +2400,16 @@ context_alloc_fail: - return res; - } - -+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) -+{ -+ int i; -+ -+ if (dm->hpd_rx_offload_wq) { -+ for (i = 0; i < dm->dc->caps.max_links; i++) -+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq); -+ } -+} -+ - static int dm_suspend(void *handle) - { - struct amdgpu_device *adev = handle; -@@ -2001,6 +2431,8 @@ static int dm_suspend(void *handle) - - amdgpu_dm_irq_suspend(adev); - -+ hpd_rx_irq_work_suspend(dm); -+ - return ret; - } - -@@ -2011,6 +2443,8 @@ static int dm_suspend(void *handle) - - amdgpu_dm_irq_suspend(adev); - -+ hpd_rx_irq_work_suspend(dm); -+ - dc_set_power_state(dm->dc, 
DC_ACPI_CM_POWER_STATE_D3); - - return 0; -@@ -2145,10 +2579,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, - bundle->surface_updates[m].surface->force_full_update = - true; - } -- dc_commit_updates_for_stream( -- dm->dc, bundle->surface_updates, -- dc_state->stream_status->plane_count, -- dc_state->streams[k], &bundle->stream_update, dc_state); -+ -+ update_planes_and_stream_adapter(dm->dc, -+ UPDATE_TYPE_FULL, -+ dc_state->stream_status->plane_count, -+ dc_state->streams[k], -+ &bundle->stream_update, -+ bundle->surface_updates); - } - - cleanup: -@@ -2206,6 +2643,9 @@ static int dm_resume(void *handle) - if (amdgpu_in_reset(adev)) { - dc_state = dm->cached_dc_state; - -+ if (dc_enable_dmub_notifications(adev->dm.dc)) -+ amdgpu_dm_outbox_init(adev); -+ - r = dm_dmub_hw_init(adev); - if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); -@@ -2217,8 +2657,8 @@ static int dm_resume(void *handle) - - for (i = 0; i < dc_state->stream_count; i++) { - dc_state->streams[i]->mode_changed = true; -- for (j = 0; j < dc_state->stream_status->plane_count; j++) { -- dc_state->stream_status->plane_states[j]->update_flags.raw -+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { -+ dc_state->stream_status[i].plane_states[j]->update_flags.raw - = 0xffffffff; - } - } -@@ -2253,10 +2693,12 @@ static int dm_resume(void *handle) - /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ - dc_resource_state_construct(dm->dc, dm_state->context); - -+ /* Re-enable outbox interrupts for DPIA. */ -+ if (dc_enable_dmub_notifications(adev->dm.dc)) -+ amdgpu_dm_outbox_init(adev); -+ - /* Before powering on DC we need to re-initialize DMUB. */ -- r = dm_dmub_hw_init(adev); -- if (r) -- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); -+ dm_dmub_hw_resume(adev); - - /* power on hardware */ - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); -@@ -2278,11 +2720,14 @@ static int dm_resume(void *handle) - drm_for_each_connector_iter(connector, &iter) { - aconnector = to_amdgpu_dm_connector(connector); - -+ if (!aconnector->dc_link) -+ continue; -+ - /* - * this is the case when traversing through already created - * MST connectors, should be skipped - */ -- if (aconnector->mst_port) -+ if (aconnector->dc_link->type == dc_connection_mst_branch) - continue; - - mutex_lock(&aconnector->hpd_lock); -@@ -2402,7 +2847,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { - - static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) - { -- u32 max_cll, min_cll, max, min, q, r; -+ u32 max_avg, min_cll, max, min, q, r; - struct amdgpu_dm_backlight_caps *caps; - struct amdgpu_display_manager *dm; - struct drm_connector *conn_base; -@@ -2432,7 +2877,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) - caps = &dm->backlight_caps[i]; - caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; - caps->aux_support = false; -- max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; -+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; - min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; - - if (caps->ext_caps->bits.oled == 1 /*|| -@@ -2460,8 +2905,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) - * The results of the above expressions can be verified at - * pre_computed_values. 
- */ -- q = max_cll >> 5; -- r = max_cll % 32; -+ q = max_avg >> 5; -+ r = max_avg % 32; - max = (1 << q) * pre_computed_values[r]; - - // min luminance: maxLum * (CV/255)^2 / 100 -@@ -2583,13 +3028,12 @@ void amdgpu_dm_update_connector_after_detect( - aconnector->edid = - (struct edid *)sink->dc_edid.raw_edid; - -- drm_connector_update_edid_property(connector, -- aconnector->edid); - if (aconnector->dc_link->aux_mode) - drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, - aconnector->edid); - } - -+ drm_connector_update_edid_property(connector, aconnector->edid); - amdgpu_dm_update_freesync_caps(connector, aconnector->edid); - update_connector_ext_caps(aconnector); - } else { -@@ -2615,9 +3059,8 @@ void amdgpu_dm_update_connector_after_detect( - dc_sink_release(sink); - } - --static void handle_hpd_irq(void *param) -+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) - { -- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; - struct drm_connector *connector = &aconnector->base; - struct drm_device *dev = connector->dev; - enum dc_connection_type new_connection_type = dc_connection_none; -@@ -2676,7 +3119,15 @@ static void handle_hpd_irq(void *param) - - } - --static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) -+static void handle_hpd_irq(void *param) -+{ -+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; -+ -+ handle_hpd_irq_helper(aconnector); -+ -+} -+ -+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) - { - uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; - uint8_t dret; -@@ -2754,6 +3205,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) - DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); - } - -+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, -+ union hpd_irq_data hpd_irq_data) -+{ -+ struct hpd_rx_irq_offload_work *offload_work = -+ kzalloc(sizeof(*offload_work), GFP_KERNEL); -+ -+ if (!offload_work) { -+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); -+ return; -+ } -+ -+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); -+ offload_work->data = hpd_irq_data; -+ offload_work->offload_wq = offload_wq; -+ -+ queue_work(offload_wq->wq, &offload_work->work); -+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); -+} -+ - static void handle_hpd_rx_irq(void *param) - { - struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; -@@ -2765,14 +3235,16 @@ static void handle_hpd_rx_irq(void *param) - enum dc_connection_type new_connection_type = dc_connection_none; - struct amdgpu_device *adev = drm_to_adev(dev); - union hpd_irq_data hpd_irq_data; -- bool lock_flag = 0; -+ bool link_loss = false; -+ bool has_left_work = false; -+ int idx = aconnector->base.index; -+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; - - memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); - - if (adev->dm.disable_hpd_irq) - return; - -- - /* - * TODO:Temporary add mutex to protect hpd interrupt not have a gpio - * conflict, after implement i2c helper, this mutex should be -@@ -2780,43 +3252,41 @@ static void handle_hpd_rx_irq(void *param) - */ - mutex_lock(&aconnector->hpd_lock); - -- read_hpd_rx_irq_data(dc_link, &hpd_irq_data); -+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, -+ &link_loss, true, &has_left_work); - -- if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || -- (dc_link->type 
== dc_connection_mst_branch)) { -- if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { -- result = true; -- dm_handle_hpd_rx_irq(aconnector); -- goto out; -- } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { -- result = false; -- dm_handle_hpd_rx_irq(aconnector); -+ if (!has_left_work) -+ goto out; -+ -+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { -+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); -+ goto out; -+ } -+ -+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { -+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || -+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { -+ dm_handle_mst_sideband_msg(aconnector); - goto out; - } -- } - -- /* -- * TODO: We need the lock to avoid touching DC state while it's being -- * modified during automated compliance testing, or when link loss -- * happens. While this should be split into subhandlers and proper -- * interfaces to avoid having to conditionally lock like this in the -- * outer layer, we need this workaround temporarily to allow MST -- * lightup in some scenarios to avoid timeout. -- */ -- if (!amdgpu_in_reset(adev) && -- (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) || -- hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) { -- mutex_lock(&adev->dm.dc_lock); -- lock_flag = 1; -- } -+ if (link_loss) { -+ bool skip = false; - --#ifdef CONFIG_DRM_AMD_DC_HDCP -- result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL); --#else -- result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL); --#endif -- if (!amdgpu_in_reset(adev) && lock_flag) -- mutex_unlock(&adev->dm.dc_lock); -+ spin_lock(&offload_wq->offload_lock); -+ skip = offload_wq->is_handling_link_loss; -+ -+ if (!skip) -+ offload_wq->is_handling_link_loss = true; -+ -+ spin_unlock(&offload_wq->offload_lock); -+ -+ if (!skip) -+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); -+ -+ goto out; -+ } -+ } - - out: - if (result && !is_mst_root_connector) { -@@ -2901,6 +3371,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev) - amdgpu_dm_irq_register_interrupt(adev, &int_params, - handle_hpd_rx_irq, - (void *) aconnector); -+ -+ if (adev->dm.hpd_rx_offload_wq) -+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector = -+ aconnector; - } - } - } -@@ -3213,7 +3687,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) - - /* Use GRPH_PFLIP interrupt */ - for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; -- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; -+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; - i++) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); - if (r) { -@@ -3508,7 +3982,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap - max - min); - } - --static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, -+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, - int bl_idx, - u32 user_brightness) - { -@@ -3536,7 +4010,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, - DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); - } - -- return rc ? 
0 : 1; -+ if (rc) -+ dm->actual_brightness[bl_idx] = user_brightness; - } - - static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) -@@ -3839,8 +4314,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) - } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { - amdgpu_dm_update_connector_after_detect(aconnector); - register_backlight_device(dm, link); -+ -+ if (dm->num_of_edps) -+ update_connector_ext_caps(aconnector); - if (amdgpu_dc_feature_mask & DC_PSR_MASK) - amdgpu_dm_set_psr_caps(link); -+ -+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when -+ * PSR is also supported. -+ */ -+ if (link->psr_settings.psr_feature_enabled) -+ adev_to_drm(adev)->vblank_disable_immediate = false; - } - - -@@ -3979,6 +4463,17 @@ DEVICE_ATTR_WO(s3_debug); - static int dm_early_init(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; -+ struct amdgpu_mode_info *mode_info = &adev->mode_info; -+ struct atom_context *ctx = mode_info->atom_context; -+ int index = GetIndexIntoMasterTable(DATA, Object_Header); -+ u16 data_offset; -+ -+ /* if there is no object header, skip DM */ -+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { -+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; -+ dev_info(adev->dev, "No object header, skipping DM\n"); -+ return -ENOENT; -+ } - - switch (adev->asic_type) { - #if defined(CONFIG_DRM_AMD_DC_SI) -@@ -5033,7 +5528,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, - plane_info->visible = true; - plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; - -- plane_info->layer_index = 0; -+ plane_info->layer_index = plane_state->normalized_zpos; - - ret = fill_plane_color_attributes(plane_state, plane_info->format, - &plane_info->color_space); -@@ -5100,7 +5595,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, - dc_plane_state->global_alpha = plane_info.global_alpha; - dc_plane_state->global_alpha_value = plane_info.global_alpha_value; - dc_plane_state->dcc = plane_info.dcc; -- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 -+ dc_plane_state->layer_index = plane_info.layer_index; - dc_plane_state->flip_int_enabled = true; - - /* -@@ -5402,8 +5897,6 @@ static void fill_stream_properties_from_drm_display_mode( - - timing_out->aspect_ratio = get_aspect_ratio(mode_in); - -- stream->output_color_space = get_output_color_space(timing_out); -- - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { -@@ -5414,6 +5907,8 @@ static void fill_stream_properties_from_drm_display_mode( - adjust_colour_depth_from_display_info(timing_out, info); - } - } -+ -+ stream->output_color_space = get_output_color_space(timing_out); - } - - static void fill_audio_info(struct audio_info *audio_info, -@@ -5587,6 +6082,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, - struct dsc_dec_dpcd_caps *dsc_caps) - { - stream->timing.flags.DSC = 0; -+ dsc_caps->is_dsc_supported = false; - - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, -@@ -7527,6 +8023,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, - mode = amdgpu_dm_create_common_mode(encoder, - common_modes[i].name, common_modes[i].w, - common_modes[i].h); -+ if (!mode) -+ continue; -+ - drm_mode_probed_add(connector, mode); - 
amdgpu_dm_connector->num_modes++; - } -@@ -8067,27 +8566,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, - } - - #ifdef CONFIG_DRM_AMD_DC_HDCP --static bool is_content_protection_different(struct drm_connector_state *state, -- const struct drm_connector_state *old_state, -- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) -+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, -+ struct drm_crtc_state *old_crtc_state, -+ struct drm_connector_state *new_conn_state, -+ struct drm_connector_state *old_conn_state, -+ const struct drm_connector *connector, -+ struct hdcp_workqueue *hdcp_w) - { - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - -- /* Handle: Type0/1 change */ -- if (old_state->hdcp_content_type != state->hdcp_content_type && -- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { -- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; -+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", -+ connector->index, connector->status, connector->dpms); -+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n", -+ old_conn_state->content_protection, new_conn_state->content_protection); -+ -+ if (old_crtc_state) -+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", -+ old_crtc_state->enable, -+ old_crtc_state->active, -+ old_crtc_state->mode_changed, -+ old_crtc_state->active_changed, -+ old_crtc_state->connectors_changed); -+ -+ if (new_crtc_state) -+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", -+ new_crtc_state->enable, -+ new_crtc_state->active, -+ new_crtc_state->mode_changed, -+ new_crtc_state->active_changed, -+ new_crtc_state->connectors_changed); -+ -+ /* hdcp content type change */ -+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && -+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { -+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; -+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); - return true; - } - -- /* CP is being re enabled, ignore this -- * -- * Handles: ENABLED -> DESIRED -- */ -- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && -- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { -- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; -+ /* CP is being re enabled, ignore this */ -+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && -+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { -+ if (new_crtc_state && new_crtc_state->mode_changed) { -+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; -+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); -+ return true; -+ }; -+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; -+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); - return false; - } - -@@ -8095,9 +8622,9 @@ static bool is_content_protection_different(struct drm_connector_state *state, - * - * Handles: UNDESIRED -> ENABLED - */ -- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && -- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) -- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; -+ if (old_conn_state->content_protection 
== DRM_MODE_CONTENT_PROTECTION_UNDESIRED && -+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) -+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; - - /* Stream removed and re-enabled - * -@@ -8107,10 +8634,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, - * - * Handles: DESIRED -> DESIRED (Special case) - */ -- if (!(old_state->crtc && old_state->crtc->enabled) && -- state->crtc && state->crtc->enabled && -+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && -+ new_conn_state->crtc && new_conn_state->crtc->enabled && - connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - dm_con_state->update_hdcp = false; -+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", -+ __func__); - return true; - } - -@@ -8122,35 +8651,42 @@ static bool is_content_protection_different(struct drm_connector_state *state, - * - * Handles: DESIRED -> DESIRED (Special case) - */ -- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && -- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { -+ if (dm_con_state->update_hdcp && -+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && -+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { - dm_con_state->update_hdcp = false; -+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", -+ __func__); - return true; - } - -- /* -- * Handles: UNDESIRED -> UNDESIRED -- * DESIRED -> DESIRED -- * ENABLED -> ENABLED -- */ -- if (old_state->content_protection == state->content_protection) -+ if (old_conn_state->content_protection == new_conn_state->content_protection) { -+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { -+ if (new_crtc_state && new_crtc_state->mode_changed) { -+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", -+ __func__); -+ return true; -+ }; -+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", -+ __func__); -+ return false; -+ }; -+ -+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); - return false; -+ } - -- /* -- * Handles: UNDESIRED -> DESIRED -- * DESIRED -> UNDESIRED -- * ENABLED -> UNDESIRED -- */ -- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) -+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { -+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", -+ __func__); - return true; -+ } - -- /* -- * Handles: DESIRED -> ENABLED -- */ -+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); - return false; - } -- - #endif -+ - static void remove_stream(struct amdgpu_device *adev, - struct amdgpu_crtc *acrtc, - struct dc_stream_state *stream) -@@ -8447,15 +8983,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, - * We also need vupdate irq for the actual core vblank handling - * at end of vblank. 
- */ -- dm_set_vupdate_irq(new_state->base.crtc, true); -- drm_crtc_vblank_get(new_state->base.crtc); -+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0); -+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", - __func__, new_state->base.crtc->base.id); - } else if (old_vrr_active && !new_vrr_active) { - /* Transition VRR active -> inactive: - * Allow vblank irq disable again for fixed refresh rate. - */ -- dm_set_vupdate_irq(new_state->base.crtc, false); -+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0); - drm_crtc_vblank_put(new_state->base.crtc); - DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", - __func__, new_state->base.crtc->base.id); -@@ -8477,6 +9013,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) - handle_cursor_update(plane, old_plane_state); - } - -+static inline uint32_t get_mem_type(struct drm_framebuffer *fb) -+{ -+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); -+ -+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0; -+} -+ - static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - struct dc_state *dc_state, - struct drm_device *dev, -@@ -8546,6 +9089,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - continue; - - dc_plane = dm_new_plane_state->dc_state; -+ if (!dc_plane) -+ continue; - - bundle->surface_updates[planes_count].surface = dc_plane; - if (new_pcrtc_state->color_mgmt_changed) { -@@ -8597,11 +9142,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - - /* - * Only allow immediate flips for fast updates that don't -- * change FB pitch, DCC state, rotation or mirroing. -+ * change memory domain, FB pitch, DCC state, rotation or -+ * mirroring. - */ - bundle->flip_addrs[planes_count].flip_immediate = - crtc->state->async_flip && -- acrtc_state->update_type == UPDATE_TYPE_FAST; -+ acrtc_state->update_type == UPDATE_TYPE_FAST && -+ get_mem_type(old_plane_state->fb) == get_mem_type(fb); - - timestamp_ns = ktime_get_ns(); - bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); -@@ -8734,6 +9281,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) - bundle->stream_update.abm_level = &acrtc_state->abm_level; - -+ mutex_lock(&dm->dc_lock); -+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && -+ acrtc_state->stream->link->psr_settings.psr_allow_active) -+ amdgpu_dm_psr_disable(acrtc_state->stream); -+ mutex_unlock(&dm->dc_lock); -+ - /* - * If FreeSync state on the stream has changed then we need to - * re-adjust the min/max bounds now that DC doesn't handle this -@@ -8747,16 +9300,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, - spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); - } - mutex_lock(&dm->dc_lock); -- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && -- acrtc_state->stream->link->psr_settings.psr_allow_active) -- amdgpu_dm_psr_disable(acrtc_state->stream); - -- dc_commit_updates_for_stream(dm->dc, -- bundle->surface_updates, -- planes_count, -- acrtc_state->stream, -- &bundle->stream_update, -- dc_state); -+ update_planes_and_stream_adapter(dm->dc, -+ acrtc_state->update_type, -+ planes_count, -+ acrtc_state->stream, -+ &bundle->stream_update, -+ bundle->surface_updates); - - /** - * Enable or disable the interrupts on the backend. 
-@@ -9084,10 +9634,67 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - -+ if (!adev->dm.hdcp_workqueue) -+ continue; -+ -+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); -+ -+ if (!connector) -+ continue; -+ -+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", -+ connector->index, connector->status, connector->dpms); -+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n", -+ old_con_state->content_protection, new_con_state->content_protection); -+ -+ if (aconnector->dc_sink) { -+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && -+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { -+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", -+ aconnector->dc_sink->edid_caps.display_name); -+ } -+ } -+ - new_crtc_state = NULL; -+ old_crtc_state = NULL; - -- if (acrtc) -+ if (acrtc) { - new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); -+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); -+ } -+ -+ if (old_crtc_state) -+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", -+ old_crtc_state->enable, -+ old_crtc_state->active, -+ old_crtc_state->mode_changed, -+ old_crtc_state->active_changed, -+ old_crtc_state->connectors_changed); -+ -+ if (new_crtc_state) -+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", -+ new_crtc_state->enable, -+ new_crtc_state->active, -+ new_crtc_state->mode_changed, -+ new_crtc_state->active_changed, -+ new_crtc_state->connectors_changed); -+ } -+ -+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { -+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); -+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); -+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); -+ -+ if (!adev->dm.hdcp_workqueue) -+ continue; -+ -+ new_crtc_state = NULL; -+ old_crtc_state = NULL; -+ -+ if (acrtc) { -+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); -+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); -+ } - - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - -@@ -9099,11 +9706,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - continue; - } - -- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) -+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, -+ old_con_state, connector, adev->dm.hdcp_workqueue)) { -+ /* when display is unplugged from mst hub, connctor will -+ * be destroyed within dm_dp_mst_connector_destroy. connector -+ * hdcp perperties, like type, undesired, desired, enabled, -+ * will be lost. So, save hdcp properties into hdcp_work within -+ * amdgpu_dm_atomic_commit_tail. 
if the same display is -+ * plugged back with same display index, its hdcp properties -+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes -+ */ -+ -+ bool enable_encryption = false; -+ -+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) -+ enable_encryption = true; -+ -+ if (aconnector->dc_link && aconnector->dc_sink && -+ aconnector->dc_link->type == dc_connection_mst_branch) { -+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; -+ struct hdcp_workqueue *hdcp_w = -+ &hdcp_work[aconnector->dc_link->link_index]; -+ -+ hdcp_w->hdcp_content_type[connector->index] = -+ new_con_state->hdcp_content_type; -+ hdcp_w->content_protection[connector->index] = -+ new_con_state->content_protection; -+ } -+ -+ if (new_crtc_state && new_crtc_state->mode_changed && -+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) -+ enable_encryption = true; -+ -+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); -+ - hdcp_update_display( - adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, -- new_con_state->hdcp_content_type, -- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); -+ new_con_state->hdcp_content_type, enable_encryption); -+ } - } - #endif - -@@ -9182,32 +9822,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - - - mutex_lock(&dm->dc_lock); -- dc_commit_updates_for_stream(dm->dc, -- dummy_updates, -- status->plane_count, -- dm_new_crtc_state->stream, -- &stream_update, -- dc_state); -+ dc_update_planes_and_stream(dm->dc, -+ dummy_updates, -+ status->plane_count, -+ dm_new_crtc_state->stream, -+ &stream_update); - mutex_unlock(&dm->dc_lock); - } - -- /* Count number of newly disabled CRTCs for dropping PM refs later. */ -- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, -- new_crtc_state, i) { -- if (old_crtc_state->active && !new_crtc_state->active) -- crtc_disable_count++; -- -- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); -- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); -- -- /* For freesync config update on crtc state and params for irq */ -- update_stream_irq_parameters(dm, dm_new_crtc_state); -- -- /* Handle vrr on->off / off->on transitions */ -- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, -- dm_new_crtc_state); -- } -- - /** - * Enable interrupts for CRTCs that are newly enabled or went through - * a modeset. It was intentionally deferred until after the front end -@@ -9217,16 +9839,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - #ifdef CONFIG_DEBUG_FS -- bool configure_crc = false; - enum amdgpu_dm_pipe_crc_source cur_crc_src; - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; -+ struct crc_rd_work *crc_rd_wrk; -+#endif -+#endif -+ /* Count number of newly disabled CRTCs for dropping PM refs later. 
*/ -+ if (old_crtc_state->active && !new_crtc_state->active) -+ crtc_disable_count++; -+ -+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); -+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); -+ -+ /* For freesync config update on crtc state and params for irq */ -+ update_stream_irq_parameters(dm, dm_new_crtc_state); -+ -+#ifdef CONFIG_DEBUG_FS -+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -+ crc_rd_wrk = dm->crc_rd_wrk; - #endif - spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); - cur_crc_src = acrtc->dm_irq_params.crc_src; - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - #endif -- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - - if (new_crtc_state->active && - (!old_crtc_state->active || -@@ -9234,16 +9869,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - dc_stream_retain(dm_new_crtc_state->stream); - acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; - manage_dm_interrupts(adev, acrtc, true); -+ } -+ /* Handle vrr on->off / off->on transitions */ -+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); - - #ifdef CONFIG_DEBUG_FS -+ if (new_crtc_state->active && -+ (!old_crtc_state->active || -+ drm_atomic_crtc_needs_modeset(new_crtc_state))) { - /** - * Frontend may have changed so reapply the CRC capture - * settings for the stream. - */ -- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); -- - if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { -- configure_crc = true; - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - if (amdgpu_dm_crc_window_is_activated(crtc)) { - spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); -@@ -9255,14 +9893,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); - } - #endif -- } -- -- if (configure_crc) - if (amdgpu_dm_crtc_configure_crc_source( - crtc, dm_new_crtc_state, cur_crc_src)) - DRM_DEBUG_DRIVER("Failed to configure crc source"); --#endif -+ } - } -+#endif - } - - for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) -@@ -9286,7 +9922,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) - /* restore the backlight level */ - for (i = 0; i < dm->num_of_edps; i++) { - if (dm->backlight_dev[i] && -- (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) -+ (dm->actual_brightness[i] != dm->brightness[i])) - amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); - } - #endif -@@ -9686,7 +10322,16 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, - if (!dm_old_crtc_state->stream) - goto skip_modeset; - -+ /* Unset freesync video if it was active before */ -+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { -+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; -+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; -+ } -+ -+ /* Now check if we should set freesync video mode */ - if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && -+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && -+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && - is_timing_unchanged_for_freesync(new_crtc_state, - old_crtc_state)) { - new_crtc_state->mode_changed = false; -@@ -10070,8 +10715,9 @@ static int dm_update_plane_state(struct dc *dc, - return -EINVAL; - } - -+ if (dm_old_plane_state->dc_state) -+ dc_plane_state_release(dm_old_plane_state->dc_state); - -- dc_plane_state_release(dm_old_plane_state->dc_state); - 
dm_new_plane_state->dc_state = NULL; - - *lock_and_validation_needed = true; -@@ -10196,10 +10842,13 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, - static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) - { - struct drm_connector *connector; -- struct drm_connector_state *conn_state; -+ struct drm_connector_state *conn_state, *old_conn_state; - struct amdgpu_dm_connector *aconnector = NULL; - int i; -- for_each_new_connector_in_state(state, connector, conn_state, i) { -+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { -+ if (!conn_state->crtc) -+ conn_state = old_conn_state; -+ - if (conn_state->crtc != crtc) - continue; - -@@ -10332,8 +10981,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, - goto fail; - } - -- if (dm_old_con_state->abm_level != -- dm_new_con_state->abm_level) -+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || -+ dm_old_con_state->scaling != dm_new_con_state->scaling) - new_crtc_state->connectors_changed = true; - } - -@@ -10412,6 +11061,18 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, - } - } - -+ /* -+ * DC consults the zpos (layer_index in DC terminology) to determine the -+ * hw plane on which to enable the hw cursor (see -+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in -+ * atomic state, so call drm helper to normalize zpos. -+ */ -+ ret = drm_atomic_normalize_zpos(dev, state); -+ if (ret) { -+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); -+ goto fail; -+ } -+ - /* Remove exiting planes if they are modified */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { - ret = dm_update_plane_state(dc, state, plane, -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h -index d1d353a7c77d3..f9c3e5a417138 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h -@@ -47,6 +47,8 @@ - #define AMDGPU_DM_MAX_CRTC 6 - - #define AMDGPU_DM_MAX_NUM_EDP 2 -+ -+#define AMDGPU_DMUB_NOTIFICATION_MAX 5 - /* - #include "include/amdgpu_dal_power_if.h" - #include "amdgpu_dm_irq.h" -@@ -86,6 +88,21 @@ struct dm_compressor_info { - uint64_t gpu_addr; - }; - -+typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify); -+ -+/** -+ * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ -+ * -+ * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq -+ * @dmub_notify: notification for callback function -+ * @adev: amdgpu_device pointer -+ */ -+struct dmub_hpd_work { -+ struct work_struct handle_hpd_work; -+ struct dmub_notification *dmub_notify; -+ struct amdgpu_device *adev; -+}; -+ - /** - * struct vblank_control_work - Work data for vblank control - * @work: Kernel work data for the work event -@@ -154,6 +171,48 @@ struct dal_allocation { - u64 gpu_addr; - }; - -+/** -+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq -+ * offload work -+ */ -+struct hpd_rx_irq_offload_work_queue { -+ /** -+ * @wq: workqueue structure to queue offload work. -+ */ -+ struct workqueue_struct *wq; -+ /** -+ * @offload_lock: To protect fields of offload work queue. 
-+ */ -+ spinlock_t offload_lock; -+ /** -+ * @is_handling_link_loss: Used to prevent inserting link loss event when -+ * we're handling link loss -+ */ -+ bool is_handling_link_loss; -+ /** -+ * @aconnector: The aconnector that this work queue is attached to -+ */ -+ struct amdgpu_dm_connector *aconnector; -+}; -+ -+/** -+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure -+ */ -+struct hpd_rx_irq_offload_work { -+ /** -+ * @work: offload work -+ */ -+ struct work_struct work; -+ /** -+ * @data: reference irq data which is used while handling offload work -+ */ -+ union hpd_irq_data data; -+ /** -+ * @offload_wq: offload work queue that this work is queued to -+ */ -+ struct hpd_rx_irq_offload_work_queue *offload_wq; -+}; -+ - /** - * struct amdgpu_display_manager - Central amdgpu display manager device - * -@@ -190,8 +249,30 @@ struct amdgpu_display_manager { - */ - struct dmub_srv *dmub_srv; - -+ /** -+ * @dmub_notify: -+ * -+ * Notification from DMUB. -+ */ -+ - struct dmub_notification *dmub_notify; - -+ /** -+ * @dmub_callback: -+ * -+ * Callback functions to handle notification from DMUB. -+ */ -+ -+ dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX]; -+ -+ /** -+ * @dmub_thread_offload: -+ * -+ * Flag to indicate if callback is offload. -+ */ -+ -+ bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX]; -+ - /** - * @dmub_fb_info: - * -@@ -422,7 +503,12 @@ struct amdgpu_display_manager { - */ - struct crc_rd_work *crc_rd_wrk; - #endif -- -+ /** -+ * @hpd_rx_offload_wq: -+ * -+ * Work queue to offload works of hpd_rx_irq -+ */ -+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq; - /** - * @mst_encoders: - * -@@ -439,6 +525,7 @@ struct amdgpu_display_manager { - */ - struct list_head da_list; - struct completion dmub_aux_transfer_done; -+ struct workqueue_struct *delayed_hpd_wq; - - /** - * @brightness: -@@ -446,6 +533,20 @@ struct amdgpu_display_manager { - * cached backlight values. - */ - u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; -+ /** -+ * @actual_brightness: -+ * -+ * last successfully applied backlight values. -+ */ -+ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; -+ -+ /** -+ * @aux_hpd_discon_quirk: -+ * -+ * quirk for hpd discon while aux is on-going. 
-+ * occurred on certain intel platform -+ */ -+ bool aux_hpd_discon_quirk; - }; - - enum dsc_clock_force_state { -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c -index cce062adc4391..8a441a22c46ec 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c -@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) - ret = -EINVAL; - goto cleanup; - } -+ -+ if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && -+ (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { -+ DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); -+ ret = -EINVAL; -+ goto cleanup; -+ } -+ - } - - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -index 8080bba5b7a76..6d694cea24201 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -@@ -229,8 +229,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -247,6 +249,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, - { - struct amdgpu_dm_connector *connector = file_inode(f)->i_private; - struct dc_link *link = connector->dc_link; -+ struct dc *dc = (struct dc *)link->dc; - struct dc_link_settings prefer_link_settings; - char *wr_buf = NULL; - const uint32_t wr_buf_size = 40; -@@ -313,7 +316,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, - prefer_link_settings.lane_count = param[0]; - prefer_link_settings.link_rate = param[1]; - -- dp_retrain_link_dp_test(link, &prefer_link_settings, false); -+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); - - kfree(wr_buf); - return size; -@@ -387,8 +390,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, - break; - - r = put_user((*(rd_buf + result)), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -1315,8 +1320,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -1332,8 +1339,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -1502,8 +1511,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -1519,8 +1530,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -1687,8 +1700,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = 
pipe_ctx->stream_res.dsc; - if (dsc) -@@ -1704,8 +1719,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -1868,8 +1885,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -1885,8 +1904,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -2044,8 +2065,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -2061,8 +2084,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -2101,8 +2126,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -2118,8 +2145,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -2173,8 +2202,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -2190,8 +2221,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -2245,8 +2278,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, - break; - } - -- if (!pipe_ctx) -+ if (!pipe_ctx) { -+ kfree(rd_buf); - return -ENXIO; -+ } - - dsc = pipe_ctx->stream_res.dsc; - if (dsc) -@@ -2262,8 +2297,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, - break; - - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - - buf += 1; - size -= 1; -@@ -2907,10 +2944,13 @@ static int crc_win_update_set(void *data, u64 val) - struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); - struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; - -+ if (!crc_rd_wrk) -+ return 0; -+ - if (val) { - spin_lock_irq(&adev_to_drm(adev)->event_lock); - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); -- if (crc_rd_wrk && crc_rd_wrk->crtc) { -+ if (crc_rd_wrk->crtc) { - old_crtc = crc_rd_wrk->crtc; - old_acrtc = to_amdgpu_crtc(old_crtc); - } -@@ -2967,7 +3007,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc) - &crc_win_y_end_fops); - debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, - &crc_win_update_fops); -- -+ dput(dir); - } - #endif - /* -@@ -3250,8 +3290,10 @@ static ssize_t dcc_en_bits_read( - dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); - - rd_buf = 
kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); -- if (!rd_buf) -+ if (!rd_buf) { -+ kfree(dcc_en_bits); - return -ENOMEM; -+ } - - for (i = 0; i < num_pipes; i++) - offset += snprintf(rd_buf + offset, rd_buf_size - offset, -@@ -3264,8 +3306,10 @@ static ssize_t dcc_en_bits_read( - if (*pos >= rd_buf_size) - break; - r = put_user(*(rd_buf + result), buf); -- if (r) -+ if (r) { -+ kfree(rd_buf); - return r; /* r = -EFAULT */ -+ } - buf += 1; - size -= 1; - *pos += 1; -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h -index 09294ff122fea..bbbf7d0eff82f 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h -@@ -52,6 +52,20 @@ struct hdcp_workqueue { - struct mod_hdcp_link link; - - enum mod_hdcp_encryption_status encryption_status; -+ -+ /* when display is unplugged from mst hub, connctor will be -+ * destroyed within dm_dp_mst_connector_destroy. connector -+ * hdcp perperties, like type, undesired, desired, enabled, -+ * will be lost. So, save hdcp properties into hdcp_work within -+ * amdgpu_dm_atomic_commit_tail. if the same display is -+ * plugged back with same display index, its hdcp properties -+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes -+ */ -+ /* un-desired, desired, enabled */ -+ unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; -+ /* hdcp1.x, hdcp2.x */ -+ unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; -+ - uint8_t max_link; - - uint8_t *srm; -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c -index 7af0d58c231b6..0b58a93864490 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c -@@ -32,10 +32,16 @@ - #include "amdgpu_dm.h" - #include "amdgpu_dm_mst_types.h" - -+#ifdef CONFIG_DRM_AMD_DC_HDCP -+#include "amdgpu_dm_hdcp.h" -+#endif -+ - #include "dc.h" - #include "dm_helpers.h" - - #include "dc_link_ddc.h" -+#include "ddc_service_types.h" -+#include "dpcd_defs.h" - - #include "i2caux_interface.h" - #include "dmub_cmd.h" -@@ -53,6 +59,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, - ssize_t result = 0; - struct aux_payload payload; - enum aux_return_code_type operation_result; -+ struct amdgpu_device *adev; -+ struct ddc_service *ddc; - - if (WARN_ON(msg->size > 16)) - return -E2BIG; -@@ -69,6 +77,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, - result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, - &operation_result); - -+ /* -+ * w/a on certain intel platform where hpd is unexpected to pull low during -+ * 1st sideband message transaction by return AUX_RET_ERROR_HPD_DISCON -+ * aux transaction is succuess in such case, therefore bypass the error -+ */ -+ ddc = TO_DM_AUX(aux)->ddc_service; -+ adev = ddc->ctx->driver_context; -+ if (adev->dm.aux_hpd_discon_quirk) { -+ if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && -+ operation_result == AUX_RET_ERROR_HPD_DISCON) { -+ result = 0; -+ operation_result = AUX_RET_SUCCESS; -+ } -+ } -+ - if (payload.write && result >= 0) - result = msg->size; - -@@ -155,6 +178,31 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - }; - - #if defined(CONFIG_DRM_AMD_DC_DCN) -+static bool needs_dsc_aux_workaround(struct dc_link *link) -+{ -+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && -+ 
(link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && -+ link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) -+ return true; -+ -+ return false; -+} -+ -+bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port) -+{ -+ u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F -+ -+ if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) { -+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && -+ IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) { -+ DRM_INFO("Synaptics Cascaded MST hub\n"); -+ return true; -+ } -+ } -+ -+ return false; -+} -+ - static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) - { - struct dc_sink *dc_sink = aconnector->dc_sink; -@@ -164,7 +212,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto - u8 *dsc_branch_dec_caps = NULL; - - aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); --#if defined(CONFIG_HP_HOOK_WORKAROUND) -+ - /* - * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs - * because it only check the dsc/fec caps of the "port variable" and not the dock -@@ -174,10 +222,14 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto - * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux - * - */ -- -- if (!aconnector->dsc_aux && !port->parent->port_parent) -+ if (!aconnector->dsc_aux && !port->parent->port_parent && -+ needs_dsc_aux_workaround(aconnector->dc_link)) - aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; --#endif -+ -+ /* synaptics cascaded MST hub case */ -+ if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port)) -+ aconnector->dsc_aux = port->mgr->aux; -+ - if (!aconnector->dsc_aux) - return false; - -@@ -267,6 +319,32 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) - /* dc_link_add_remote_sink returns a new reference */ - aconnector->dc_sink = dc_sink; - -+ /* when display is unplugged from mst hub, connctor will be -+ * destroyed within dm_dp_mst_connector_destroy. connector -+ * hdcp perperties, like type, undesired, desired, enabled, -+ * will be lost. So, save hdcp properties into hdcp_work within -+ * amdgpu_dm_atomic_commit_tail. 
if the same display is -+ * plugged back with same display index, its hdcp properties -+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes -+ */ -+#ifdef CONFIG_DRM_AMD_DC_HDCP -+ if (aconnector->dc_sink && connector->state) { -+ struct drm_device *dev = connector->dev; -+ struct amdgpu_device *adev = drm_to_adev(dev); -+ -+ if (adev->dm.hdcp_workqueue) { -+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; -+ struct hdcp_workqueue *hdcp_w = -+ &hdcp_work[aconnector->dc_link->link_index]; -+ -+ connector->state->hdcp_content_type = -+ hdcp_w->hdcp_content_type[connector->index]; -+ connector->state->content_protection = -+ hdcp_w->content_protection[connector->index]; -+ } -+ } -+#endif -+ - if (aconnector->dc_sink) { - amdgpu_dm_update_freesync_caps( - connector, aconnector->edid); -@@ -356,7 +434,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs - static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) - { - drm_encoder_cleanup(encoder); -- kfree(encoder); - } - - static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h -index 900d3f7a84989..f7523fd23f543 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h -@@ -26,6 +26,18 @@ - #ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ - #define __DAL_AMDGPU_DM_MST_TYPES_H__ - -+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C -+ -+/** -+ * Panamera MST Hub detection -+ * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case -+ * Check from beginning of branch device vendor specific field (050Ch) -+ */ -+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0) -+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10 -+#define SYNAPTICS_CASCADED_HUB_ID 0x5A -+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 
1 : 0) -+ - struct amdgpu_display_manager; - struct amdgpu_dm_connector; - -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c -index 70a554f1e725a..278ff281a1bd5 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c -@@ -36,10 +36,14 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link) - { - uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; - -- if (!(link->connector_signal & SIGNAL_TYPE_EDP)) -+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) { -+ link->psr_settings.psr_feature_enabled = false; - return; -- if (link->type == dc_connection_none) -+ } -+ if (link->type == dc_connection_none) { -+ link->psr_settings.psr_feature_enabled = false; - return; -+ } - if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, - dpcd_data, sizeof(dpcd_data))) { - link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; -@@ -74,10 +78,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) - - link = stream->link; - -- psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; -- -- if (psr_config.psr_version > 0) { -- psr_config.psr_exit_link_training_required = 0x1; -+ if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { -+ psr_config.psr_version = link->psr_settings.psr_version; - psr_config.psr_frame_capture_indication_req = 0; - psr_config.psr_rfb_setup_time = 0x37; - psr_config.psr_sdp_transmit_line_num_deadline = 0x20; -diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c -index 6dbde74c1e069..228f098e5d88f 100644 ---- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c -+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c -@@ -352,6 +352,7 @@ static enum bp_result get_gpio_i2c_info( - uint32_t count = 0; - unsigned int table_index = 0; - bool find_valid = false; -+ struct atom_gpio_pin_assignment *pin; - - if (!info) - return BP_RESULT_BADINPUT; -@@ -379,20 +380,17 @@ static enum bp_result get_gpio_i2c_info( - - sizeof(struct atom_common_table_header)) - / sizeof(struct atom_gpio_pin_assignment); - -+ pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; -+ - for (table_index = 0; table_index < count; table_index++) { -- if (((record->i2c_id & I2C_HW_CAP) == ( -- header->gpio_pin[table_index].gpio_id & -- I2C_HW_CAP)) && -- ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == -- (header->gpio_pin[table_index].gpio_id & -- I2C_HW_ENGINE_ID_MASK)) && -- ((record->i2c_id & I2C_HW_LANE_MUX) == -- (header->gpio_pin[table_index].gpio_id & -- I2C_HW_LANE_MUX))) { -+ if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && -+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && -+ ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { - /* still valid */ - find_valid = true; - break; - } -+ pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); - } - - /* If we don't find the entry that we are looking for then -@@ -408,11 +406,8 @@ static enum bp_result get_gpio_i2c_info( - info->i2c_slave_address = record->i2c_slave_addr; - - /* TODO: check how to get register offset for en, Y, etc. 
*/ -- info->gpio_info.clk_a_register_index = -- le16_to_cpu( -- header->gpio_pin[table_index].data_a_reg_index); -- info->gpio_info.clk_a_shift = -- header->gpio_pin[table_index].gpio_bitshift; -+ info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index); -+ info->gpio_info.clk_a_shift = pin->gpio_bitshift; - - return BP_RESULT_OK; - } -diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c -index 6ca288fb5fb9e..2d46bc527b218 100644 ---- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c -+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c -@@ -26,12 +26,12 @@ - #include "bw_fixed.h" - - --#define MIN_I64 \ -- (int64_t)(-(1LL << 63)) -- - #define MAX_I64 \ - (int64_t)((1ULL << 63) - 1) - -+#define MIN_I64 \ -+ (-MAX_I64 - 1) -+ - #define FRACTIONAL_PART_MASK \ - ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) - -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c -index bb31541f80723..6420527fe476c 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c -@@ -306,8 +306,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) - case FAMILY_NV: - if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { - dcn3_clk_mgr_destroy(clk_mgr); -- } -- if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { -+ } else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { - dcn3_clk_mgr_destroy(clk_mgr); - } - if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c -index 1861a147a7fa1..5c5cbeb59c4d9 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c -@@ -437,8 +437,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) - clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; - - /* Refresh bounding box */ -+ DC_FP_START(); - clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( - clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); -+ DC_FP_END(); - } - - static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c -index 8ecc708bcd9ec..766759420eebb 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c -@@ -302,6 +302,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b - /* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */ - uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 
1 : 0); - -+ smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n", -+ enable, cache_timer_delay, cache_timer_scale); -+ - dcn30_smu_send_msg_with_param(clk_mgr, - DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL); - } -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c -index 7046da14bb2a5..329ce4e84b83c 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c -@@ -582,32 +582,32 @@ static struct wm_table lpddr5_wm_table = { - .wm_inst = WM_A, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, -- .sr_exit_time_us = 5.32, -- .sr_enter_plus_exit_time_us = 6.38, -+ .sr_exit_time_us = 13.5, -+ .sr_enter_plus_exit_time_us = 16.5, - .valid = true, - }, - { - .wm_inst = WM_B, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, -- .sr_exit_time_us = 9.82, -- .sr_enter_plus_exit_time_us = 11.196, -+ .sr_exit_time_us = 13.5, -+ .sr_enter_plus_exit_time_us = 16.5, - .valid = true, - }, - { - .wm_inst = WM_C, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, -- .sr_exit_time_us = 9.89, -- .sr_enter_plus_exit_time_us = 11.24, -+ .sr_exit_time_us = 13.5, -+ .sr_enter_plus_exit_time_us = 16.5, - .valid = true, - }, - { - .wm_inst = WM_D, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, -- .sr_exit_time_us = 9.748, -- .sr_enter_plus_exit_time_us = 11.102, -+ .sr_exit_time_us = 13.5, -+ .sr_enter_plus_exit_time_us = 16.5, - .valid = true, - }, - } -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c -index 377c4e53a2b37..5357620627afc 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c -@@ -81,6 +81,11 @@ int dcn31_get_active_display_cnt_wa( - stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK || - stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) - tmds_present = true; -+ -+ /* Checking stream / link detection ensuring that PHY is active*/ -+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off) -+ display_count++; -+ - } - - for (i = 0; i < dc->link_count; i++) { -@@ -157,6 +162,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, - union display_idle_optimization_u idle_info = { 0 }; - idle_info.idle_info.df_request_disabled = 1; - idle_info.idle_info.phy_ref_clk_off = 1; -+ idle_info.idle_info.s0i2_rdy = 1; - dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data); - /* update power state */ - clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; -@@ -323,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = { - - }; - --static struct wm_table ddr4_wm_table = { -+static struct wm_table ddr5_wm_table = { - .entries = { - { - .wm_inst = WM_A, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, -- .sr_exit_time_us = 6.09, -- .sr_enter_plus_exit_time_us = 7.14, -+ .sr_exit_time_us = 9, -+ .sr_enter_plus_exit_time_us = 11, - .valid = true, - }, - { - .wm_inst = WM_B, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, -- .sr_exit_time_us = 10.12, -- .sr_enter_plus_exit_time_us = 11.48, -+ .sr_exit_time_us = 9, -+ .sr_enter_plus_exit_time_us = 11, - .valid = true, - }, - { - .wm_inst = WM_C, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, -- .sr_exit_time_us = 10.12, -- .sr_enter_plus_exit_time_us 
= 11.48, -+ .sr_exit_time_us = 9, -+ .sr_enter_plus_exit_time_us = 11, - .valid = true, - }, - { - .wm_inst = WM_D, - .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.72, -- .sr_exit_time_us = 10.12, -- .sr_enter_plus_exit_time_us = 11.48, -+ .sr_exit_time_us = 9, -+ .sr_enter_plus_exit_time_us = 11, - .valid = true, - }, - } -@@ -682,7 +688,7 @@ void dcn31_clk_mgr_construct( - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { - dcn31_bw_params.wm_table = lpddr5_wm_table; - } else { -- dcn31_bw_params.wm_table = ddr4_wm_table; -+ dcn31_bw_params.wm_table = ddr5_wm_table; - } - /* Saved clocks configured at boot for debug purposes */ - dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -index 8c2b77eb94593..21d2cbc3cbb20 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -@@ -119,6 +119,16 @@ int dcn31_smu_send_msg_with_param( - - result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); - -+ if (result == VBIOSSMC_Result_Failed) { -+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && -+ param == TABLE_WATERMARKS) -+ DC_LOG_WARNING("Watermarks table not configured properly by SMU"); -+ else -+ ASSERT(0); -+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); -+ return -1; -+ } -+ - if (IS_SMU_TIMEOUT(result)) { - ASSERT(0); - dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); -diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c -index c798c65d42765..634640d5c0ff4 100644 ---- a/drivers/gpu/drm/amd/display/dc/core/dc.c -+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c -@@ -771,6 +771,7 @@ static bool dc_construct_ctx(struct dc *dc, - - dc_ctx->perf_trace = dc_perf_trace_create(); - if (!dc_ctx->perf_trace) { -+ kfree(dc_ctx); - ASSERT_CRITICAL(false); - return false; - } -@@ -891,10 +892,13 @@ static bool dc_construct(struct dc *dc, - goto fail; - #ifdef CONFIG_DRM_AMD_DC_DCN - dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; --#endif - -- if (dc->res_pool->funcs->update_bw_bounding_box) -+ if (dc->res_pool->funcs->update_bw_bounding_box) { -+ DC_FP_START(); - dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); -+ DC_FP_END(); -+ } -+#endif - - /* Creation of current_state must occur after dc->dml - * is initialized in dc_create_resource_pool because -@@ -1118,6 +1122,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) - - dc->caps.max_dp_protocol_version = DP_VERSION_1_4; - -+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; -+ - if (dc->res_pool->dmcu != NULL) - dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; - } -@@ -1783,6 +1789,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) - - post_surface_trace(dc); - -+ if (dc->ctx->dce_version >= DCE_VERSION_MAX) -+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); -+ else -+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); -+ - if (is_flip_pending_in_pipes(dc, context)) - return; - -@@ -2100,9 +2111,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc, - enum surface_update_type overall_type = UPDATE_TYPE_FAST; - union surface_update_flags *update_flags = &u->surface->update_flags; - -- if (u->flip_addr) -- update_flags->bits.addr_update = 1; -- - if (!is_surface_in_context(context, u->surface) || 
u->surface->force_full_update) { - update_flags->raw = 0xFFFFFFFF; - return UPDATE_TYPE_FULL; -@@ -2452,11 +2460,8 @@ static void copy_stream_update_to_stream(struct dc *dc, - if (update->abm_level) - stream->abm_level = *update->abm_level; - -- if (update->periodic_interrupt0) -- stream->periodic_interrupt0 = *update->periodic_interrupt0; -- -- if (update->periodic_interrupt1) -- stream->periodic_interrupt1 = *update->periodic_interrupt1; -+ if (update->periodic_interrupt) -+ stream->periodic_interrupt = *update->periodic_interrupt; - - if (update->gamut_remap) - stream->gamut_remap_matrix = *update->gamut_remap; -@@ -2526,6 +2531,137 @@ static void copy_stream_update_to_stream(struct dc *dc, - } - } - -+void dc_reset_state(struct dc *dc, struct dc_state *context) -+{ -+ dc_resource_state_destruct(context); -+ -+ /* clear the structure, but don't reset the reference count */ -+ memset(context, 0, offsetof(struct dc_state, refcount)); -+ -+ init_state(dc, context); -+} -+ -+static bool update_planes_and_stream_state(struct dc *dc, -+ struct dc_surface_update *srf_updates, int surface_count, -+ struct dc_stream_state *stream, -+ struct dc_stream_update *stream_update, -+ enum surface_update_type *new_update_type, -+ struct dc_state **new_context) -+{ -+ struct dc_state *context; -+ int i, j; -+ enum surface_update_type update_type; -+ const struct dc_stream_status *stream_status; -+ struct dc_context *dc_ctx = dc->ctx; -+ -+ stream_status = dc_stream_get_status(stream); -+ -+ if (!stream_status) { -+ if (surface_count) /* Only an error condition if surf_count non-zero*/ -+ ASSERT(false); -+ -+ return false; /* Cannot commit surface to stream that is not committed */ -+ } -+ -+ context = dc->current_state; -+ -+ update_type = dc_check_update_surfaces_for_stream( -+ dc, srf_updates, surface_count, stream_update, stream_status); -+ -+ /* update current stream with the new updates */ -+ copy_stream_update_to_stream(dc, context, stream, stream_update); -+ -+ /* do not perform surface update if surface has invalid dimensions -+ * (all zero) and no scaling_info is provided -+ */ -+ if (surface_count > 0) { -+ for (i = 0; i < surface_count; i++) { -+ if ((srf_updates[i].surface->src_rect.width == 0 || -+ srf_updates[i].surface->src_rect.height == 0 || -+ srf_updates[i].surface->dst_rect.width == 0 || -+ srf_updates[i].surface->dst_rect.height == 0) && -+ (!srf_updates[i].scaling_info || -+ srf_updates[i].scaling_info->src_rect.width == 0 || -+ srf_updates[i].scaling_info->src_rect.height == 0 || -+ srf_updates[i].scaling_info->dst_rect.width == 0 || -+ srf_updates[i].scaling_info->dst_rect.height == 0)) { -+ DC_ERROR("Invalid src/dst rects in surface update!\n"); -+ return false; -+ } -+ } -+ } -+ -+ if (update_type >= update_surface_trace_level) -+ update_surface_trace(dc, srf_updates, surface_count); -+ -+ if (update_type >= UPDATE_TYPE_FULL) { -+ struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; -+ -+ for (i = 0; i < surface_count; i++) -+ new_planes[i] = srf_updates[i].surface; -+ -+ /* initialize scratch memory for building context */ -+ context = dc_create_state(dc); -+ if (context == NULL) { -+ DC_ERROR("Failed to allocate new validate context!\n"); -+ return false; -+ } -+ -+ dc_resource_state_copy_construct( -+ dc->current_state, context); -+ -+ /*remove old surfaces from context */ -+ if (!dc_rem_all_planes_for_stream(dc, stream, context)) { -+ -+ BREAK_TO_DEBUGGER(); -+ goto fail; -+ } -+ -+ /* add surface to context */ -+ if (!dc_add_all_planes_for_stream(dc, stream, new_planes, 
surface_count, context)) { -+ -+ BREAK_TO_DEBUGGER(); -+ goto fail; -+ } -+ } -+ -+ /* save update parameters into surface */ -+ for (i = 0; i < surface_count; i++) { -+ struct dc_plane_state *surface = srf_updates[i].surface; -+ -+ copy_surface_update_to_plane(surface, &srf_updates[i]); -+ -+ if (update_type >= UPDATE_TYPE_MED) { -+ for (j = 0; j < dc->res_pool->pipe_count; j++) { -+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; -+ -+ if (pipe_ctx->plane_state != surface) -+ continue; -+ -+ resource_build_scaling_params(pipe_ctx); -+ } -+ } -+ } -+ -+ if (update_type == UPDATE_TYPE_FULL) { -+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { -+ BREAK_TO_DEBUGGER(); -+ goto fail; -+ } -+ } -+ -+ *new_context = context; -+ *new_update_type = update_type; -+ -+ return true; -+ -+fail: -+ dc_release_state(context); -+ -+ return false; -+ -+} -+ - static void commit_planes_do_stream_update(struct dc *dc, - struct dc_stream_state *stream, - struct dc_stream_update *stream_update, -@@ -2540,13 +2676,8 @@ static void commit_planes_do_stream_update(struct dc *dc, - - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { - -- if (stream_update->periodic_interrupt0 && -- dc->hwss.setup_periodic_interrupt) -- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); -- -- if (stream_update->periodic_interrupt1 && -- dc->hwss.setup_periodic_interrupt) -- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); -+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) -+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); - - if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || - stream_update->vrr_infopacket || -@@ -2703,7 +2834,8 @@ static void commit_planes_for_stream(struct dc *dc, - #endif - - if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) -- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { -+ if (top_pipe_to_program && -+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(stream->link)) { - union dmub_hw_lock_flags hw_locks = { 0 }; - struct dmub_hw_lock_inst_flags inst_flags = { 0 }; -@@ -2927,6 +3059,152 @@ static void commit_planes_for_stream(struct dc *dc, - } - } - -+static bool commit_minimal_transition_state(struct dc *dc, -+ struct dc_state *transition_base_context) -+{ -+ struct dc_state *transition_context = dc_create_state(dc); -+ enum pipe_split_policy tmp_policy; -+ enum dc_status ret = DC_ERROR_UNEXPECTED; -+ unsigned int i, j; -+ -+ if (!transition_context) -+ return false; -+ -+ tmp_policy = dc->debug.pipe_split_policy; -+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; -+ -+ dc_resource_state_copy_construct(transition_base_context, transition_context); -+ -+ //commit minimal state -+ if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { -+ for (i = 0; i < transition_context->stream_count; i++) { -+ struct dc_stream_status *stream_status = &transition_context->stream_status[i]; -+ -+ for (j = 0; j < stream_status->plane_count; j++) { -+ struct dc_plane_state *plane_state = stream_status->plane_states[j]; -+ -+ /* force vsync flip when reconfiguring pipes to prevent underflow -+ * and corruption -+ */ -+ plane_state->flip_immediate = false; -+ } -+ } -+ -+ ret = dc_commit_state_no_check(dc, transition_context); -+ } -+ -+ //always release as dc_commit_state_no_check retains in good case -+ dc_release_state(transition_context); -+ -+ //restore previous pipe 
split policy -+ dc->debug.pipe_split_policy = tmp_policy; -+ -+ if (ret != DC_OK) { -+ //this should never happen -+ BREAK_TO_DEBUGGER(); -+ return false; -+ } -+ -+ //force full surface update -+ for (i = 0; i < dc->current_state->stream_count; i++) { -+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { -+ dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; -+ } -+ } -+ -+ return true; -+} -+ -+bool dc_update_planes_and_stream(struct dc *dc, -+ struct dc_surface_update *srf_updates, int surface_count, -+ struct dc_stream_state *stream, -+ struct dc_stream_update *stream_update) -+{ -+ struct dc_state *context; -+ enum surface_update_type update_type; -+ int i; -+ -+ /* In cases where MPO and split or ODM are used transitions can -+ * cause underflow. Apply stream configuration with minimal pipe -+ * split first to avoid unsupported transitions for active pipes. -+ */ -+ bool force_minimal_pipe_splitting = false; -+ bool is_plane_addition = false; -+ -+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); -+ -+ if (cur_stream_status && -+ dc->current_state->stream_count > 0 && -+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { -+ /* determine if minimal transition is required */ -+ if (cur_stream_status->plane_count > surface_count) { -+ force_minimal_pipe_splitting = true; -+ } else if (cur_stream_status->plane_count < surface_count) { -+ force_minimal_pipe_splitting = true; -+ is_plane_addition = true; -+ } -+ } -+ -+ /* on plane addition, minimal state is the current one */ -+ if (force_minimal_pipe_splitting && is_plane_addition && -+ !commit_minimal_transition_state(dc, dc->current_state)) -+ return false; -+ -+ if (!update_planes_and_stream_state( -+ dc, -+ srf_updates, -+ surface_count, -+ stream, -+ stream_update, -+ &update_type, -+ &context)) -+ return false; -+ -+ /* on plane addition, minimal state is the new one */ -+ if (force_minimal_pipe_splitting && !is_plane_addition) { -+ if (!commit_minimal_transition_state(dc, context)) { -+ dc_release_state(context); -+ return false; -+ } -+ -+ update_type = UPDATE_TYPE_FULL; -+ } -+ -+ commit_planes_for_stream( -+ dc, -+ srf_updates, -+ surface_count, -+ stream, -+ stream_update, -+ update_type, -+ context); -+ -+ if (dc->current_state != context) { -+ -+ /* Since memory free requires elevated IRQL, an interrupt -+ * request is generated by mem free. If this happens -+ * between freeing and reassigning the context, our vsync -+ * interrupt will call into dc and cause a memory -+ * corruption BSOD. Hence, we first reassign the context, -+ * then free the old context. 
-+ */ -+ -+ struct dc_state *old = dc->current_state; -+ -+ dc->current_state = context; -+ dc_release_state(old); -+ -+ // clear any forced full updates -+ for (i = 0; i < dc->res_pool->pipe_count; i++) { -+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; -+ -+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream) -+ pipe_ctx->plane_state->force_full_update = false; -+ } -+ } -+ return true; -+} -+ - void dc_commit_updates_for_stream(struct dc *dc, - struct dc_surface_update *srf_updates, - int surface_count, -@@ -2968,6 +3246,14 @@ void dc_commit_updates_for_stream(struct dc *dc, - if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) - new_pipe->plane_state->force_full_update = true; - } -+ } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) { -+ /* -+ * Previous frame finished and HW is ready for optimization. -+ * -+ * Only relevant for DCN behavior where we can guarantee the optimization -+ * is safe to apply - retain the legacy behavior for DCE. -+ */ -+ dc_post_update_surfaces_to_stream(dc); - } - - -@@ -3024,14 +3310,11 @@ void dc_commit_updates_for_stream(struct dc *dc, - pipe_ctx->plane_state->force_full_update = false; - } - } -- /*let's use current_state to update watermark etc*/ -- if (update_type >= UPDATE_TYPE_FULL) { -- dc_post_update_surfaces_to_stream(dc); - -- if (dc_ctx->dce_version >= DCE_VERSION_MAX) -- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); -- else -- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); -+ /* Legacy optimization path for DCE. */ -+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { -+ dc_post_update_surfaces_to_stream(dc); -+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); - } - - return; -diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c -index 9039fb134db59..f858ae68aa5f6 100644 ---- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c -+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c -@@ -92,8 +92,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = { - { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, - 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, - { COLOR_SPACE_YCBCR2020_TYPE, -- { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, -- 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, -+ { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, -+ 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} }, - { COLOR_SPACE_YCBCR709_BLACK_TYPE, - { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, - 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} }, -diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c -index 1e44b13c1c7de..b727bd7e039d7 100644 ---- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c -+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c -@@ -1665,12 +1665,6 @@ struct dc_link *link_create(const struct link_init_data *init_params) - if (false == dc_link_construct(link, init_params)) - goto construct_fail; - -- /* -- * Must use preferred_link_setting, not reported_link_cap or verified_link_cap, -- * since struct preferred_link_setting won't be reset after S3. 
-- */ -- link->preferred_link_setting.dpcd_source_device_specific_field_support = true; -- - return link; - - construct_fail: -@@ -1696,6 +1690,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) - union down_spread_ctrl old_downspread; - union down_spread_ctrl new_downspread; - -+ memset(&old_downspread, 0, sizeof(old_downspread)); -+ - core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, - &old_downspread.raw, sizeof(old_downspread)); - -diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c -index 6d655e158267a..6777adb66f9d7 100644 ---- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c -+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c -@@ -2075,7 +2075,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) - return max_link_cap; - } - --enum dc_status read_hpd_rx_irq_data( -+static enum dc_status read_hpd_rx_irq_data( - struct dc_link *link, - union hpd_irq_data *irq_data) - { -@@ -2743,7 +2743,7 @@ void decide_link_settings(struct dc_stream_state *stream, - } - - /*************************Short Pulse IRQ***************************/ --static bool allow_hpd_rx_irq(const struct dc_link *link) -+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) - { - /* - * Don't handle RX IRQ unless one of following is met: -@@ -3118,7 +3118,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video - &dpcd_pattern_type.value, - sizeof(dpcd_pattern_type)); - -- channel_count = dpcd_test_mode.bits.channel_count + 1; -+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); - - // read pattern periods for requested channels when sawTooth pattern is requested - if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || -@@ -3177,7 +3177,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video - } - } - --static void handle_automated_test(struct dc_link *link) -+void dc_link_dp_handle_automated_test(struct dc_link *link) - { - union test_request test_request; - union test_response test_response; -@@ -3226,17 +3226,50 @@ static void handle_automated_test(struct dc_link *link) - sizeof(test_response)); - } - --bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss) -+void dc_link_dp_handle_link_loss(struct dc_link *link) -+{ -+ int i; -+ struct pipe_ctx *pipe_ctx; -+ -+ for (i = 0; i < MAX_PIPES; i++) { -+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) -+ break; -+ } -+ -+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL) -+ return; -+ -+ for (i = 0; i < MAX_PIPES; i++) { -+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { -+ core_link_disable_stream(pipe_ctx); -+ } -+ } -+ -+ for (i = 0; i < MAX_PIPES; i++) { -+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { -+ core_link_enable_stream(link->dc->current_state, pipe_ctx); -+ } -+ } -+} -+ -+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, -+ bool defer_handling, bool *has_left_work) - { - union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; - union device_service_irq 
device_service_clear = { { 0 } }; - enum dc_status result; - bool status = false; -- struct pipe_ctx *pipe_ctx; -- int i; - - if (out_link_loss) - *out_link_loss = false; -+ -+ if (has_left_work) -+ *has_left_work = false; - /* For use cases related to down stream connection status change, - * PSR and device auto test, refer to function handle_sst_hpd_irq - * in DAL2.1*/ -@@ -3268,11 +3301,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd - &device_service_clear.raw, - sizeof(device_service_clear.raw)); - device_service_clear.raw = 0; -- handle_automated_test(link); -+ if (defer_handling && has_left_work) -+ *has_left_work = true; -+ else -+ dc_link_dp_handle_automated_test(link); - return false; - } - -- if (!allow_hpd_rx_irq(link)) { -+ if (!dc_link_dp_allow_hpd_rx_irq(link)) { - DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", - __func__, link->link_index); - return false; -@@ -3286,12 +3322,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd - * so do not handle as a normal sink status change interrupt. - */ - -- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) -+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { -+ if (defer_handling && has_left_work) -+ *has_left_work = true; - return true; -+ } - - /* check if we have MST msg and return since we poll for it */ -- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) -+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { -+ if (defer_handling && has_left_work) -+ *has_left_work = true; - return false; -+ } - - /* For now we only handle 'Downstream port status' case. - * If we got sink count changed it means -@@ -3308,29 +3350,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd - sizeof(hpd_irq_dpcd_data), - "Status: "); - -- for (i = 0; i < MAX_PIPES; i++) { -- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) -- break; -- } -- -- if (pipe_ctx == NULL || pipe_ctx->stream == NULL) -- return false; -- -- -- for (i = 0; i < MAX_PIPES; i++) { -- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) -- core_link_disable_stream(pipe_ctx); -- } -- -- for (i = 0; i < MAX_PIPES; i++) { -- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; -- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && -- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) -- core_link_enable_stream(link->dc->current_state, pipe_ctx); -- } -+ if (defer_handling && has_left_work) -+ *has_left_work = true; -+ else -+ dc_link_dp_handle_link_loss(link); - - status = false; - if (out_link_loss) -@@ -3650,7 +3673,9 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) - lttpr_dpcd_data, - sizeof(lttpr_dpcd_data)); - if (status != DC_OK) { -- dm_error("%s: Read LTTPR caps data failed.\n", __func__); -+#if defined(CONFIG_DRM_AMD_DC_DCN) -+ DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); -+#endif - return false; - } - -@@ -3678,6 +3703,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) - lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; - -+ /* If this chip cap is set, at least one retimer must exist in the chain -+ * Override count to 1 if we receive a known bad count (0 or an 
invalid value) */ -+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && -+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { -+ ASSERT(0); -+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; -+ } -+ - /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ - is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && - link->dpcd_caps.lttpr_caps.max_lane_count > 0 && -@@ -3913,6 +3946,26 @@ static bool retrieve_link_cap(struct dc_link *link) - dp_hw_fw_revision.ieee_fw_rev, - sizeof(dp_hw_fw_revision.ieee_fw_rev)); - -+ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ -+ { -+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; -+ uint8_t fwrev_mbp_2018[] = { 7, 4 }; -+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; -+ -+ /* We also check for the firmware revision as 16,1 models have an -+ * identical device id and are incorrectly quirked otherwise. -+ */ -+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && -+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, -+ sizeof(str_mbp_2018)) && -+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, -+ sizeof(fwrev_mbp_2018)) || -+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, -+ sizeof(fwrev_mbp_2018_vega)))) { -+ link->reported_link_cap.link_rate = LINK_RATE_RBR2; -+ } -+ } -+ - memset(&link->dpcd_caps.dsc_caps, '\0', - sizeof(link->dpcd_caps.dsc_caps)); - memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); -@@ -4690,7 +4743,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) - link_enc->funcs->fec_set_ready(link_enc, true); - link->fec_state = dc_link_fec_ready; - } else { -- link_enc->funcs->fec_set_ready(link->link_enc, false); -+ link_enc->funcs->fec_set_ready(link_enc, false); - link->fec_state = dc_link_fec_not_ready; - dm_error("dpcd write failed to set fec_ready"); - } -@@ -4788,18 +4841,10 @@ void dpcd_set_source_specific_data(struct dc_link *link) - - uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; - -- if (link->preferred_link_setting.dpcd_source_device_specific_field_support) { -- result_write_min_hblank = core_link_write_dpcd(link, -- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), -- sizeof(hblank_size)); -- -- if (result_write_min_hblank == DC_ERROR_UNEXPECTED) -- link->preferred_link_setting.dpcd_source_device_specific_field_support = false; -- } else { -- DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n"); -- } -+ result_write_min_hblank = core_link_write_dpcd(link, -+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), -+ sizeof(hblank_size)); - } -- - DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, - WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, - "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", -diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c -index a60396d5be445..fa4d671b5b2cc 100644 ---- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c -@@ -1062,12 +1062,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) - * on certain displays, such as the Sharp 4k. 36bpp is needed - * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and - * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc -- * precision on at least DCN display engines. 
However, at least -- * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, -- * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3 -- * did not show such problems, so this seems to be the exception. -+ * precision on DCN display engines, but apparently not for DCE, as -+ * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have -+ * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, -+ * neither do DCE-8 at 4k resolution, or DCE-11.2 (broken identify pixel -+ * passthrough). Therefore only use 36 bpp on DCN where it is actually needed. - */ -- if (plane_state->ctx->dce_version > DCE_VERSION_11_0) -+ if (plane_state->ctx->dce_version > DCE_VERSION_MAX) - pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; - else - pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; -@@ -1433,6 +1434,9 @@ bool dc_remove_plane_from_context( - struct dc_stream_status *stream_status = NULL; - struct resource_pool *pool = dc->res_pool; - -+ if (!plane_state) -+ return true; -+ - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; -@@ -1599,6 +1603,9 @@ static bool are_stream_backends_same( - if (is_timing_changed(stream_a, stream_b)) - return false; - -+ if (stream_a->signal != stream_b->signal) -+ return false; -+ - if (stream_a->dpms_off != stream_b->dpms_off) - return false; - -@@ -1623,6 +1630,10 @@ bool dc_is_stream_unchanged( - if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) - return false; - -+ /*compare audio info*/ -+ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) -+ return false; -+ - return true; - } - -@@ -1795,9 +1806,6 @@ enum dc_status dc_remove_stream_from_ctx( - dc->res_pool, - del_pipe->stream_res.stream_enc, - false); -- /* Release link encoder from stream in new dc_state. */ -- if (dc->res_pool->funcs->link_enc_unassign) -- dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); - - if (del_pipe->stream_res.audio) - update_audio_usage( -diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h -index 3ab52d9a82cf6..e0f58fab5e8ed 100644 ---- a/drivers/gpu/drm/amd/display/dc/dc.h -+++ b/drivers/gpu/drm/amd/display/dc/dc.h -@@ -185,6 +185,7 @@ struct dc_caps { - struct dc_color_caps color; - bool vbios_lttpr_aware; - bool vbios_lttpr_enable; -+ uint32_t max_otg_num; - }; - - struct dc_bug_wa { -diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h -index 4f54bde1bb1c7..1948cd9427d7e 100644 ---- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h -+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h -@@ -109,7 +109,6 @@ struct dc_link_settings { - enum dc_link_spread link_spread; - bool use_link_rate_set; - uint8_t link_rate_set; -- bool dpcd_source_device_specific_field_support; - }; - - struct dc_lane_settings { -diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h -index 83845d006c54a..9b7c32f7fd86f 100644 ---- a/drivers/gpu/drm/amd/display/dc/dc_link.h -+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h -@@ -296,7 +296,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); - * false - no change in Downstream port status. No further action required - * from DM. 
*/ - bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, -- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss); -+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss, -+ bool defer_handling, bool *has_left_work); - - /* - * On eDP links this function call will stall until T12 has elapsed. -@@ -305,9 +306,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, - */ - bool dc_link_wait_for_t12(struct dc_link *link); - --enum dc_status read_hpd_rx_irq_data( -- struct dc_link *link, -- union hpd_irq_data *irq_data); -+void dc_link_dp_handle_automated_test(struct dc_link *link); -+void dc_link_dp_handle_link_loss(struct dc_link *link); -+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link); - - struct dc_sink_init_data; - -diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h -index b8ebc1f095389..3e606faff58f4 100644 ---- a/drivers/gpu/drm/amd/display/dc/dc_stream.h -+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h -@@ -193,8 +193,7 @@ struct dc_stream_state { - /* DMCU info */ - unsigned int abm_level; - -- struct periodic_interrupt_config periodic_interrupt0; -- struct periodic_interrupt_config periodic_interrupt1; -+ struct periodic_interrupt_config periodic_interrupt; - - /* from core_stream struct */ - struct dc_context *ctx; -@@ -260,8 +259,7 @@ struct dc_stream_update { - struct dc_info_packet *hdr_static_metadata; - unsigned int *abm_level; - -- struct periodic_interrupt_config *periodic_interrupt0; -- struct periodic_interrupt_config *periodic_interrupt1; -+ struct periodic_interrupt_config *periodic_interrupt; - - struct dc_info_packet *vrr_infopacket; - struct dc_info_packet *vsc_infopacket; -@@ -290,6 +288,9 @@ bool dc_is_stream_scaling_unchanged( - struct dc_stream_state *old_stream, struct dc_stream_state *stream); - - /* -+ * Setup stream attributes if no stream updates are provided -+ * there will be no impact on the stream parameters -+ * - * Set up surface attributes and associate to a stream - * The surfaces parameter is an absolute set of all surface active for the stream. - * If no surfaces are provided, the stream will be blanked; no memory read. -@@ -298,8 +299,23 @@ bool dc_is_stream_scaling_unchanged( - * After this call: - * Surfaces attributes are programmed and configured to be composed into stream. - * This does not trigger a flip. No surface address is programmed. -+ * - */ -+bool dc_update_planes_and_stream(struct dc *dc, -+ struct dc_surface_update *surface_updates, int surface_count, -+ struct dc_stream_state *dc_stream, -+ struct dc_stream_update *stream_update); - -+/* -+ * Set up surface attributes and associate to a stream -+ * The surfaces parameter is an absolute set of all surface active for the stream. -+ * If no surfaces are provided, the stream will be blanked; no memory read. -+ * Any flip related attribute changes must be done through this interface. -+ * -+ * After this call: -+ * Surfaces attributes are programmed and configured to be composed into stream. -+ * This does not trigger a flip. No surface address is programmed. 
-+ */ - void dc_commit_updates_for_stream(struct dc *dc, - struct dc_surface_update *srf_updates, - int surface_count, -diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c -index 2c7eb982eabca..5f1b735da5063 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c -+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c -@@ -545,9 +545,11 @@ static void dce112_get_pix_clk_dividers_helper ( - switch (pix_clk_params->color_depth) { - case COLOR_DEPTH_101010: - actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2; -+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; - break; - case COLOR_DEPTH_121212: - actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2; -+ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10; - break; - case COLOR_DEPTH_161616: - actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2; -@@ -1013,9 +1015,12 @@ static bool get_pixel_clk_frequency_100hz( - * not be programmed equal to DPREFCLK - */ - modulo_hz = REG_READ(MODULO[inst]); -- *pixel_clk_khz = div_u64((uint64_t)clock_hz* -- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10, -- modulo_hz); -+ if (modulo_hz) -+ *pixel_clk_khz = div_u64((uint64_t)clock_hz* -+ clock_source->ctx->dc->clk_mgr->dprefclk_khz*10, -+ modulo_hz); -+ else -+ *pixel_clk_khz = 0; - } else { - /* NOTE: There is agreement with VBIOS here that MODULO is - * programmed equal to DPREFCLK, in which case PHASE will be -diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c -index d9fd4ec60588f..670d5ab9d9984 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c -+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c -@@ -1009,7 +1009,7 @@ static void dce_transform_set_pixel_storage_depth( - color_depth = COLOR_DEPTH_101010; - pixel_depth = 0; - expan_mode = 1; -- BREAK_TO_DEBUGGER(); -+ DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth); - break; - } - -@@ -1023,8 +1023,7 @@ static void dce_transform_set_pixel_storage_depth( - if (!(xfm_dce->lb_pixel_depth_supported & depth)) { - /*we should use unsupported capabilities - * unless it is required by w/a*/ -- DC_LOG_WARNING("%s: Capability not supported", -- __func__); -+ DC_LOG_DC("%s: Capability not supported", __func__); - } - } - -diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c -index 62d595ded8668..52142d272c868 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c -+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c -@@ -1744,10 +1744,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) - hws->funcs.edp_backlight_control(edp_link_with_sink, false); - } - /*resume from S3, no vbios posting, no need to power down again*/ -+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); -+ - power_down_all_hw_blocks(dc); - disable_vga_and_power_gate_all_controllers(dc); - if (edp_link_with_sink && !keep_edp_vdd_on) - dc->hwss.edp_power_control(edp_link_with_sink, false); -+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); - } - bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); - } -@@ -2108,7 +2111,8 @@ static void dce110_setup_audio_dto( - continue; - if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) - continue; -- if (pipe_ctx->stream_res.audio != NULL) { -+ if (pipe_ctx->stream_res.audio != NULL && -+ 
pipe_ctx->stream_res.audio->enabled == false) { - struct audio_output audio_output; - - build_audio_output(context, pipe_ctx, &audio_output); -@@ -2156,7 +2160,8 @@ static void dce110_setup_audio_dto( - if (!dc_is_dp_signal(pipe_ctx->stream->signal)) - continue; - -- if (pipe_ctx->stream_res.audio != NULL) { -+ if (pipe_ctx->stream_res.audio != NULL && -+ pipe_ctx->stream_res.audio->enabled == false) { - struct audio_output audio_output; - - build_audio_output(context, pipe_ctx, &audio_output); -diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c -index c65e4d125c8e2..013fca9b9c68c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c -@@ -361,7 +361,8 @@ static const struct dce_audio_registers audio_regs[] = { - audio_regs(2), - audio_regs(3), - audio_regs(4), -- audio_regs(5) -+ audio_regs(5), -+ audio_regs(6), - }; - - #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ -diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile -index dda596fa1cd76..fee331accc0e7 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile -+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile -@@ -23,7 +23,7 @@ - # Makefile for the 'controller' sub-component of DAL. - # It provides the control and status of HW CRTC block. - --CFLAGS_AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init) -+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init) - - DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \ - dce60_resource.o -diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c -index dcfa0a3efa00d..bf72d3f60d7f4 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c -@@ -1127,6 +1127,7 @@ struct resource_pool *dce60_create_resource_pool( - if (dce60_construct(num_virtual_links, dc, pool)) - return &pool->base; - -+ kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; - } -@@ -1324,6 +1325,7 @@ struct resource_pool *dce61_create_resource_pool( - if (dce61_construct(num_virtual_links, dc, pool)) - return &pool->base; - -+ kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; - } -@@ -1517,6 +1519,7 @@ struct resource_pool *dce64_create_resource_pool( - if (dce64_construct(num_virtual_links, dc, pool)) - return &pool->base; - -+ kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; - } -diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c -index 725d92e40cd30..52d1f9746e8cb 100644 ---- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c -@@ -1138,6 +1138,7 @@ struct resource_pool *dce80_create_resource_pool( - if (dce80_construct(num_virtual_links, dc, pool)) - return &pool->base; - -+ kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; - } -@@ -1337,6 +1338,7 @@ struct resource_pool *dce81_create_resource_pool( - if (dce81_construct(num_virtual_links, dc, pool)) - return &pool->base; - -+ kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; - } -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c -index f4f423d0b8c3f..80595d7f060c3 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c -+++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c -@@ -940,6 +940,7 @@ static const struct hubbub_funcs hubbub1_funcs = { - .program_watermarks = hubbub1_program_watermarks, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, -+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - }; - - void hubbub1_construct(struct hubbub *hubbub, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c -index df8a7718a85fc..aa5a1fa68da05 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c -@@ -804,6 +804,32 @@ static void false_optc_underflow_wa( - tg->funcs->clear_optc_underflow(tg); - } - -+static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) -+{ -+ struct pipe_ctx *other_pipe; -+ int vready_offset = pipe->pipe_dlg_param.vready_offset; -+ -+ /* Always use the largest vready_offset of all connected pipes */ -+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ -+ return vready_offset; -+} -+ - enum dc_status dcn10_enable_stream_timing( - struct pipe_ctx *pipe_ctx, - struct dc_state *context, -@@ -838,7 +864,7 @@ enum dc_status dcn10_enable_stream_timing( - pipe_ctx->stream_res.tg->funcs->program_timing( - pipe_ctx->stream_res.tg, - &stream->timing, -- pipe_ctx->pipe_dlg_param.vready_offset, -+ calculate_vready_offset_for_group(pipe_ctx), - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width, -@@ -1052,9 +1078,13 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) - - void dcn10_verify_allow_pstate_change_high(struct dc *dc) - { -+ struct hubbub *hubbub = dc->res_pool->hubbub; - static bool should_log_hw_state; /* prevent hw state log by default */ - -- if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) { -+ if (!hubbub->funcs->verify_allow_pstate_change_high) -+ return; -+ -+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { - int i = 0; - - if (should_log_hw_state) -@@ -1063,8 +1093,8 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) - TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - BREAK_TO_DEBUGGER(); - if (dcn10_hw_wa_force_recovery(dc)) { -- /*check again*/ -- if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) -+ /*check again*/ -+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) - BREAK_TO_DEBUGGER(); - } - } -@@ -1435,6 +1465,9 @@ void dcn10_init_hw(struct dc *dc) - } - } - -+ if 
(hws->funcs.enable_power_gating_plane) -+ hws->funcs.enable_power_gating_plane(dc->hwseq, true); -+ - /* If taking control over from VBIOS, we may want to optimize our first - * mode set, so we need to skip powering down pipes until we know which - * pipes we want to use. -@@ -1487,8 +1520,6 @@ void dcn10_init_hw(struct dc *dc) - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } -- if (hws->funcs.enable_power_gating_plane) -- hws->funcs.enable_power_gating_plane(dc->hwseq, true); - - if (dc->clk_mgr->funcs->notify_wm_ranges) - dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); -@@ -1522,7 +1553,7 @@ void dcn10_power_down_on_boot(struct dc *dc) - for (i = 0; i < dc->link_count; i++) { - struct dc_link *link = dc->links[i]; - -- if (link->link_enc->funcs->is_dig_enabled && -+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled && - link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); -@@ -2455,14 +2486,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) - struct mpc *mpc = dc->res_pool->mpc; - struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - -- if (per_pixel_alpha) -- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; -- else -- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; -- - blnd_cfg.overlap_only = false; - blnd_cfg.global_gain = 0xff; - -+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; -+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; -+ } else if (per_pixel_alpha) { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; -+ } else { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; -+ } -+ - if (pipe_ctx->plane_state->global_alpha) - blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; - else -@@ -2767,7 +2802,7 @@ void dcn10_program_pipe( - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, -- pipe_ctx->pipe_dlg_param.vready_offset, -+ calculate_vready_offset_for_group(pipe_ctx), - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); -@@ -3107,7 +3142,9 @@ void dcn10_wait_for_mpcc_disconnect( - if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { - struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); - -- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); -+ if (pipe_ctx->stream_res.tg && -+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) -+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); - pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; - hubp->funcs->set_blank(hubp, true); - } -@@ -3176,13 +3213,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) - - static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) - { -- struct pipe_ctx *test_pipe; -+ struct pipe_ctx *test_pipe, *split_pipe; - const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; -- const struct rect *r1 = &scl_data->recout, *r2; -- int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; -+ struct rect r1 = scl_data->recout, r2, r2_half; -+ int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; - int cur_layer = pipe_ctx->plane_state->layer_index; -- bool upper_pipe_exists = false; -- struct fixed31_32 one = dc_fixpt_from_int(1); - - /** - * Disable the cursor 
if there's another pipe above this with a -@@ -3191,26 +3226,35 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) - */ - for (test_pipe = pipe_ctx->top_pipe; test_pipe; - test_pipe = test_pipe->top_pipe) { -- if (!test_pipe->plane_state->visible) -+ // Skip invisible layer and pipe-split plane on same layer -+ if (!test_pipe->plane_state || -+ !test_pipe->plane_state->visible || -+ test_pipe->plane_state->layer_index == cur_layer) - continue; - -- r2 = &test_pipe->plane_res.scl_data.recout; -- r2_r = r2->x + r2->width; -- r2_b = r2->y + r2->height; -+ r2 = test_pipe->plane_res.scl_data.recout; -+ r2_r = r2.x + r2.width; -+ r2_b = r2.y + r2.height; -+ split_pipe = test_pipe; - -- if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) -- return true; -+ /** -+ * There is another half plane on same layer because of -+ * pipe-split, merge together per same height. -+ */ -+ for (split_pipe = pipe_ctx->top_pipe; split_pipe; -+ split_pipe = split_pipe->top_pipe) -+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { -+ r2_half = split_pipe->plane_res.scl_data.recout; -+ r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; -+ r2.width = r2.width + r2_half.width; -+ r2_r = r2.x + r2.width; -+ break; -+ } - -- if (test_pipe->plane_state->layer_index < cur_layer) -- upper_pipe_exists = true; -+ if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) -+ return true; - } - -- // if plane scaled, assume an upper plane can handle cursor if it exists. -- if (upper_pipe_exists && -- (scl_data->ratios.horz.value != one.value || -- scl_data->ratios.vert.value != one.value)) -- return true; -- - return false; - } - -@@ -3508,7 +3552,7 @@ void dcn10_calc_vupdate_position( - { - const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; - int vline_int_offset_from_vupdate = -- pipe_ctx->stream->periodic_interrupt0.lines_offset; -+ pipe_ctx->stream->periodic_interrupt.lines_offset; - int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); - int start_position; - -@@ -3533,18 +3577,10 @@ void dcn10_calc_vupdate_position( - static void dcn10_cal_vline_position( - struct dc *dc, - struct pipe_ctx *pipe_ctx, -- enum vline_select vline, - uint32_t *start_line, - uint32_t *end_line) - { -- enum vertical_interrupt_ref_point ref_point = INVALID_POINT; -- -- if (vline == VLINE0) -- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point; -- else if (vline == VLINE1) -- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point; -- -- switch (ref_point) { -+ switch (pipe_ctx->stream->periodic_interrupt.ref_point) { - case START_V_UPDATE: - dcn10_calc_vupdate_position( - dc, -@@ -3553,7 +3589,9 @@ static void dcn10_cal_vline_position( - end_line); - break; - case START_V_SYNC: -- // Suppose to do nothing because vsync is 0; -+ // vsync is line 0 so start_line is just the requested line offset -+ *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset; -+ *end_line = *start_line + 2; - break; - default: - ASSERT(0); -@@ -3563,24 +3601,15 @@ static void dcn10_cal_vline_position( - - void dcn10_setup_periodic_interrupt( - struct dc *dc, -- struct pipe_ctx *pipe_ctx, -- enum vline_select vline) -+ struct pipe_ctx *pipe_ctx) - { - struct timing_generator *tg = pipe_ctx->stream_res.tg; -+ uint32_t start_line = 0; -+ uint32_t end_line = 0; - -- if (vline == VLINE0) { -- uint32_t start_line = 0; -- uint32_t end_line = 0; -- -- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line); -+ 
dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line); - -- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); -- -- } else if (vline == VLINE1) { -- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1( -- tg, -- pipe_ctx->stream->periodic_interrupt1.lines_offset); -- } -+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); - } - - void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h -index 9ae07c77fdc01..0ef7bf7ddb75e 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h -+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h -@@ -175,8 +175,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx); - void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx); - void dcn10_setup_periodic_interrupt( - struct dc *dc, -- struct pipe_ctx *pipe_ctx, -- enum vline_select vline); -+ struct pipe_ctx *pipe_ctx); - enum dc_status dcn10_set_clock(struct dc *dc, - enum dc_clock_type clock_type, - uint32_t clk_khz, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c -index 34001a30d449a..10e613ec7d24f 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c -@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { - .get_clock = dcn10_get_clock, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, -+ .power_down = dce110_power_down, - .set_backlight_level = dce110_set_backlight_level, - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, - .set_pipe = dce110_set_pipe, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c -index 11019c2c62ccb..d3681db36c30b 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c -@@ -126,6 +126,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) - while (tmp_mpcc != NULL) { - if (tmp_mpcc->dpp_id == dpp_id) - return tmp_mpcc; -+ -+ /* avoid circular linked list */ -+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); -+ if (tmp_mpcc == tmp_mpcc->mpcc_bot) -+ break; -+ - tmp_mpcc = tmp_mpcc->mpcc_bot; - } - return NULL; -@@ -201,8 +207,9 @@ struct mpcc *mpc1_insert_plane( - /* check insert_above_mpcc exist in tree->opp_list */ - struct mpcc *temp_mpcc = tree->opp_list; - -- while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) -- temp_mpcc = temp_mpcc->mpcc_bot; -+ if (temp_mpcc != insert_above_mpcc) -+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) -+ temp_mpcc = temp_mpcc->mpcc_bot; - if (temp_mpcc == NULL) - return NULL; - } -diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c -index 37848f4577b18..92fee47278e5a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c -@@ -480,6 +480,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable) - OTG_CLOCK_ON, 1, - 1, 1000); - } else { -+ -+ //last chance to clear underflow, otherwise, it will always there due to clock is off. 
-+ if (optc->funcs->is_optc_underflow_occurred(optc) == true) -+ optc->funcs->clear_optc_underflow(optc); -+ - REG_UPDATE_2(OTG_CLOCK_CONTROL, - OTG_CLOCK_GATE_DIS, 0, - OTG_CLOCK_EN, 0); -diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c -index a47ba1d45be92..bf2a8f53694b4 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c -@@ -1513,6 +1513,7 @@ static void dcn20_update_dchubp_dpp( - /* Any updates are handled in dc interface, just need - * to apply existing for plane enable / opp change */ - if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed -+ || pipe_ctx->update_flags.bits.plane_changed - || pipe_ctx->stream->update_flags.bits.gamut_remap - || pipe_ctx->stream->update_flags.bits.out_csc) { - /* dpp/cm gamut remap*/ -@@ -1563,6 +1564,31 @@ static void dcn20_update_dchubp_dpp( - hubp->funcs->set_blank(hubp, false); - } - -+static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) -+{ -+ struct pipe_ctx *other_pipe; -+ int vready_offset = pipe->pipe_dlg_param.vready_offset; -+ -+ /* Always use the largest vready_offset of all connected pipes */ -+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { -+ if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) -+ vready_offset = other_pipe->pipe_dlg_param.vready_offset; -+ } -+ -+ return vready_offset; -+} - - static void dcn20_program_pipe( - struct dc *dc, -@@ -1581,7 +1607,7 @@ static void dcn20_program_pipe( - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, -- pipe_ctx->pipe_dlg_param.vready_offset, -+ calculate_vready_offset_for_group(pipe_ctx), - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); -@@ -1764,7 +1790,7 @@ void dcn20_post_unlock_program_front_end( - - for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 - && hubp->funcs->hubp_is_flip_pending(hubp); j++) -- mdelay(1); -+ udelay(1); - } - } - -@@ -1874,7 +1900,7 @@ bool dcn20_update_bandwidth( - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, -- pipe_ctx->pipe_dlg_param.vready_offset, -+ calculate_vready_offset_for_group(pipe_ctx), - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); -@@ -2297,14 +2323,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) - struct mpc *mpc = dc->res_pool->mpc; - struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - -- if (per_pixel_alpha) -- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; -- else -- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; -- - 
blnd_cfg.overlap_only = false; - blnd_cfg.global_gain = 0xff; - -+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; -+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; -+ } else if (per_pixel_alpha) { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; -+ } else { -+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; -+ } -+ - if (pipe_ctx->plane_state->global_alpha) - blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; - else -diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c -index 947eb0df3f125..142fc0a3a536c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c -@@ -532,6 +532,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) - while (tmp_mpcc != NULL) { - if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id) - return tmp_mpcc; -+ -+ /* avoid circular linked list */ -+ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot); -+ if (tmp_mpcc == tmp_mpcc->mpcc_bot) -+ break; -+ - tmp_mpcc = tmp_mpcc->mpcc_bot; - } - return NULL; -diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c -index e3e01b17c164e..ede11eb120d4f 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c -@@ -1854,7 +1854,9 @@ static void swizzle_to_dml_params( - case DC_SW_VAR_D_X: - *sw_mode = dm_sw_var_d_x; - break; -- -+ case DC_SW_VAR_R_X: -+ *sw_mode = dm_sw_var_r_x; -+ break; - default: - ASSERT(0); /* Not supported */ - break; -@@ -3152,7 +3154,7 @@ void dcn20_calculate_dlg_params( - - context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, - &context->res_ctx.pipe_ctx[i].rq_regs, -- pipes[pipe_idx].pipe); -+ &pipes[pipe_idx].pipe); - pipe_idx++; - } - } -@@ -3668,16 +3670,22 @@ static bool init_soc_bounding_box(struct dc *dc, - clock_limits_available = (status == PP_SMU_RESULT_OK); - } - -- if (clock_limits_available && uclk_states_available && num_states) -+ if (clock_limits_available && uclk_states_available && num_states) { -+ DC_FP_START(); - dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); -- else if (clock_limits_available) -+ DC_FP_END(); -+ } else if (clock_limits_available) { -+ DC_FP_START(); - dcn20_cap_soc_clocks(loaded_bb, max_clocks); -+ DC_FP_END(); -+ } - } - - loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; - loaded_ip->max_num_dpp = pool->base.pipe_count; -+ DC_FP_START(); - dcn20_patch_bounding_box(dc, loaded_bb); -- -+ DC_FP_END(); - return true; - } - -@@ -3697,8 +3705,6 @@ static bool dcn20_resource_construct( - enum dml_project dml_project_version = - get_dml_project_version(ctx->asic_id.hw_internal_rev); - -- DC_FP_START(); -- - ctx->dc_bios->regs = &bios_regs; - pool->base.funcs = &dcn20_res_pool_funcs; - -@@ -4047,12 +4053,10 @@ static bool dcn20_resource_construct( - pool->base.oem_device = NULL; - } - -- DC_FP_END(); - return true; - - create_fail: - -- DC_FP_END(); - dcn20_resource_destruct(pool); - - return false; -diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c -index 36044cb8ec834..1c0f56d8ba8bb 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c -@@ 
-67,9 +67,15 @@ static uint32_t convert_and_clamp( - void dcn21_dchvm_init(struct hubbub *hubbub) - { - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); -- uint32_t riommu_active; -+ uint32_t riommu_active, prefetch_done; - int i; - -+ REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done); -+ -+ if (prefetch_done) { -+ hubbub->riommu_active = true; -+ return; -+ } - //Init DCHVM block - REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); - -diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c -index fbbdf99761838..5b8274b8c3845 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c -@@ -1428,6 +1428,7 @@ static struct clock_source *dcn21_clock_source_create( - return &clk_src->base; - } - -+ kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; - } -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c -index 23a52d47e61c4..0601c17426af2 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c -@@ -355,8 +355,11 @@ void dpp3_set_cursor_attributes( - int cur_rom_en = 0; - - if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || -- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) -- cur_rom_en = 1; -+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { -+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { -+ cur_rom_en = 1; -+ } -+ } - - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c -index f4414de96acc5..152c9c5733f1c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c -@@ -448,6 +448,7 @@ static const struct hubbub_funcs hubbub30_funcs = { - .program_watermarks = hubbub3_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, -+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .init_watermarks = hubbub3_init_watermarks, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c -index f246125232482..33c2337c4edf3 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c -@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr( - VMID, address->vmid); - - if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) { -- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1); -+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0); - REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1); - - } else { -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c -index fafed1e4a998d..f834573758113 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c -@@ -570,6 +570,9 @@ void dcn30_init_hw(struct dc *dc) - } - } - -+ if (hws->funcs.enable_power_gating_plane) -+ hws->funcs.enable_power_gating_plane(dc->hwseq, true); -+ - /* If taking 
control over from VBIOS, we may want to optimize our first - * mode set, so we need to skip powering down pipes until we know which - * pipes we want to use. -@@ -647,8 +650,6 @@ void dcn30_init_hw(struct dc *dc) - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } -- if (hws->funcs.enable_power_gating_plane) -- hws->funcs.enable_power_gating_plane(dc->hwseq, true); - - if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) - dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); -@@ -1002,7 +1003,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, - /* turning off DPG */ - pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false); - for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) -- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); -+ if (mpcc_pipe->plane_res.hubp) -+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); - - stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space, - color_depth, solid_color, width, height, offset); -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c -index a0de309475a97..735c92a5aa36a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c -@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, -- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, -+ .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, -@@ -1856,7 +1856,7 @@ static struct pipe_ctx *dcn30_find_split_pipe( - return pipe; - } - --static noinline bool dcn30_internal_validate_bw( -+noinline bool dcn30_internal_validate_bw( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, -@@ -1879,7 +1879,6 @@ static noinline bool dcn30_internal_validate_bw( - dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - -- DC_FP_START(); - if (!pipe_cnt) { - out = true; - goto validate_out; -@@ -2103,7 +2102,6 @@ validate_fail: - out = false; - - validate_out: -- DC_FP_END(); - return out; - } - -@@ -2304,7 +2302,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, - - BW_VAL_TRACE_COUNT(); - -+ DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); -+ DC_FP_END(); - - if (pipe_cnt == 0) - goto validate_out; -diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h -index b754b89beadfb..b92e4cc0232f2 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h -+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h -@@ -55,6 +55,13 @@ unsigned int dcn30_calc_max_scaled_time( - - bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); -+bool dcn30_internal_validate_bw( -+ struct dc *dc, -+ struct dc_state *context, -+ display_e2e_pipe_params_st *pipes, -+ int *pipe_cnt_out, -+ int *vlevel_out, -+ bool fast_validate); - void dcn30_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c -index 1e3bd2e9cdcc4..a046664e20316 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c -@@ -60,6 +60,7 @@ static const struct hubbub_funcs hubbub301_funcs = { - .program_watermarks = hubbub3_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, -+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .hubbub_read_state = hubbub2_read_state, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c -index 912285fdce18e..dea358b01791c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c -@@ -863,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, -- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, -+ .pipe_split_policy = MPC_SPLIT_AVOID, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, -@@ -1622,12 +1622,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b - dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); - } - -+static void calculate_wm_set_for_vlevel( -+ int vlevel, -+ struct wm_range_table_entry *table_entry, -+ struct dcn_watermarks *wm_set, -+ struct display_mode_lib *dml, -+ display_e2e_pipe_params_st *pipes, -+ int pipe_cnt) -+{ -+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; -+ -+ ASSERT(vlevel < dml->soc.num_states); -+ /* only pipe 0 is read for voltage and dcf/soc clocks */ -+ pipes[0].clks_cfg.voltage = vlevel; -+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; -+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; -+ -+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; -+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; -+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; -+ -+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; -+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; -+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; -+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; -+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; -+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; -+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; -+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; -+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; -+ -+} -+ -+static void dcn301_calculate_wm_and_dlg( -+ struct dc *dc, struct dc_state *context, -+ display_e2e_pipe_params_st *pipes, -+ int pipe_cnt, -+ int vlevel_req) -+{ -+ int i, pipe_idx; -+ int vlevel, vlevel_max; -+ struct wm_range_table_entry *table_entry; -+ struct clk_bw_params *bw_params = 
dc->clk_mgr->bw_params; -+ -+ ASSERT(bw_params); -+ -+ vlevel_max = bw_params->clk_table.num_entries - 1; -+ -+ /* WM Set D */ -+ table_entry = &bw_params->wm_table.entries[WM_D]; -+ if (table_entry->wm_type == WM_TYPE_RETRAINING) -+ vlevel = 0; -+ else -+ vlevel = vlevel_max; -+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, -+ &context->bw_ctx.dml, pipes, pipe_cnt); -+ /* WM Set C */ -+ table_entry = &bw_params->wm_table.entries[WM_C]; -+ vlevel = min(max(vlevel_req, 2), vlevel_max); -+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, -+ &context->bw_ctx.dml, pipes, pipe_cnt); -+ /* WM Set B */ -+ table_entry = &bw_params->wm_table.entries[WM_B]; -+ vlevel = min(max(vlevel_req, 1), vlevel_max); -+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, -+ &context->bw_ctx.dml, pipes, pipe_cnt); -+ -+ /* WM Set A */ -+ table_entry = &bw_params->wm_table.entries[WM_A]; -+ vlevel = min(vlevel_req, vlevel_max); -+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, -+ &context->bw_ctx.dml, pipes, pipe_cnt); -+ -+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { -+ if (!context->res_ctx.pipe_ctx[i].stream) -+ continue; -+ -+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); -+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); -+ -+ if (dc->config.forced_clocks) { -+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; -+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; -+ } -+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) -+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; -+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) -+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; -+ -+ pipe_idx++; -+ } -+ -+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -+} -+ - static struct resource_funcs dcn301_res_pool_funcs = { - .destroy = dcn301_destroy_resource_pool, - .link_enc_create = dcn301_link_encoder_create, - .panel_cntl_create = dcn301_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, -- .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, -+ .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c -index 7d3ff5d444023..2292bb82026e2 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c -@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, -- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, -+ .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c -index dd38796ba30ad..9d9b0d343c6b3 100644 ---- 
a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c -@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, -- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, -+ .pipe_split_policy = MPC_SPLIT_AVOID, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, -@@ -500,7 +500,7 @@ static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ -- if (eng_id <= ENGINE_ID_DIGE) { -+ if (eng_id <= ENGINE_ID_DIGB) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else -@@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param - dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; - } -+ -+ // WA: patch strobe modes to compensate for DCN303 BW issue -+ if (dcn3_03_soc.num_chans <= 4) { -+ for (i = 0; i < dcn3_03_soc.num_states; i++) { -+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) -+ break; -+ -+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { -+ dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; -+ dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; -+ } -+ } -+ } -+ - /* re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); - if (dc->current_state) -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c -index b0892443fbd57..c7c27a605f159 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c -@@ -168,9 +168,7 @@ void enc31_hw_init(struct link_encoder *enc) - AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 - AUX_RX_DETECTION_THRESHOLD [30:28] = 1 - */ -- AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); -- -- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); -+ // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init - - //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; - // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c -index 90c73a1cb9861..208d2dc8b1d1a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c -@@ -24,6 +24,7 @@ - */ - - -+#include - #include "dcn30/dcn30_hubbub.h" - #include "dcn31_hubbub.h" - #include "dm_services.h" -@@ -138,8 +139,11 @@ static uint32_t convert_and_clamp( - ret_val = wm_ns * refclk_mhz; - ret_val /= 1000; - -- if (ret_val > clamp_value) -+ if (ret_val > clamp_value) { -+ /* clamping WMs is abnormal, unexpected and may lead to underflow*/ -+ ASSERT(0); - ret_val = clamp_value; -+ } - - return ret_val; - } -@@ -159,7 +163,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { - hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - 
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); - -@@ -193,7 +197,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { - hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); - } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) -@@ -203,7 +207,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { - hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); - -@@ -237,7 +241,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { - hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); - } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) -@@ -247,7 +251,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { - hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); - -@@ -281,7 +285,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { - hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); - } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) -@@ -291,7 +295,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { - hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); - -@@ -325,7 +329,7 @@ static bool hubbub31_program_urgent_watermarks( - if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { - hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); - } 
else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) -@@ -351,7 +355,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" -@@ -367,7 +371,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->a.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" -@@ -383,7 +387,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" -@@ -399,7 +403,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->a.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" -@@ -416,7 +420,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" -@@ -432,7 +436,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->b.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" -@@ -448,7 +452,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" -@@ -464,7 +468,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->b.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, - 
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" -@@ -481,7 +485,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" -@@ -497,7 +501,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->c.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" -@@ -513,7 +517,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" -@@ -529,7 +533,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->c.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" -@@ -546,7 +550,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" -@@ -562,7 +566,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->d.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" -@@ -578,7 +582,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" -@@ -594,7 +598,7 @@ static bool hubbub31_program_stutter_watermarks( - watermarks->d.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_z8_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - 
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" -@@ -625,7 +629,7 @@ static bool hubbub31_program_pstate_watermarks( - watermarks->a.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.pstate_change_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" -@@ -642,7 +646,7 @@ static bool hubbub31_program_pstate_watermarks( - watermarks->b.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.pstate_change_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" -@@ -659,7 +663,7 @@ static bool hubbub31_program_pstate_watermarks( - watermarks->c.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.pstate_change_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" -@@ -676,7 +680,7 @@ static bool hubbub31_program_pstate_watermarks( - watermarks->d.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.pstate_change_ns, -- refclk_mhz, 0x1fffff); -+ refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" -@@ -946,6 +950,65 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, - } - } - -+static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) -+{ -+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); -+ -+ /* -+ * Pstate latency is ~20us so if we wait over 40us and pstate allow -+ * still not asserted, we are probably stuck and going to hang -+ */ -+ const unsigned int pstate_wait_timeout_us = 100; -+ const unsigned int pstate_wait_expected_timeout_us = 40; -+ -+ static unsigned int max_sampled_pstate_wait_us; /* data collection */ -+ static bool forced_pstate_allow; /* help with revert wa */ -+ -+ unsigned int debug_data = 0; -+ unsigned int i; -+ -+ if (forced_pstate_allow) { -+ /* we hacked to force pstate allow to prevent hang last time -+ * we verify_allow_pstate_change_high. so disable force -+ * here so we can check status -+ */ -+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, -+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, -+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); -+ forced_pstate_allow = false; -+ } -+ -+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); -+ -+ for (i = 0; i < pstate_wait_timeout_us; i++) { -+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); -+ -+ /* Debug bit is specific to ASIC. 
*/ -+ if (debug_data & (1 << 26)) { -+ if (i > pstate_wait_expected_timeout_us) -+ DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); -+ return true; -+ } -+ if (max_sampled_pstate_wait_us < i) -+ max_sampled_pstate_wait_us = i; -+ -+ udelay(1); -+ } -+ -+ /* force pstate allow to prevent system hang -+ * and break to debugger to investigate -+ */ -+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, -+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, -+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); -+ forced_pstate_allow = true; -+ -+ DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", -+ debug_data); -+ -+ return false; -+} -+ - static const struct hubbub_funcs hubbub31_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, -@@ -958,6 +1021,7 @@ static const struct hubbub_funcs hubbub31_funcs = { - .program_watermarks = hubbub31_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, -+ .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, - .program_det_size = dcn31_program_det_size, - .program_compbuf_size = dcn31_program_compbuf_size, - .init_crb = dcn31_init_crb, -@@ -979,5 +1043,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31, - hubbub31->detile_buf_size = det_size_kb * 1024; - hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; - hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; -+ -+ hubbub31->debug_test_index_pstate = 0x6; - } - -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c -index 53b792b997b7e..127055044cf1a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c -@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { - .hubp_init = hubp3_init, - .set_unbounded_requesting = hubp31_set_unbounded_requesting, - .hubp_soft_reset = hubp31_soft_reset, -+ .hubp_set_flip_int = hubp1_set_flip_int, - .hubp_in_blank = hubp1_in_blank, - }; - -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c -index 3afa1159a5f7d..b72d080b302a1 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c -@@ -204,6 +204,9 @@ void dcn31_init_hw(struct dc *dc) - } - } - -+ if (hws->funcs.enable_power_gating_plane) -+ hws->funcs.enable_power_gating_plane(dc->hwseq, true); -+ - /* If taking control over from VBIOS, we may want to optimize our first - * mode set, so we need to skip powering down pipes until we know which - * pipes we want to use. 
-@@ -287,8 +290,6 @@ void dcn31_init_hw(struct dc *dc) - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } -- if (hws->funcs.enable_power_gating_plane) -- hws->funcs.enable_power_gating_plane(dc->hwseq, true); - - if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) - dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c -index 40011cd3c8ef0..4e9fe090b770a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c -@@ -100,6 +100,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = { - .z10_save_init = dcn31_z10_save_init, - .is_abm_supported = dcn31_is_abm_supported, - .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, -+ .optimize_pwr_state = dcn21_optimize_pwr_state, -+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn20_update_visual_confirm_color, - }; - -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c -index 79e92ecca96c1..e224c52132581 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c -@@ -352,6 +352,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(3, D), - clk_src_regs(4, E) - }; -+/*pll_id being rempped in dmub, in driver it is logical instance*/ -+static const struct dce110_clk_src_regs clk_src_regs_b0[] = { -+ clk_src_regs(0, A), -+ clk_src_regs(1, B), -+ clk_src_regs(2, F), -+ clk_src_regs(3, G), -+ clk_src_regs(4, E) -+}; - - static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -@@ -462,7 +470,8 @@ static const struct dcn30_afmt_mask afmt_mask = { - SE_DCN3_REG_LIST(id)\ - } - --static const struct dcn10_stream_enc_registers stream_enc_regs[] = { -+/* Some encoders won't be initialized here - but they're logical, not physical. */ -+static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), -@@ -923,7 +932,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, -- .pipe_split_policy = MPC_SPLIT_AVOID, -+ .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, -@@ -931,7 +940,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .max_downscale_src_width = 4096,/*upto true 4K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, -- .sanity_checks = false, -+ .sanity_checks = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, -@@ -948,6 +957,7 @@ static const struct dc_debug_options debug_defaults_drv = { - .optc = false, - } - }, -+ .disable_z10 = true, - .optimize_edp_link_rate = true, - .enable_sw_cntl_psr = true, - }; -@@ -1284,12 +1294,6 @@ static struct stream_encoder *dcn31_stream_encoder_create( - if (!enc1 || !vpg || !afmt) - return NULL; - -- if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && -- ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { -- if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD)) -- eng_id = eng_id + 3; // For B0 only. C->F, D->G. 
-- } -- - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], -@@ -1660,6 +1664,15 @@ static void dcn31_calculate_wm_and_dlg_fp( - if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) - dcfclk = context->bw_ctx.dml.soc.min_dcfclk; - -+ /* We don't recalculate clocks for 0 pipe configs, which can block -+ * S0i3 as high clocks will block low power states -+ * Override any clocks that can block S0i3 to min here -+ */ -+ if (pipe_cnt == 0) { -+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0 -+ return; -+ } -+ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dcfclk; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; -@@ -1785,6 +1798,60 @@ static void dcn31_calculate_wm_and_dlg( - DC_FP_END(); - } - -+bool dcn31_validate_bandwidth(struct dc *dc, -+ struct dc_state *context, -+ bool fast_validate) -+{ -+ bool out = false; -+ -+ BW_VAL_TRACE_SETUP(); -+ -+ int vlevel = 0; -+ int pipe_cnt = 0; -+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); -+ DC_LOGGER_INIT(dc->ctx->logger); -+ -+ BW_VAL_TRACE_COUNT(); -+ -+ DC_FP_START(); -+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); -+ DC_FP_END(); -+ -+ // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg -+ if (pipe_cnt == 0) -+ fast_validate = false; -+ -+ if (!out) -+ goto validate_fail; -+ -+ BW_VAL_TRACE_END_VOLTAGE_LEVEL(); -+ -+ if (fast_validate) { -+ BW_VAL_TRACE_SKIP(fast); -+ goto validate_out; -+ } -+ -+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); -+ -+ BW_VAL_TRACE_END_WATERMARKS(); -+ -+ goto validate_out; -+ -+validate_fail: -+ DC_LOG_WARNING("Mode Validation Warning: %s failed alidation.\n", -+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); -+ -+ BW_VAL_TRACE_SKIP(fail); -+ out = false; -+ -+validate_out: -+ kfree(pipes); -+ -+ BW_VAL_TRACE_FINISH(); -+ -+ return out; -+} -+ - static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap - }; -@@ -1867,7 +1934,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, -- .validate_bandwidth = dcn30_validate_bandwidth, -+ .validate_bandwidth = dcn31_validate_bandwidth, - .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, -@@ -2019,14 +2086,27 @@ static bool dcn31_resource_construct( - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); -- pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = -+ /*move phypllx_pixclk_resync to dmub next*/ -+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { -+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = -+ dcn30_clock_source_create(ctx, ctx->dc_bios, -+ CLOCK_SOURCE_COMBO_PHY_PLL2, -+ &clk_src_regs_b0[2], false); -+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = -+ dcn30_clock_source_create(ctx, ctx->dc_bios, -+ CLOCK_SOURCE_COMBO_PHY_PLL3, -+ &clk_src_regs_b0[3], false); -+ } else { -+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], 
false); -- pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = -+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); -+ } -+ - pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, -diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h -index 93571c9769967..cc4bed675588c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h -+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h -@@ -39,4 +39,35 @@ struct resource_pool *dcn31_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -+/*temp: B0 specific before switch to dcn313 headers*/ -+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL -+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e -+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 -+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f -+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 -+ -+//PHYPLLF_PIXCLK_RESYNC_CNTL -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L -+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L -+ -+//PHYPLLG_PIXCLK_RESYNC_CNTL -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8 -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L -+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L -+#endif - #endif /* _DCN31_RESOURCE_H_ */ -diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile -index 56055df2e8d2e..9009b92490f34 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/Makefile -+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile -@@ -70,6 +70,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram - CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) - CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) - CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) -+CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) - CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) - 
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) -@@ -84,6 +85,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcfla - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags) - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags) - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) -+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags) - endif - CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) - CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) -@@ -99,6 +101,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o - DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o - DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o - DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o -+DML += dsc/rc_calc_fpu.o - endif - - AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c -index d3b5b6fedf042..6266b0788387e 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c -@@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 - * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - if (mode_lib->vba.ODMCapability) { - if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } - } -@@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - locals->RequiredDISPCLK[i][j] = 0.0; - locals->DISPCLK_DPPCLK_Support[i][j] = true; - for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { - locals->NoOfDPP[i][j][k] = 1; - locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c -index 63bbdf8b8678b..0053a6d5178c9 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c -@@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode - mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 - * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - if (mode_lib->vba.ODMCapability) { - if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } - } -@@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode - locals->RequiredDISPCLK[i][j] = 0.0; - locals->DISPCLK_DPPCLK_Support[i][j] = true; - for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { - locals->NoOfDPP[i][j][k] = 1; - locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c -index 2091dd8c252da..8c168f348a27f 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c -@@ -768,12 +768,12 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, - - void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - display_rq_params_st rq_param = {0}; - - memset(rq_regs, 0, sizeof(*rq_regs)); -- dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); -+ dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param->src); - extract_rq_regs(mode_lib, rq_regs, rq_param); - - print__rq_regs_st(mode_lib, *rq_regs); -@@ -1549,7 +1549,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, - void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h -index d0b90947f5409..8b23867e97c18 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h -+++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h -@@ -43,7 +43,7 @@ struct display_mode_lib; - void dml20_rq_dlg_get_rq_reg( - struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - - - // Function: dml_rq_dlg_get_dlg_reg -@@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg( - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c -index 1a0c14e465faa..26ececfd40cdc 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c -@@ -768,12 +768,12 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, - - void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - display_rq_params_st rq_param = {0}; - - memset(rq_regs, 0, sizeof(*rq_regs)); -- dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); -+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param->src); - extract_rq_regs(mode_lib, rq_regs, rq_param); - - print__rq_regs_st(mode_lib, *rq_regs); -@@ -1550,7 +1550,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, - void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h -index 27cf8bed9376f..2b4e46ea1c3df 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h -@@ -43,7 +43,7 @@ struct display_mode_lib; - void dml20v2_rq_dlg_get_rq_reg( - struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - - - // Function: dml_rq_dlg_get_dlg_reg -@@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg( - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c -index 4136eb8256cb5..26f839ce710f5 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c -@@ -3979,17 +3979,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 - * (1 + 
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - if (mode_lib->vba.ODMCapability) { - if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { -- locals->ODMCombineEnablePerState[i][k] = true; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; - } - } -@@ -4042,7 +4042,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - locals->RequiredDISPCLK[i][j] = 0.0; - locals->DISPCLK_DPPCLK_Support[i][j] = true; - for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { -- locals->ODMCombineEnablePerState[i][k] = false; -+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; - if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { - locals->NoOfDPP[i][j][k] = 1; - locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] -@@ -5218,7 +5218,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - mode_lib->vba.ODMCombineEnabled[k] = - locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; - } else { -- mode_lib->vba.ODMCombineEnabled[k] = false; -+ mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled; - } - mode_lib->vba.DSCEnabled[k] = - locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c -index 287e31052b307..736978c4d40a1 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c -@@ -694,7 +694,7 @@ static void get_surf_rq_param( - display_data_rq_sizing_params_st *rq_sizing_param, - display_data_rq_dlg_params_st *rq_dlg_param, - display_data_rq_misc_params_st *rq_misc_param, -- const display_pipe_params_st pipe_param, -+ const display_pipe_params_st *pipe_param, - bool is_chroma) - { - bool mode_422 = false; -@@ -706,30 +706,30 @@ static void get_surf_rq_param( - - // FIXME check if ppe apply for both luma and chroma in 422 case - if (is_chroma) { -- vp_width = pipe_param.src.viewport_width_c / ppe; -- vp_height = pipe_param.src.viewport_height_c; -- data_pitch = pipe_param.src.data_pitch_c; -- meta_pitch = pipe_param.src.meta_pitch_c; -+ vp_width = pipe_param->src.viewport_width_c / ppe; -+ vp_height = pipe_param->src.viewport_height_c; -+ data_pitch = pipe_param->src.data_pitch_c; -+ meta_pitch = pipe_param->src.meta_pitch_c; - } else { -- vp_width = pipe_param.src.viewport_width / ppe; -- vp_height = pipe_param.src.viewport_height; -- data_pitch = 
pipe_param.src.data_pitch; -- meta_pitch = pipe_param.src.meta_pitch; -+ vp_width = pipe_param->src.viewport_width / ppe; -+ vp_height = pipe_param->src.viewport_height; -+ data_pitch = pipe_param->src.data_pitch; -+ meta_pitch = pipe_param->src.meta_pitch; - } - -- if (pipe_param.dest.odm_combine) { -+ if (pipe_param->dest.odm_combine) { - unsigned int access_dir; - unsigned int full_src_vp_width; - unsigned int hactive_half; - unsigned int src_hactive_half; -- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -- hactive_half = pipe_param.dest.hactive / 2; -+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -+ hactive_half = pipe_param->dest.hactive / 2; - if (is_chroma) { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; -- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; -+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half; - } else { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; -- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; -+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half; - } - - if (access_dir == 0) { -@@ -754,7 +754,7 @@ static void get_surf_rq_param( - rq_sizing_param->meta_chunk_bytes = 2048; - rq_sizing_param->min_meta_chunk_bytes = 256; - -- if (pipe_param.src.hostvm) -+ if (pipe_param->src.hostvm) - rq_sizing_param->mpte_group_bytes = 512; - else - rq_sizing_param->mpte_group_bytes = 2048; -@@ -768,23 +768,23 @@ static void get_surf_rq_param( - vp_height, - data_pitch, - meta_pitch, -- pipe_param.src.source_format, -- pipe_param.src.sw_mode, -- pipe_param.src.macro_tile_size, -- pipe_param.src.source_scan, -- pipe_param.src.hostvm, -+ pipe_param->src.source_format, -+ pipe_param->src.sw_mode, -+ pipe_param->src.macro_tile_size, -+ pipe_param->src.source_scan, -+ pipe_param->src.hostvm, - is_chroma); - } - - static void dml_rq_dlg_get_rq_params( - struct display_mode_lib *mode_lib, - display_rq_params_st *rq_param, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - // get param for luma surface -- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 -- || pipe_param.src.source_format == dm_420_10; -- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; -+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 -+ || pipe_param->src.source_format == dm_420_10; -+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - - get_surf_rq_param( - mode_lib, -@@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params( - pipe_param, - 0); - -- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { -+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { - // get param for chroma surface - get_surf_rq_param( - mode_lib, -@@ -806,14 +806,14 @@ static void dml_rq_dlg_get_rq_params( - } - - // calculate how to split the det buffer space between luma and chroma -- handle_det_buf_split(mode_lib, rq_param, pipe_param.src); -+ handle_det_buf_split(mode_lib, rq_param, pipe_param->src); - print__rq_params_st(mode_lib, 
*rq_param); - } - - void dml21_rq_dlg_get_rq_reg( - struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - display_rq_params_st rq_param = {0}; - -@@ -1658,7 +1658,7 @@ void dml21_rq_dlg_get_dlg_reg( - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -@@ -1696,7 +1696,7 @@ void dml21_rq_dlg_get_dlg_reg( - // system parameter calculation done - - dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); -- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); -+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); - dml_rq_dlg_get_dlg_params( - mode_lib, - e2e_pipe_param, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h -index e8f7785e3fc63..af6ad0ca9cf8a 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h -@@ -44,7 +44,7 @@ struct display_mode_lib; - void dml21_rq_dlg_get_rq_reg( - struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - - // Function: dml_rq_dlg_get_dlg_reg - // Calculate and return DLG and TTU register struct given the system setting -@@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg( - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -index e3d9f1decdfc7..de0fa87b301a5 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -@@ -1868,7 +1868,10 @@ static unsigned int CalculateVMAndRowBytes( - } - - if (SurfaceTiling == dm_sw_linear) { -- *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1)); -+ if (PTEBufferSizeInRequests == 0) -+ *dpte_row_height = 1; -+ else -+ *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1)); - *dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth; - *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize; - } else if (ScanDirection != dm_vert) { -@@ -6658,8 +6661,7 @@ static double CalculateUrgentLatency( - return ret; - } - -- --static void UseMinimumDCFCLK( -+static noinline_for_stack void UseMinimumDCFCLK( - struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, - int MaxPrefetchMode, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c -index 0d934fae1c3a6..2120e0941a095 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c -+++ 
b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c -@@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, - display_data_rq_sizing_params_st *rq_sizing_param, - display_data_rq_dlg_params_st *rq_dlg_param, - display_data_rq_misc_params_st *rq_misc_param, -- const display_pipe_params_st pipe_param, -+ const display_pipe_params_st *pipe_param, - bool is_chroma, - bool is_alpha) - { -@@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, - - // FIXME check if ppe apply for both luma and chroma in 422 case - if (is_chroma | is_alpha) { -- vp_width = pipe_param.src.viewport_width_c / ppe; -- vp_height = pipe_param.src.viewport_height_c; -- data_pitch = pipe_param.src.data_pitch_c; -- meta_pitch = pipe_param.src.meta_pitch_c; -- surface_height = pipe_param.src.surface_height_y / 2.0; -+ vp_width = pipe_param->src.viewport_width_c / ppe; -+ vp_height = pipe_param->src.viewport_height_c; -+ data_pitch = pipe_param->src.data_pitch_c; -+ meta_pitch = pipe_param->src.meta_pitch_c; -+ surface_height = pipe_param->src.surface_height_y / 2.0; - } else { -- vp_width = pipe_param.src.viewport_width / ppe; -- vp_height = pipe_param.src.viewport_height; -- data_pitch = pipe_param.src.data_pitch; -- meta_pitch = pipe_param.src.meta_pitch; -- surface_height = pipe_param.src.surface_height_y; -+ vp_width = pipe_param->src.viewport_width / ppe; -+ vp_height = pipe_param->src.viewport_height; -+ data_pitch = pipe_param->src.data_pitch; -+ meta_pitch = pipe_param->src.meta_pitch; -+ surface_height = pipe_param->src.surface_height_y; - } - -- if (pipe_param.dest.odm_combine) { -+ if (pipe_param->dest.odm_combine) { - unsigned int access_dir = 0; - unsigned int full_src_vp_width = 0; - unsigned int hactive_odm = 0; - unsigned int src_hactive_odm = 0; -- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -- hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2); -+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2); - if (is_chroma) { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; -- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; -+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; - } else { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; -- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; -+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; - } - - if (access_dir == 0) { -@@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, - rq_sizing_param->meta_chunk_bytes = 2048; - rq_sizing_param->min_meta_chunk_bytes = 256; - -- if (pipe_param.src.hostvm) -+ if (pipe_param->src.hostvm) - rq_sizing_param->mpte_group_bytes = 512; - else - rq_sizing_param->mpte_group_bytes = 2048; -@@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, - vp_height, - data_pitch, - meta_pitch, -- pipe_param.src.source_format, -- pipe_param.src.sw_mode, -- 
pipe_param.src.macro_tile_size, -- pipe_param.src.source_scan, -- pipe_param.src.hostvm, -+ pipe_param->src.source_format, -+ pipe_param->src.sw_mode, -+ pipe_param->src.macro_tile_size, -+ pipe_param->src.source_scan, -+ pipe_param->src.hostvm, - is_chroma, - surface_height); - } - - static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, - display_rq_params_st *rq_param, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - // get param for luma surface -- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 -- || pipe_param.src.source_format == dm_420_10 -- || pipe_param.src.source_format == dm_rgbe_alpha -- || pipe_param.src.source_format == dm_420_12; -+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 -+ || pipe_param->src.source_format == dm_420_10 -+ || pipe_param->src.source_format == dm_rgbe_alpha -+ || pipe_param->src.source_format == dm_420_12; - -- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; -+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - -- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0; -+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0; - - get_surf_rq_param(mode_lib, - &(rq_param->sizing.rq_l), -@@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, - 0, - 0); - -- if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) { -+ if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) { - // get param for chroma surface - get_surf_rq_param(mode_lib, - &(rq_param->sizing.rq_c), -@@ -871,13 +871,13 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, - } - - // calculate how to split the det buffer space between luma and chroma -- handle_det_buf_split(mode_lib, rq_param, pipe_param.src); -+ handle_det_buf_split(mode_lib, rq_param, pipe_param->src); - print__rq_params_st(mode_lib, *rq_param); - } - - void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param) -+ const display_pipe_params_st *pipe_param) - { - display_rq_params_st rq_param = { 0 }; - -@@ -1831,7 +1831,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, - void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -@@ -1866,7 +1866,7 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - // system parameter calculation done - - dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); -- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); -+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); - dml_rq_dlg_get_dlg_params(mode_lib, - e2e_pipe_param, - num_pipes, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h -index c04965cceff35..625e41f8d5751 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h -@@ -41,7 +41,7 @@ struct display_mode_lib; - // See also: - void dml30_rq_dlg_get_rq_reg(struct display_mode_lib 
*mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - - // Function: dml_rq_dlg_get_dlg_reg - // Calculate and return DLG and TTU register struct given the system setting -@@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -index d58925cff420e..aa0507e017926 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -@@ -259,33 +259,13 @@ static void CalculateRowBandwidth( - - static void CalculateFlipSchedule( - struct display_mode_lib *mode_lib, -+ unsigned int k, - double HostVMInefficiencyFactor, - double UrgentExtraLatency, - double UrgentLatency, -- unsigned int GPUVMMaxPageTableLevels, -- bool HostVMEnable, -- unsigned int HostVMMaxNonCachedPageTableLevels, -- bool GPUVMEnable, -- double HostVMMinPageSize, - double PDEAndMetaPTEBytesPerFrame, - double MetaRowBytes, -- double DPTEBytesPerRow, -- double BandwidthAvailableForImmediateFlip, -- unsigned int TotImmediateFlipBytes, -- enum source_format_class SourcePixelFormat, -- double LineTime, -- double VRatio, -- double VRatioChroma, -- double Tno_bw, -- bool DCCEnable, -- unsigned int dpte_row_height, -- unsigned int meta_row_height, -- unsigned int dpte_row_height_chroma, -- unsigned int meta_row_height_chroma, -- double *DestinationLinesToRequestVMInImmediateFlip, -- double *DestinationLinesToRequestRowInImmediateFlip, -- double *final_flip_bw, -- bool *ImmediateFlipSupportedForPipe); -+ double DPTEBytesPerRow); - static double CalculateWriteBackDelay( - enum source_format_class WritebackPixelFormat, - double WritebackHRatio, -@@ -319,64 +299,28 @@ static void CalculateVupdateAndDynamicMetadataParameters( - static void CalculateWatermarksAndDRAMSpeedChangeSupport( - struct display_mode_lib *mode_lib, - unsigned int PrefetchMode, -- unsigned int NumberOfActivePlanes, -- unsigned int MaxLineBufferLines, -- unsigned int LineBufferSize, -- unsigned int WritebackInterfaceBufferSize, - double DCFCLK, - double ReturnBW, -- bool SynchronizedVBlank, -- unsigned int dpte_group_bytes[], -- unsigned int MetaChunkSize, - double UrgentLatency, - double ExtraLatency, -- double WritebackLatency, -- double WritebackChunkSize, - double SOCCLK, -- double DRAMClockChangeLatency, -- double SRExitTime, -- double SREnterPlusExitTime, -- double SRExitZ8Time, -- double SREnterPlusExitZ8Time, - double DCFCLKDeepSleep, - unsigned int DETBufferSizeY[], - unsigned int DETBufferSizeC[], - unsigned int SwathHeightY[], - unsigned int SwathHeightC[], -- unsigned int LBBitPerPixel[], - double SwathWidthY[], - double SwathWidthC[], -- double HRatio[], -- double HRatioChroma[], -- unsigned int vtaps[], -- unsigned int VTAPsChroma[], -- double VRatio[], -- double VRatioChroma[], -- unsigned int HTotal[], -- double PixelClock[], -- unsigned int BlendingAndTiming[], - unsigned int DPPPerPlane[], - double BytePerPixelDETY[], - double BytePerPixelDETC[], -- double DSTXAfterScaler[], -- double DSTYAfterScaler[], -- bool 
WritebackEnable[], -- enum source_format_class WritebackPixelFormat[], -- double WritebackDestinationWidth[], -- double WritebackDestinationHeight[], -- double WritebackSourceHeight[], - bool UnboundedRequestEnabled, - int unsigned CompressedBufferSizeInkByte, - enum clock_change_support *DRAMClockChangeSupport, -- double *UrgentWatermark, -- double *WritebackUrgentWatermark, -- double *DRAMClockChangeWatermark, -- double *WritebackDRAMClockChangeWatermark, - double *StutterExitWatermark, - double *StutterEnterPlusExitWatermark, - double *Z8StutterExitWatermark, -- double *Z8StutterEnterPlusExitWatermark, -- double *MinActiveDRAMClockChangeLatencySupported); -+ double *Z8StutterEnterPlusExitWatermark); - - static void CalculateDCFCLKDeepSleep( - struct display_mode_lib *mode_lib, -@@ -2959,33 +2903,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman - for (k = 0; k < v->NumberOfActivePlanes; ++k) { - CalculateFlipSchedule( - mode_lib, -+ k, - HostVMInefficiencyFactor, - v->UrgentExtraLatency, - v->UrgentLatency, -- v->GPUVMMaxPageTableLevels, -- v->HostVMEnable, -- v->HostVMMaxNonCachedPageTableLevels, -- v->GPUVMEnable, -- v->HostVMMinPageSize, - v->PDEAndMetaPTEBytesFrame[k], - v->MetaRowByte[k], -- v->PixelPTEBytesPerRow[k], -- v->BandwidthAvailableForImmediateFlip, -- v->TotImmediateFlipBytes, -- v->SourcePixelFormat[k], -- v->HTotal[k] / v->PixelClock[k], -- v->VRatio[k], -- v->VRatioChroma[k], -- v->Tno_bw[k], -- v->DCCEnable[k], -- v->dpte_row_height[k], -- v->meta_row_height[k], -- v->dpte_row_height_chroma[k], -- v->meta_row_height_chroma[k], -- &v->DestinationLinesToRequestVMInImmediateFlip[k], -- &v->DestinationLinesToRequestRowInImmediateFlip[k], -- &v->final_flip_bw[k], -- &v->ImmediateFlipSupportedForPipe[k]); -+ v->PixelPTEBytesPerRow[k]); - } - - v->total_dcn_read_bw_with_flip = 0.0; -@@ -3072,64 +2996,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman - CalculateWatermarksAndDRAMSpeedChangeSupport( - mode_lib, - PrefetchMode, -- v->NumberOfActivePlanes, -- v->MaxLineBufferLines, -- v->LineBufferSize, -- v->WritebackInterfaceBufferSize, - v->DCFCLK, - v->ReturnBW, -- v->SynchronizedVBlank, -- v->dpte_group_bytes, -- v->MetaChunkSize, - v->UrgentLatency, - v->UrgentExtraLatency, -- v->WritebackLatency, -- v->WritebackChunkSize, - v->SOCCLK, -- v->DRAMClockChangeLatency, -- v->SRExitTime, -- v->SREnterPlusExitTime, -- v->SRExitZ8Time, -- v->SREnterPlusExitZ8Time, - v->DCFCLKDeepSleep, - v->DETBufferSizeY, - v->DETBufferSizeC, - v->SwathHeightY, - v->SwathHeightC, -- v->LBBitPerPixel, - v->SwathWidthY, - v->SwathWidthC, -- v->HRatio, -- v->HRatioChroma, -- v->vtaps, -- v->VTAPsChroma, -- v->VRatio, -- v->VRatioChroma, -- v->HTotal, -- v->PixelClock, -- v->BlendingAndTiming, - v->DPPPerPlane, - v->BytePerPixelDETY, - v->BytePerPixelDETC, -- v->DSTXAfterScaler, -- v->DSTYAfterScaler, -- v->WritebackEnable, -- v->WritebackPixelFormat, -- v->WritebackDestinationWidth, -- v->WritebackDestinationHeight, -- v->WritebackSourceHeight, - v->UnboundedRequestEnabled, - v->CompressedBufferSizeInkByte, - &DRAMClockChangeSupport, -- &v->UrgentWatermark, -- &v->WritebackUrgentWatermark, -- &v->DRAMClockChangeWatermark, -- &v->WritebackDRAMClockChangeWatermark, - &v->StutterExitWatermark, - &v->StutterEnterPlusExitWatermark, - &v->Z8StutterExitWatermark, -- &v->Z8StutterEnterPlusExitWatermark, -- &v->MinActiveDRAMClockChangeLatencySupported); -+ &v->Z8StutterEnterPlusExitWatermark); - - for (k = 0; k < 
v->NumberOfActivePlanes; ++k) { - if (v->WritebackEnable[k] == true) { -@@ -3741,61 +3629,43 @@ static void CalculateRowBandwidth( - - static void CalculateFlipSchedule( - struct display_mode_lib *mode_lib, -+ unsigned int k, - double HostVMInefficiencyFactor, - double UrgentExtraLatency, - double UrgentLatency, -- unsigned int GPUVMMaxPageTableLevels, -- bool HostVMEnable, -- unsigned int HostVMMaxNonCachedPageTableLevels, -- bool GPUVMEnable, -- double HostVMMinPageSize, - double PDEAndMetaPTEBytesPerFrame, - double MetaRowBytes, -- double DPTEBytesPerRow, -- double BandwidthAvailableForImmediateFlip, -- unsigned int TotImmediateFlipBytes, -- enum source_format_class SourcePixelFormat, -- double LineTime, -- double VRatio, -- double VRatioChroma, -- double Tno_bw, -- bool DCCEnable, -- unsigned int dpte_row_height, -- unsigned int meta_row_height, -- unsigned int dpte_row_height_chroma, -- unsigned int meta_row_height_chroma, -- double *DestinationLinesToRequestVMInImmediateFlip, -- double *DestinationLinesToRequestRowInImmediateFlip, -- double *final_flip_bw, -- bool *ImmediateFlipSupportedForPipe) -+ double DPTEBytesPerRow) - { -+ struct vba_vars_st *v = &mode_lib->vba; - double min_row_time = 0.0; - unsigned int HostVMDynamicLevelsTrips; - double TimeForFetchingMetaPTEImmediateFlip; - double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; -+ double LineTime = v->HTotal[k] / v->PixelClock[k]; - -- if (GPUVMEnable == true && HostVMEnable == true) { -- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; -+ if (v->GPUVMEnable == true && v->HostVMEnable == true) { -+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels; - } else { - HostVMDynamicLevelsTrips = 0; - } - -- if (GPUVMEnable == true || DCCEnable == true) { -- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes; -+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) { -+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes; - } - -- if (GPUVMEnable == true) { -+ if (v->GPUVMEnable == true) { - TimeForFetchingMetaPTEImmediateFlip = dml_max3( -- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, -- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), -+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, -+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), - LineTime / 4.0); - } else { - TimeForFetchingMetaPTEImmediateFlip = 0; - } - -- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; -- if ((GPUVMEnable == true || DCCEnable == true)) { -+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; -+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { - TimeForFetchingRowInVBlankImmediateFlip = dml_max3( - (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW, - UrgentLatency * (HostVMDynamicLevelsTrips + 1), -@@ -3804,54 +3674,54 @@ static void CalculateFlipSchedule( - TimeForFetchingRowInVBlankImmediateFlip = 0; - } - -- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; -+ 
v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; - -- if (GPUVMEnable == true) { -- *final_flip_bw = dml_max( -- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), -- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime)); -- } else if ((GPUVMEnable == true || DCCEnable == true)) { -- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime); -+ if (v->GPUVMEnable == true) { -+ v->final_flip_bw[k] = dml_max( -+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime), -+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime)); -+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { -+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime); - } else { -- *final_flip_bw = 0; -+ v->final_flip_bw[k] = 0; - } - -- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) { -- if (GPUVMEnable == true && DCCEnable != true) { -- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma); -- } else if (GPUVMEnable != true && DCCEnable == true) { -- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma); -+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) { -+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { -+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); -+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { -+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); - } else { - min_row_time = dml_min4( -- dpte_row_height * LineTime / VRatio, -- meta_row_height * LineTime / VRatio, -- dpte_row_height_chroma * LineTime / VRatioChroma, -- meta_row_height_chroma * LineTime / VRatioChroma); -+ v->dpte_row_height[k] * LineTime / v->VRatio[k], -+ v->meta_row_height[k] * LineTime / v->VRatio[k], -+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k], -+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); - } - } else { -- if (GPUVMEnable == true && DCCEnable != true) { -- min_row_time = dpte_row_height * LineTime / VRatio; -- } else if (GPUVMEnable != true && DCCEnable == true) { -- min_row_time = meta_row_height * LineTime / VRatio; -+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { -+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k]; -+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { -+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k]; - } else { -- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio); -+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]); - } - } - -- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16 -+ if 
(v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16 - || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) { -- *ImmediateFlipSupportedForPipe = false; -+ v->ImmediateFlipSupportedForPipe[k] = false; - } else { -- *ImmediateFlipSupportedForPipe = true; -+ v->ImmediateFlipSupportedForPipe[k] = true; - } - - #ifdef __DML_VBA_DEBUG__ -- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip); -- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip); -+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]); -+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]); - dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip); - dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip); - dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time); -- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe); -+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]); - #endif - - } -@@ -5477,33 +5347,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - for (k = 0; k < v->NumberOfActivePlanes; k++) { - CalculateFlipSchedule( - mode_lib, -+ k, - HostVMInefficiencyFactor, - v->ExtraLatency, - v->UrgLatency[i], -- v->GPUVMMaxPageTableLevels, -- v->HostVMEnable, -- v->HostVMMaxNonCachedPageTableLevels, -- v->GPUVMEnable, -- v->HostVMMinPageSize, - v->PDEAndMetaPTEBytesPerFrame[i][j][k], - v->MetaRowBytes[i][j][k], -- v->DPTEBytesPerRow[i][j][k], -- v->BandwidthAvailableForImmediateFlip, -- v->TotImmediateFlipBytes, -- v->SourcePixelFormat[k], -- v->HTotal[k] / v->PixelClock[k], -- v->VRatio[k], -- v->VRatioChroma[k], -- v->Tno_bw[k], -- v->DCCEnable[k], -- v->dpte_row_height[k], -- v->meta_row_height[k], -- v->dpte_row_height_chroma[k], -- v->meta_row_height_chroma[k], -- &v->DestinationLinesToRequestVMInImmediateFlip[k], -- &v->DestinationLinesToRequestRowInImmediateFlip[k], -- &v->final_flip_bw[k], -- &v->ImmediateFlipSupportedForPipe[k]); -+ v->DPTEBytesPerRow[i][j][k]); - } - v->total_dcn_read_bw_with_flip = 0.0; - for (k = 0; k < v->NumberOfActivePlanes; k++) { -@@ -5561,64 +5411,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - CalculateWatermarksAndDRAMSpeedChangeSupport( - mode_lib, - v->PrefetchModePerState[i][j], -- v->NumberOfActivePlanes, -- v->MaxLineBufferLines, -- v->LineBufferSize, -- v->WritebackInterfaceBufferSize, - v->DCFCLKState[i][j], - v->ReturnBWPerState[i][j], -- v->SynchronizedVBlank, -- v->dpte_group_bytes, -- v->MetaChunkSize, - v->UrgLatency[i], - v->ExtraLatency, -- v->WritebackLatency, -- v->WritebackChunkSize, - v->SOCCLKPerState[i], -- v->DRAMClockChangeLatency, -- v->SRExitTime, -- v->SREnterPlusExitTime, -- v->SRExitZ8Time, -- v->SREnterPlusExitZ8Time, - v->ProjectedDCFCLKDeepSleep[i][j], - v->DETBufferSizeYThisState, - v->DETBufferSizeCThisState, - v->SwathHeightYThisState, - v->SwathHeightCThisState, -- v->LBBitPerPixel, - v->SwathWidthYThisState, - 
v->SwathWidthCThisState, -- v->HRatio, -- v->HRatioChroma, -- v->vtaps, -- v->VTAPsChroma, -- v->VRatio, -- v->VRatioChroma, -- v->HTotal, -- v->PixelClock, -- v->BlendingAndTiming, - v->NoOfDPPThisState, - v->BytePerPixelInDETY, - v->BytePerPixelInDETC, -- v->DSTXAfterScaler, -- v->DSTYAfterScaler, -- v->WritebackEnable, -- v->WritebackPixelFormat, -- v->WritebackDestinationWidth, -- v->WritebackDestinationHeight, -- v->WritebackSourceHeight, - UnboundedRequestEnabledThisState, - CompressedBufferSizeInkByteThisState, - &v->DRAMClockChangeSupport[i][j], -- &v->UrgentWatermark, -- &v->WritebackUrgentWatermark, -- &v->DRAMClockChangeWatermark, -- &v->WritebackDRAMClockChangeWatermark, -- &dummy, - &dummy, - &dummy, - &dummy, -- &v->MinActiveDRAMClockChangeLatencySupported); -+ &dummy); - } - } - -@@ -5743,64 +5557,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - static void CalculateWatermarksAndDRAMSpeedChangeSupport( - struct display_mode_lib *mode_lib, - unsigned int PrefetchMode, -- unsigned int NumberOfActivePlanes, -- unsigned int MaxLineBufferLines, -- unsigned int LineBufferSize, -- unsigned int WritebackInterfaceBufferSize, - double DCFCLK, - double ReturnBW, -- bool SynchronizedVBlank, -- unsigned int dpte_group_bytes[], -- unsigned int MetaChunkSize, - double UrgentLatency, - double ExtraLatency, -- double WritebackLatency, -- double WritebackChunkSize, - double SOCCLK, -- double DRAMClockChangeLatency, -- double SRExitTime, -- double SREnterPlusExitTime, -- double SRExitZ8Time, -- double SREnterPlusExitZ8Time, - double DCFCLKDeepSleep, - unsigned int DETBufferSizeY[], - unsigned int DETBufferSizeC[], - unsigned int SwathHeightY[], - unsigned int SwathHeightC[], -- unsigned int LBBitPerPixel[], - double SwathWidthY[], - double SwathWidthC[], -- double HRatio[], -- double HRatioChroma[], -- unsigned int vtaps[], -- unsigned int VTAPsChroma[], -- double VRatio[], -- double VRatioChroma[], -- unsigned int HTotal[], -- double PixelClock[], -- unsigned int BlendingAndTiming[], - unsigned int DPPPerPlane[], - double BytePerPixelDETY[], - double BytePerPixelDETC[], -- double DSTXAfterScaler[], -- double DSTYAfterScaler[], -- bool WritebackEnable[], -- enum source_format_class WritebackPixelFormat[], -- double WritebackDestinationWidth[], -- double WritebackDestinationHeight[], -- double WritebackSourceHeight[], - bool UnboundedRequestEnabled, - int unsigned CompressedBufferSizeInkByte, - enum clock_change_support *DRAMClockChangeSupport, -- double *UrgentWatermark, -- double *WritebackUrgentWatermark, -- double *DRAMClockChangeWatermark, -- double *WritebackDRAMClockChangeWatermark, - double *StutterExitWatermark, - double *StutterEnterPlusExitWatermark, - double *Z8StutterExitWatermark, -- double *Z8StutterEnterPlusExitWatermark, -- double *MinActiveDRAMClockChangeLatencySupported) -+ double *Z8StutterEnterPlusExitWatermark) - { - struct vba_vars_st *v = &mode_lib->vba; - double EffectiveLBLatencyHidingY; -@@ -5820,103 +5598,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - double TotalPixelBW = 0.0; - int k, j; - -- *UrgentWatermark = UrgentLatency + ExtraLatency; -+ v->UrgentWatermark = UrgentLatency + ExtraLatency; - - #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); - dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency); -- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark); -+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, 
v->UrgentWatermark); - #endif - -- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark; -+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark; - - #ifdef __DML_VBA_DEBUG__ -- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency); -- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark); -+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency); -+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark); - #endif - - v->TotalActiveWriteback = 0; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (WritebackEnable[k] == true) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (v->WritebackEnable[k] == true) { - v->TotalActiveWriteback = v->TotalActiveWriteback + 1; - } - } - - if (v->TotalActiveWriteback <= 1) { -- *WritebackUrgentWatermark = WritebackLatency; -+ v->WritebackUrgentWatermark = v->WritebackLatency; - } else { -- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; -+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; - } - - if (v->TotalActiveWriteback <= 1) { -- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency; -+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency; - } else { -- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; -+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; - } - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { - TotalPixelBW = TotalPixelBW -- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) -- / (HTotal[k] / PixelClock[k]); -+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) -+ / (v->HTotal[k] / v->PixelClock[k]); - } - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { - double EffectiveDETBufferSizeY = DETBufferSizeY[k]; - - v->LBLatencyHidingSourceLinesY = dml_min( -- (double) MaxLineBufferLines, -- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1); -+ (double) v->MaxLineBufferLines, -+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1); - - v->LBLatencyHidingSourceLinesC = dml_min( -- (double) MaxLineBufferLines, -- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1); -+ (double) v->MaxLineBufferLines, -+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1); - -- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]); -+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]); - -- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]); -+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]); - - 
if (UnboundedRequestEnabled) { - EffectiveDETBufferSizeY = EffectiveDETBufferSizeY -- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW; -+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW; - } - - LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; - LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); -- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; -+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k]; - if (BytePerPixelDETC[k] > 0) { - LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k]; - LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]); -- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k]; -+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k]; - } else { - LinesInDETC = 0; - FullDETBufferingTimeC = 999999; - } - - ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY -- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; -+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - -- if (NumberOfActivePlanes > 1) { -+ if (v->NumberOfActivePlanes > 1) { - ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY -- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k]; -+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; - } - - if (BytePerPixelDETC[k] > 0) { - ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC -- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; -+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - -- if (NumberOfActivePlanes > 1) { -+ if (v->NumberOfActivePlanes > 1) { - ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC -- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k]; -+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k]; - } - v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC); - } else { - v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY; - } - -- if (WritebackEnable[k] == true) { -- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 -- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4); -- if (WritebackPixelFormat[k] == dm_444_64) { -+ if (v->WritebackEnable[k] == true) { -+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 -+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * 
v->HTotal[k] / v->PixelClock[k]) * 4); -+ if (v->WritebackPixelFormat[k] == dm_444_64) { - WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2; - } - WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark; -@@ -5926,14 +5704,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - - v->MinActiveDRAMClockChangeMargin = 999999; - PlaneWithMinActiveDRAMClockChangeMargin = 0; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { - if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) { - v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k]; -- if (BlendingAndTiming[k] == k) { -+ if (v->BlendingAndTiming[k] == k) { - PlaneWithMinActiveDRAMClockChangeMargin = k; - } else { -- for (j = 0; j < NumberOfActivePlanes; ++j) { -- if (BlendingAndTiming[k] == j) { -+ for (j = 0; j < v->NumberOfActivePlanes; ++j) { -+ if (v->BlendingAndTiming[k] == j) { - PlaneWithMinActiveDRAMClockChangeMargin = j; - } - } -@@ -5941,11 +5719,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - } - } - -- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency; -+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ; - - SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) - && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) { - SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k]; - } -@@ -5953,25 +5731,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - - v->TotalNumberOfActiveOTG = 0; - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (BlendingAndTiming[k] == k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (v->BlendingAndTiming[k] == k) { - v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1; - } - } - - if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) { - *DRAMClockChangeSupport = dm_dram_clock_change_vactive; -- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 -+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 - || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) { - *DRAMClockChangeSupport = dm_dram_clock_change_vblank; - } else { - *DRAMClockChangeSupport = dm_dram_clock_change_unsupported; - } - -- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; -- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); -- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); -+ *Z8StutterExitWatermark = 
v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; - - #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark); -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c -index c23905bc733ae..57bd4e3f8a823 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c -@@ -738,7 +738,7 @@ static void get_surf_rq_param( - display_data_rq_sizing_params_st *rq_sizing_param, - display_data_rq_dlg_params_st *rq_dlg_param, - display_data_rq_misc_params_st *rq_misc_param, -- const display_pipe_params_st pipe_param, -+ const display_pipe_params_st *pipe_param, - bool is_chroma, - bool is_alpha) - { -@@ -752,33 +752,33 @@ static void get_surf_rq_param( - - // FIXME check if ppe apply for both luma and chroma in 422 case - if (is_chroma | is_alpha) { -- vp_width = pipe_param.src.viewport_width_c / ppe; -- vp_height = pipe_param.src.viewport_height_c; -- data_pitch = pipe_param.src.data_pitch_c; -- meta_pitch = pipe_param.src.meta_pitch_c; -- surface_height = pipe_param.src.surface_height_y / 2.0; -+ vp_width = pipe_param->src.viewport_width_c / ppe; -+ vp_height = pipe_param->src.viewport_height_c; -+ data_pitch = pipe_param->src.data_pitch_c; -+ meta_pitch = pipe_param->src.meta_pitch_c; -+ surface_height = pipe_param->src.surface_height_y / 2.0; - } else { -- vp_width = pipe_param.src.viewport_width / ppe; -- vp_height = pipe_param.src.viewport_height; -- data_pitch = pipe_param.src.data_pitch; -- meta_pitch = pipe_param.src.meta_pitch; -- surface_height = pipe_param.src.surface_height_y; -+ vp_width = pipe_param->src.viewport_width / ppe; -+ vp_height = pipe_param->src.viewport_height; -+ data_pitch = pipe_param->src.data_pitch; -+ meta_pitch = pipe_param->src.meta_pitch; -+ surface_height = pipe_param->src.surface_height_y; - } - -- if (pipe_param.dest.odm_combine) { -+ if (pipe_param->dest.odm_combine) { - unsigned int access_dir; - unsigned int full_src_vp_width; - unsigned int hactive_odm; - unsigned int src_hactive_odm; - -- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -- hactive_odm = pipe_param.dest.hactive / ((unsigned int) pipe_param.dest.odm_combine * 2); -+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed -+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2); - if (is_chroma) { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; -- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; -+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; - } else { -- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; -- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; -+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; -+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; - } - - if (access_dir == 0) { -@@ -808,7 +808,7 @@ static void get_surf_rq_param( - 
rq_sizing_param->meta_chunk_bytes = 2048; - rq_sizing_param->min_meta_chunk_bytes = 256; - -- if (pipe_param.src.hostvm) -+ if (pipe_param->src.hostvm) - rq_sizing_param->mpte_group_bytes = 512; - else - rq_sizing_param->mpte_group_bytes = 2048; -@@ -822,38 +822,38 @@ static void get_surf_rq_param( - vp_height, - data_pitch, - meta_pitch, -- pipe_param.src.source_format, -- pipe_param.src.sw_mode, -- pipe_param.src.macro_tile_size, -- pipe_param.src.source_scan, -- pipe_param.src.hostvm, -+ pipe_param->src.source_format, -+ pipe_param->src.sw_mode, -+ pipe_param->src.macro_tile_size, -+ pipe_param->src.source_scan, -+ pipe_param->src.hostvm, - is_chroma, - surface_height); - } - --static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st pipe_param) -+static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param) - { - // get param for luma surface -- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 || pipe_param.src.source_format == dm_420_10 || pipe_param.src.source_format == dm_rgbe_alpha -- || pipe_param.src.source_format == dm_420_12; -+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha -+ || pipe_param->src.source_format == dm_420_12; - -- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; -+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - -- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha) ? 1 : 0; -+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0; - - get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0); - -- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { -+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { - // get param for chroma surface - get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha); - } - - // calculate how to split the det buffer space between luma and chroma -- handle_det_buf_split(mode_lib, rq_param, pipe_param.src); -+ handle_det_buf_split(mode_lib, rq_param, pipe_param->src); - print__rq_params_st(mode_lib, *rq_param); - } - --void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st pipe_param) -+void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param) - { - display_rq_params_st rq_param = {0}; - -@@ -1677,7 +1677,7 @@ void dml31_rq_dlg_get_dlg_reg( - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -@@ -1704,7 +1704,7 @@ void dml31_rq_dlg_get_dlg_reg( - // system parameter calculation done - - dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); -- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); -+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); - dml_rq_dlg_get_dlg_params( - mode_lib, - e2e_pipe_param, -diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h -index adf8518f761f9..8ee991351699d 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h -@@ -41,7 +41,7 @@ struct display_mode_lib; - // See also: - void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - - // Function: dml_rq_dlg_get_dlg_reg - // Calculate and return DLG and TTU register struct given the system setting -@@ -57,7 +57,7 @@ void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, - void dml31_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h -index 1051ca1a23b8a..edb9f7567d6d9 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h -@@ -80,11 +80,11 @@ enum dm_swizzle_mode { - dm_sw_SPARE_13 = 24, - dm_sw_64kb_s_x = 25, - dm_sw_64kb_d_x = 26, -- dm_sw_SPARE_14 = 27, -+ dm_sw_64kb_r_x = 27, - dm_sw_SPARE_15 = 28, - dm_sw_var_s_x = 29, - dm_sw_var_d_x = 30, -- dm_sw_64kb_r_x, -+ dm_sw_var_r_x = 31, - dm_sw_gfx7_2d_thin_l_vp, - dm_sw_gfx7_2d_thin_gl, - }; -diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h -index d42a0aeca6be2..72b1957022aa2 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h -+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h -@@ -49,7 +49,7 @@ struct dml_funcs { - struct display_mode_lib *mode_lib, - display_dlg_regs_st *dlg_regs, - display_ttu_regs_st *ttu_regs, -- display_e2e_pipe_params_st *e2e_pipe_param, -+ const display_e2e_pipe_params_st *e2e_pipe_param, - const unsigned int num_pipes, - const unsigned int pipe_idx, - const bool cstate_en, -@@ -60,7 +60,7 @@ struct dml_funcs { - void (*rq_dlg_get_rq_reg)( - struct display_mode_lib *mode_lib, - display_rq_regs_st *rq_regs, -- const display_pipe_params_st pipe_param); -+ const display_pipe_params_st *pipe_param); - void (*recalculate)(struct display_mode_lib *mode_lib); - void (*validate)(struct display_mode_lib *mode_lib); - }; -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h -new file mode 100644 -index 0000000000000..e5fac9f4181d8 ---- /dev/null -+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h -@@ -0,0 +1,704 @@ -+ -+/* -+ * Copyright 2017 Advanced Micro Devices, Inc. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: AMD -+ * -+ */ -+ -+ -+const qp_table qp_table_422_10bpc_min = { -+ { 6, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, -+ { 6.5, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, -+ { 7, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 7, 9, 9, 9, 11, 15} }, -+ { 7.5, { 0, 2, 4, 6, 6, 6, 6, 7, 7, 7, 8, 9, 9, 11, 15} }, -+ { 8, { 0, 2, 3, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 11, 14} }, -+ { 8.5, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 14} }, -+ { 9, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, -+ { 9.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, -+ { 10, { 0, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -+ {10.5, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -+ { 11, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11} }, -+ {11.5, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 10, 11} }, -+ { 12, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10} }, -+ {12.5, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -+ { 13, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 6, 8, 8, 9} }, -+ {13.5, { 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 6, 7, 8, 9} }, -+ { 14, { 0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8} }, -+ {14.5, { 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8} }, -+ { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 6, 8} }, -+ {15.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -+ { 16, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} }, -+ {16.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} }, -+ { 17, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 6} }, -+ {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -+ { 18, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 5} }, -+ {18.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -+ { 19, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 4} }, -+ {19.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} }, -+ { 20, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 3} } -+}; -+ -+ -+const qp_table qp_table_444_8bpc_max = { -+ { 6, { 4, 6, 8, 8, 9, 9, 9, 10, 11, 12, 12, 12, 12, 13, 15} }, -+ { 6.5, { 4, 6, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 12, 13, 15} }, -+ { 7, { 4, 5, 7, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 13, 14} }, -+ { 7.5, { 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -+ { 8, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -+ { 8.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -+ { 9, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, -+ { 9.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, -+ { 10, { 3, 4, 5, 6, 7, 7, 7, 
8, 9, 9, 10, 10, 11, 11, 12} }, -+ {10.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 12} }, -+ { 11, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11} }, -+ {11.5, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -+ { 12, { 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -+ {12.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -+ { 13, { 1, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, 8, 9, 10} }, -+ {13.5, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} }, -+ { 14, { 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -+ {14.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9} }, -+ { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -+ {15.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -+ { 16, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, -+ {16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, -+ { 17, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, -+ {17.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, -+ { 18, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, -+ {18.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, -+ { 19, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6} }, -+ {19.5, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6} }, -+ { 20, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, -+ {20.5, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, -+ { 21, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -+ {21.5, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -+ { 22, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }, -+ {22.5, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }, -+ { 23, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -+ {23.5, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -+ { 24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_420_12bpc_max = { -+ { 4, {11, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 21, 22} }, -+ { 4.5, {10, 11, 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -+ { 5, { 9, 11, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 19, 20, 21} }, -+ { 5.5, { 8, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 20} }, -+ { 6, { 6, 9, 11, 12, 13, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -+ { 6.5, { 6, 8, 10, 11, 11, 13, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -+ { 7, { 5, 7, 9, 10, 10, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18} }, -+ { 7.5, { 5, 7, 8, 9, 9, 11, 12, 13, 14, 14, 15, 15, 16, 16, 17} }, -+ { 8, { 4, 6, 7, 8, 8, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -+ { 8.5, { 3, 6, 6, 7, 7, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -+ { 9, { 3, 5, 6, 7, 7, 10, 11, 12, 12, 13, 13, 14, 14, 14, 15} }, -+ { 9.5, { 2, 5, 6, 6, 7, 9, 10, 11, 12, 12, 13, 13, 13, 14, 15} }, -+ { 10, { 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 13, 13, 15} }, -+ {10.5, { 2, 3, 5, 5, 6, 7, 8, 9, 11, 11, 12, 12, 12, 12, 14} }, -+ { 11, { 1, 3, 4, 5, 6, 6, 7, 9, 10, 11, 11, 11, 12, 12, 13} }, -+ {11.5, { 1, 2, 3, 4, 5, 6, 6, 8, 9, 10, 10, 11, 11, 11, 13} }, -+ { 12, { 1, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 10, 12} }, -+ {12.5, { 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} }, -+ { 13, { 1, 1, 1, 2, 4, 4, 6, 6, 7, 8, 8, 9, 9, 9, 11} }, -+ {13.5, { 1, 1, 1, 2, 3, 4, 5, 5, 6, 7, 8, 8, 8, 9, 11} }, -+ { 14, { 1, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -+ {14.5, { 0, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -+ { 15, { 0, 1, 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 6, 7, 9} }, -+ {15.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} }, -+ { 16, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7} }, -+ {16.5, { 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 7} }, -+ { 17, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -+ 
{17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -+ { 18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} } -+}; -+ -+ -+const qp_table qp_table_444_10bpc_min = { -+ { 6, { 0, 4, 7, 7, 9, 9, 9, 9, 9, 10, 10, 10, 10, 12, 18} }, -+ { 6.5, { 0, 4, 6, 7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 12, 18} }, -+ { 7, { 0, 4, 6, 6, 8, 8, 8, 8, 8, 9, 9, 10, 10, 12, 17} }, -+ { 7.5, { 0, 4, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 10, 12, 17} }, -+ { 8, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, -+ { 8.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, -+ { 9, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -+ { 9.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -+ { 10, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, -+ {10.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, -+ { 11, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -+ {11.5, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -+ { 12, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -+ {12.5, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 8, 9, 9, 11, 14} }, -+ { 13, { 0, 2, 4, 4, 5, 6, 7, 7, 7, 7, 8, 9, 9, 11, 13} }, -+ {13.5, { 0, 2, 3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 13} }, -+ { 14, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 11, 13} }, -+ {14.5, { 0, 2, 3, 4, 5, 5, 6, 6, 6, 7, 7, 8, 9, 11, 12} }, -+ { 15, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, -+ {15.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, -+ { 16, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, -+ {16.5, { 0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, -+ { 17, { 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 9, 11} }, -+ {17.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 11} }, -+ { 18, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -+ {18.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -+ { 19, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, -+ {19.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, -+ { 20, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 9} }, -+ {20.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 9} }, -+ { 21, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 9} }, -+ {21.5, { 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8} }, -+ { 22, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} }, -+ {22.5, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -+ { 23, { 0, 0, 1, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, -+ {23.5, { 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, -+ { 24, { 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 7} }, -+ {24.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -+ { 25, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -+ {25.5, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -+ { 26, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5} }, -+ {26.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} }, -+ { 27, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -+ {27.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -+ { 28, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} }, -+ {28.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 4} }, -+ { 29, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3} }, -+ {29.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3} }, -+ { 30, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } -+}; -+ -+ -+const qp_table qp_table_420_8bpc_max = { -+ { 4, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 13, 14} }, -+ { 4.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -+ { 5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 12, 13} }, -+ { 5.5, { 3, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} }, -+ { 6, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -+ { 6.5, { 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 
9, 9, 10, 11} }, -+ { 7, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10} }, -+ { 7.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9} }, -+ { 8, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -+ { 8.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} }, -+ { 9, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7} }, -+ { 9.5, { 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -+ { 10, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6} }, -+ {10.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -+ { 11, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5} }, -+ {11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -+ { 12, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} } -+}; -+ -+ -+const qp_table qp_table_444_8bpc_min = { -+ { 6, { 0, 1, 3, 3, 5, 5, 5, 5, 5, 6, 6, 6, 6, 9, 14} }, -+ { 6.5, { 0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 9, 14} }, -+ { 7, { 0, 0, 2, 2, 4, 4, 4, 4, 4, 5, 5, 6, 6, 9, 13} }, -+ { 7.5, { 0, 0, 2, 2, 3, 4, 4, 4, 4, 4, 5, 5, 6, 9, 13} }, -+ { 8, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, -+ { 8.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, -+ { 9, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12} }, -+ { 9.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12} }, -+ { 10, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -+ {10.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -+ { 11, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -+ {11.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -+ { 12, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -+ {12.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 4, 5, 5, 7, 10} }, -+ { 13, { 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 4, 5, 5, 7, 9} }, -+ {13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -+ { 14, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -+ {14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ { 15, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ {15.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ { 16, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -+ {16.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -+ { 17, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -+ {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -+ { 18, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -+ {18.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -+ { 19, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, -+ {19.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, -+ { 20, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -+ {20.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -+ { 21, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -+ {21.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -+ { 22, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, -+ {22.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }, -+ { 23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, -+ {23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, -+ { 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3} } -+}; -+ -+ -+const qp_table qp_table_444_12bpc_min = { -+ { 6, { 0, 5, 11, 11, 13, 13, 13, 13, 13, 14, 14, 14, 14, 17, 22} }, -+ { 6.5, { 0, 5, 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 17, 22} }, -+ { 7, { 0, 5, 10, 10, 12, 12, 12, 12, 12, 13, 13, 14, 14, 17, 21} }, -+ { 7.5, { 0, 5, 9, 10, 11, 12, 12, 12, 12, 12, 13, 13, 14, 17, 21} }, -+ { 8, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, -+ { 8.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, -+ { 9, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -+ { 9.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, 
-+ { 10, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -+ {10.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -+ { 11, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ {11.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ { 12, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ {12.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ { 13, { 0, 4, 7, 8, 9, 11, 11, 11, 11, 11, 13, 13, 13, 15, 17} }, -+ {13.5, { 0, 3, 6, 7, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 17} }, -+ { 14, { 0, 3, 5, 6, 9, 9, 9, 10, 11, 11, 12, 13, 13, 15, 17} }, -+ {14.5, { 0, 2, 5, 6, 8, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ { 15, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ {15.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ { 16, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, -+ {16.5, { 0, 2, 3, 5, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, -+ { 17, { 0, 2, 3, 5, 5, 6, 9, 9, 10, 10, 11, 11, 12, 13, 15} }, -+ {17.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 15} }, -+ { 18, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -+ {18.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -+ { 19, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ {19.5, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ { 20, { 0, 1, 2, 3, 4, 5, 7, 8, 8, 8, 9, 10, 10, 11, 13} }, -+ {20.5, { 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 13} }, -+ { 21, { 0, 1, 2, 3, 4, 5, 5, 7, 7, 8, 9, 10, 10, 11, 13} }, -+ {21.5, { 0, 1, 2, 3, 3, 4, 5, 7, 7, 8, 9, 10, 10, 11, 12} }, -+ { 22, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 12} }, -+ {22.5, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 11} }, -+ { 23, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, -+ {23.5, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, -+ { 24, { 0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 8, 9, 11} }, -+ {24.5, { 0, 0, 1, 2, 3, 4, 4, 6, 6, 7, 8, 8, 8, 9, 11} }, -+ { 25, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 8, 10} }, -+ {25.5, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -+ { 26, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 7, 7, 9} }, -+ {26.5, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 7, 9} }, -+ { 27, { 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -+ {27.5, { 0, 0, 1, 1, 2, 2, 4, 4, 4, 5, 6, 7, 7, 7, 9} }, -+ { 28, { 0, 0, 0, 1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 7, 9} }, -+ {28.5, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} }, -+ { 29, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8} }, -+ {29.5, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7} }, -+ { 30, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 5, 5, 5, 5, 7} }, -+ {30.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, -+ { 31, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, -+ {31.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -+ { 32, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} }, -+ {32.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 6} }, -+ { 33, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -+ {33.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -+ { 34, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 5} }, -+ {34.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 5} }, -+ { 35, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, -+ {35.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4} }, -+ { 36, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } -+}; -+ -+ -+const qp_table qp_table_420_12bpc_min = { -+ { 4, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21} }, -+ { 4.5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -+ { 5, 
{ 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -+ { 5.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -+ { 6, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ { 6.5, { 0, 4, 6, 8, 9, 10, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ { 7, { 0, 3, 5, 7, 9, 10, 10, 11, 11, 11, 13, 13, 13, 15, 17} }, -+ { 7.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ { 8, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ { 8.5, { 0, 2, 4, 6, 6, 9, 9, 10, 11, 11, 12, 12, 13, 14, 15} }, -+ { 9, { 0, 2, 4, 6, 6, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14} }, -+ { 9.5, { 0, 2, 4, 5, 6, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14} }, -+ { 10, { 0, 2, 3, 5, 6, 7, 8, 8, 9, 10, 10, 12, 12, 12, 14} }, -+ {10.5, { 0, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 11, 11, 11, 13} }, -+ { 11, { 0, 2, 3, 4, 5, 5, 6, 8, 8, 9, 9, 10, 11, 11, 12} }, -+ {11.5, { 0, 1, 2, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 10, 12} }, -+ { 12, { 0, 0, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} }, -+ {12.5, { 0, 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 10} }, -+ { 13, { 0, 0, 0, 1, 3, 3, 5, 5, 6, 7, 7, 8, 8, 8, 10} }, -+ {13.5, { 0, 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 7, 8, 10} }, -+ { 14, { 0, 0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 7, 9} }, -+ {14.5, { 0, 0, 0, 0, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} }, -+ { 15, { 0, 0, 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 8} }, -+ {15.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -+ { 16, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 6} }, -+ {16.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -+ { 17, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -+ {17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -+ { 18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_422_12bpc_min = { -+ { 6, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} }, -+ { 6.5, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} }, -+ { 7, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -+ { 7.5, { 0, 4, 8, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -+ { 8, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -+ { 8.5, { 0, 3, 6, 8, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 18} }, -+ { 9, { 0, 3, 5, 8, 9, 10, 10, 10, 11, 11, 12, 13, 13, 15, 17} }, -+ { 9.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 17} }, -+ { 10, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ {10.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -+ { 11, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 12, 13, 14, 15} }, -+ {11.5, { 0, 2, 4, 6, 7, 7, 9, 9, 10, 11, 11, 12, 12, 14, 15} }, -+ { 12, { 0, 2, 4, 6, 6, 6, 8, 8, 9, 9, 11, 11, 12, 13, 14} }, -+ {12.5, { 0, 1, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 13, 14} }, -+ { 13, { 0, 1, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 11, 12, 13} }, -+ {13.5, { 0, 1, 3, 3, 4, 5, 7, 7, 8, 8, 10, 10, 10, 12, 13} }, -+ { 14, { 0, 0, 2, 3, 4, 5, 6, 6, 7, 7, 9, 10, 10, 11, 12} }, -+ {14.5, { 0, 0, 1, 3, 4, 4, 6, 6, 6, 7, 9, 9, 9, 11, 12} }, -+ { 15, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 6, 8, 9, 9, 10, 12} }, -+ {15.5, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 10, 11} }, -+ { 16, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 9, 11} }, -+ {16.5, { 0, 0, 0, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7, 9, 10} }, -+ { 17, { 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 6, 6, 6, 8, 10} }, -+ {17.5, { 0, 0, 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 8, 9} }, -+ { 18, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 6, 7, 9} }, -+ {18.5, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 5, 7, 9} }, -+ { 19, { 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 8} }, -+ {19.5, { 0, 0, 0, 1, 1, 1, 2, 3, 
3, 3, 4, 4, 4, 6, 8} }, -+ { 20, { 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -+ {20.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 7} }, -+ { 21, { 0, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -+ {21.5, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -+ { 22, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 6} }, -+ {22.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} }, -+ { 23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 5} }, -+ {23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} }, -+ { 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_422_12bpc_max = { -+ { 6, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -+ { 6.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -+ { 7, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} }, -+ { 7.5, { 9, 10, 12, 14, 15, 15, 15, 16, 16, 17, 17, 18, 18, 19, 20} }, -+ { 8, { 6, 9, 10, 12, 14, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -+ { 8.5, { 6, 8, 9, 11, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -+ { 9, { 5, 7, 8, 10, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} }, -+ { 9.5, { 5, 7, 7, 9, 10, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} }, -+ { 10, { 4, 6, 6, 8, 9, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -+ {10.5, { 4, 6, 6, 8, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -+ { 11, { 4, 5, 6, 8, 9, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -+ {11.5, { 3, 5, 6, 8, 9, 9, 11, 11, 12, 13, 13, 14, 14, 15, 16} }, -+ { 12, { 3, 5, 6, 8, 8, 8, 10, 10, 11, 11, 13, 13, 14, 14, 15} }, -+ {12.5, { 3, 4, 6, 7, 8, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15} }, -+ { 13, { 2, 4, 5, 6, 7, 7, 9, 10, 10, 11, 12, 12, 13, 13, 14} }, -+ {13.5, { 2, 4, 5, 5, 6, 7, 9, 9, 10, 10, 12, 12, 12, 13, 14} }, -+ { 14, { 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 11, 12, 12, 12, 13} }, -+ {14.5, { 2, 3, 3, 5, 6, 6, 8, 8, 8, 9, 11, 11, 11, 12, 13} }, -+ { 15, { 2, 3, 3, 5, 5, 6, 7, 8, 8, 8, 10, 11, 11, 11, 13} }, -+ {15.5, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 11, 12} }, -+ { 16, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 10, 12} }, -+ {16.5, { 1, 2, 2, 4, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 11} }, -+ { 17, { 1, 1, 2, 3, 4, 4, 6, 6, 6, 7, 8, 8, 8, 9, 11} }, -+ {17.5, { 1, 1, 2, 3, 4, 4, 5, 6, 6, 6, 7, 8, 8, 9, 10} }, -+ { 18, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 7, 7, 8, 8, 10} }, -+ {18.5, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 5, 7, 7, 7, 8, 10} }, -+ { 19, { 1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 7, 9} }, -+ {19.5, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 7, 9} }, -+ { 20, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 6, 8} }, -+ {20.5, { 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} }, -+ { 21, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 7} }, -+ {21.5, { 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -+ { 22, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 7} }, -+ {22.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -+ { 23, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -+ {23.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -+ { 24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} } -+}; -+ -+ -+const qp_table qp_table_444_12bpc_max = { -+ { 6, {12, 14, 16, 16, 17, 17, 17, 18, 19, 20, 20, 20, 20, 21, 23} }, -+ { 6.5, {12, 14, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 20, 21, 23} }, -+ { 7, {12, 13, 15, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 21, 22} }, -+ { 7.5, {12, 13, 14, 15, 15, 16, 16, 17, 18, 18, 19, 19, 20, 21, 22} }, -+ { 8, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -+ { 8.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -+ { 9, {11, 12, 13, 14, 15, 15, 15, 16, 17, 
17, 18, 18, 19, 19, 21} }, -+ { 9.5, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 21} }, -+ { 10, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} }, -+ {10.5, {10, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 18, 19, 20} }, -+ { 11, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 18, 18, 19} }, -+ {11.5, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -+ { 12, { 6, 9, 12, 13, 14, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -+ {12.5, { 6, 9, 12, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -+ { 13, { 5, 9, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 17, 18} }, -+ {13.5, { 5, 8, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} }, -+ { 14, { 5, 8, 10, 11, 12, 12, 12, 13, 14, 14, 15, 16, 16, 16, 18} }, -+ {14.5, { 4, 7, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 17} }, -+ { 15, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -+ {15.5, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -+ { 16, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} }, -+ {16.5, { 4, 5, 7, 8, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} }, -+ { 17, { 4, 5, 7, 8, 8, 9, 11, 11, 12, 12, 12, 13, 13, 14, 16} }, -+ {17.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 16} }, -+ { 18, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} }, -+ {18.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} }, -+ { 19, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} }, -+ {19.5, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} }, -+ { 20, { 2, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11, 11, 12, 14} }, -+ {20.5, { 2, 3, 5, 5, 7, 8, 8, 8, 9, 10, 10, 11, 11, 12, 14} }, -+ { 21, { 2, 3, 5, 5, 7, 7, 7, 8, 8, 9, 10, 11, 11, 12, 14} }, -+ {21.5, { 2, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 12, 13} }, -+ { 22, { 2, 2, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10, 11, 13} }, -+ {22.5, { 2, 2, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12} }, -+ { 23, { 2, 2, 4, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} }, -+ {23.5, { 2, 2, 3, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} }, -+ { 24, { 2, 2, 3, 4, 4, 5, 7, 7, 7, 8, 9, 9, 9, 10, 12} }, -+ {24.5, { 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 12} }, -+ { 25, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 9, 9, 11} }, -+ {25.5, { 1, 1, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 11} }, -+ { 26, { 1, 1, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 8, 8, 10} }, -+ {26.5, { 1, 1, 2, 3, 3, 4, 5, 6, 6, 6, 8, 8, 8, 8, 10} }, -+ { 27, { 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 10} }, -+ {27.5, { 1, 1, 2, 2, 3, 3, 5, 5, 5, 6, 7, 8, 8, 8, 10} }, -+ { 28, { 0, 1, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 8, 10} }, -+ {28.5, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -+ { 29, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 9} }, -+ {29.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8} }, -+ { 30, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6, 6, 6, 8} }, -+ {30.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} }, -+ { 31, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} }, -+ {31.5, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} }, -+ { 32, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 7} }, -+ {32.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 7} }, -+ { 33, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -+ {33.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -+ { 34, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} }, -+ {34.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 6} }, -+ { 35, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }, -+ {35.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 5} }, -+ { 36, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} } -+}; -+ -+ 
-+const qp_table qp_table_420_8bpc_min = { -+ { 4, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 13} }, -+ { 4.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -+ { 5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -+ { 5.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -+ { 6, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -+ { 6.5, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 5, 7, 10} }, -+ { 7, { 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -+ { 7.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ { 8, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ { 8.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -+ { 9, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6} }, -+ { 9.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -+ { 10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5} }, -+ {10.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} }, -+ { 11, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} }, -+ {11.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -+ { 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3} } -+}; -+ -+ -+const qp_table qp_table_422_8bpc_min = { -+ { 6, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -+ { 6.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -+ { 7, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -+ { 7.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -+ { 8, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -+ { 8.5, { 0, 0, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 7, 10} }, -+ { 9, { 0, 0, 0, 1, 2, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -+ { 9.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 9} }, -+ { 10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ {10.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -+ { 11, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -+ {11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -+ { 12, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6} }, -+ {12.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -+ { 13, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5} }, -+ {13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 5} }, -+ { 14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4} }, -+ {14.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} }, -+ { 15, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 4} }, -+ {15.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }, -+ { 16, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} } -+}; -+ -+ -+const qp_table qp_table_422_10bpc_max = { -+ { 6, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -+ { 6.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -+ { 7, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -+ { 7.5, { 5, 6, 8, 10, 11, 11, 11, 12, 12, 13, 13, 14, 14, 15, 16} }, -+ { 8, { 4, 6, 7, 9, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -+ { 8.5, { 4, 5, 6, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15} }, -+ { 9, { 3, 4, 5, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} }, -+ { 9.5, { 3, 4, 4, 6, 6, 8, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -+ { 10, { 2, 3, 3, 5, 5, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ {10.5, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ { 11, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -+ {11.5, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 9, 9, 10, 10, 11, 12} }, -+ { 12, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 8, 9, 9, 10, 10, 11} }, -+ {12.5, { 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -+ { 13, { 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10} }, -+ {13.5, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10} }, -+ { 14, { 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 8, 9} }, -+ {14.5, { 1, 2, 2, 3, 
4, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9} }, -+ { 15, { 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 7, 9} }, -+ {15.5, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 8} }, -+ { 16, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 6, 8} }, -+ {16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7} }, -+ { 17, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 7} }, -+ {17.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} }, -+ { 18, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 6} }, -+ {18.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 6} }, -+ { 19, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 5} }, -+ {19.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 5} }, -+ { 20, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_420_10bpc_max = { -+ { 4, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 17, 18} }, -+ { 4.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -+ { 5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 16, 17} }, -+ { 5.5, { 6, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 16} }, -+ { 6, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -+ { 6.5, { 4, 5, 7, 8, 8, 9, 10, 11, 11, 12, 12, 13, 13, 14, 15} }, -+ { 7, { 3, 4, 6, 7, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14} }, -+ { 7.5, { 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13} }, -+ { 8, { 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ { 8.5, { 1, 3, 3, 4, 4, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -+ { 9, { 1, 3, 3, 4, 4, 6, 7, 8, 8, 9, 9, 10, 10, 10, 11} }, -+ { 9.5, { 1, 3, 3, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} }, -+ { 10, { 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} }, -+ {10.5, { 1, 1, 3, 3, 3, 4, 5, 5, 7, 7, 8, 8, 8, 8, 10} }, -+ { 11, { 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 7, 7, 8, 8, 9} }, -+ {11.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 7, 9} }, -+ { 12, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 8} }, -+ {12.5, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -+ { 13, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 7} }, -+ {13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -+ { 14, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -+ {14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 5} }, -+ { 15, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} } -+}; -+ -+ -+const qp_table qp_table_420_10bpc_min = { -+ { 4, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 17} }, -+ { 4.5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -+ { 5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -+ { 5.5, { 0, 3, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15} }, -+ { 6, { 0, 2, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -+ { 6.5, { 0, 2, 3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 14} }, -+ { 7, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 11, 13} }, -+ { 7.5, { 0, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 11, 12} }, -+ { 8, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -+ { 8.5, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11} }, -+ { 9, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10} }, -+ { 9.5, { 0, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10} }, -+ { 10, { 0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 8, 8, 8, 10} }, -+ {10.5, { 0, 0, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -+ { 11, { 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8} }, -+ {11.5, { 0, 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 6, 8} }, -+ { 12, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 5, 7} }, -+ {12.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6} }, -+ { 13, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -+ {13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} }, -+ { 14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -+ {14.5, { 0, 0, 0, 0, 0, 
0, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -+ { 15, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_444_10bpc_max = { -+ { 6, { 8, 10, 12, 12, 13, 13, 13, 14, 15, 16, 16, 16, 16, 17, 19} }, -+ { 6.5, { 8, 10, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 16, 17, 19} }, -+ { 7, { 8, 9, 11, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 17, 18} }, -+ { 7.5, { 8, 9, 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} }, -+ { 8, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -+ { 8.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -+ { 9, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} }, -+ { 9.5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} }, -+ { 10, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -+ {10.5, { 6, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 14, 15, 16} }, -+ { 11, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15} }, -+ {11.5, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -+ { 12, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -+ {12.5, { 4, 6, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15} }, -+ { 13, { 3, 6, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 14} }, -+ {13.5, { 3, 5, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} }, -+ { 14, { 3, 5, 6, 7, 8, 8, 8, 9, 10, 10, 11, 12, 12, 12, 14} }, -+ {14.5, { 2, 4, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13} }, -+ { 15, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ {15.5, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -+ { 16, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} }, -+ {16.5, { 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} }, -+ { 17, { 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 12} }, -+ {17.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 12} }, -+ { 18, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} }, -+ {18.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} }, -+ { 19, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} }, -+ {19.5, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} }, -+ { 20, { 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8, 10} }, -+ {20.5, { 1, 2, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 10} }, -+ { 21, { 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 8, 10} }, -+ {21.5, { 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 8, 9} }, -+ { 22, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 9} }, -+ {22.5, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8} }, -+ { 23, { 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} }, -+ {23.5, { 1, 1, 1, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} }, -+ { 24, { 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, 6, 8} }, -+ {24.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 8} }, -+ { 25, { 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} }, -+ {25.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 7} }, -+ { 26, { 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 6} }, -+ {26.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 6} }, -+ { 27, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -+ {27.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -+ { 28, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} }, -+ {28.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -+ { 29, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4} }, -+ {29.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} }, -+ { 30, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 4} } -+}; -+ -+ -+const qp_table qp_table_422_8bpc_max = { -+ { 6, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -+ { 6.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -+ { 7, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -+ { 7.5, { 3, 4, 
5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} }, -+ { 8, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -+ { 8.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -+ { 9, { 1, 2, 3, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} }, -+ { 9.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10} }, -+ { 10, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -+ {10.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -+ { 11, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} }, -+ {11.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8} }, -+ { 12, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7} }, -+ {12.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7} }, -+ { 13, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6} }, -+ {13.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 6} }, -+ { 14, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5} }, -+ {14.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5} }, -+ { 15, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 5} }, -+ {15.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }, -+ { 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} } -+}; -+ -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c -new file mode 100644 -index 0000000000000..3ee858f311d12 ---- /dev/null -+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c -@@ -0,0 +1,291 @@ -+/* -+ * Copyright 2021 Advanced Micro Devices, Inc. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: AMD -+ * -+ */ -+ -+#include "rc_calc_fpu.h" -+ -+#include "qp_tables.h" -+#include "amdgpu_dm/dc_fpu.h" -+ -+#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) -+ -+#define MODE_SELECT(val444, val422, val420) \ -+ (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
(val422) : (val420)) -+ -+ -+#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ -+ table = qp_table_##mode##_##bpc##bpc_##max; \ -+ table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ -+ break -+ -+static int median3(int a, int b, int c) -+{ -+ if (a > b) -+ swap(a, b); -+ if (b > c) -+ swap(b, c); -+ if (a > b) -+ swap(b, c); -+ -+ return b; -+} -+ -+static double dsc_roundf(double num) -+{ -+ if (num < 0.0) -+ num = num - 0.5; -+ else -+ num = num + 0.5; -+ -+ return (int)(num); -+} -+ -+static double dsc_ceil(double num) -+{ -+ double retval = (int)num; -+ -+ if (retval != num && num > 0) -+ retval = num + 1; -+ -+ return (int)retval; -+} -+ -+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, -+ enum max_min max_min, float bpp) -+{ -+ int mode = MODE_SELECT(444, 422, 420); -+ int sel = table_hash(mode, bpc, max_min); -+ int table_size = 0; -+ int index; -+ const struct qp_entry *table = 0L; -+ -+ // alias enum -+ enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; -+ switch (sel) { -+ TABLE_CASE(444, 8, max); -+ TABLE_CASE(444, 8, min); -+ TABLE_CASE(444, 10, max); -+ TABLE_CASE(444, 10, min); -+ TABLE_CASE(444, 12, max); -+ TABLE_CASE(444, 12, min); -+ TABLE_CASE(422, 8, max); -+ TABLE_CASE(422, 8, min); -+ TABLE_CASE(422, 10, max); -+ TABLE_CASE(422, 10, min); -+ TABLE_CASE(422, 12, max); -+ TABLE_CASE(422, 12, min); -+ TABLE_CASE(420, 8, max); -+ TABLE_CASE(420, 8, min); -+ TABLE_CASE(420, 10, max); -+ TABLE_CASE(420, 10, min); -+ TABLE_CASE(420, 12, max); -+ TABLE_CASE(420, 12, min); -+ } -+ -+ if (table == 0) -+ return; -+ -+ index = (bpp - table[0].bpp) * 2; -+ -+ /* requested size is bigger than the table */ -+ if (index >= table_size) { -+ dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); -+ return; -+ } -+ -+ memcpy(qps, table[index].qps, sizeof(qp_set)); -+} -+ -+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) -+{ -+ int *p = ofs; -+ -+ if (mode == CM_444 || mode == CM_RGB) { -+ *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); -+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); -+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -+ *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -+ *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -+ *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); -+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); -+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); -+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); -+ *p++ = (bpp <= 12) ? 
(-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); -+ *p++ = -10; -+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -+ *p++ = -12; -+ *p++ = -12; -+ *p++ = -12; -+ } else if (mode == CM_422) { -+ *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); -+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); -+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); -+ *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); -+ *p++ = -10; -+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); -+ *p++ = -12; -+ *p++ = -12; -+ *p++ = -12; -+ } else { -+ *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); -+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); -+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -+ *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -+ *p++ = -10; -+ *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); -+ *p++ = -12; -+ *p++ = -12; -+ *p++ = -12; -+ } -+} -+ -+void _do_calc_rc_params(struct rc_params *rc, -+ enum colour_mode cm, -+ enum bits_per_comp bpc, -+ u16 drm_bpp, -+ bool is_navite_422_or_420, -+ int slice_width, -+ int slice_height, -+ int minor_version) -+{ -+ float bpp; -+ float bpp_group; -+ float initial_xmit_delay_factor; -+ int padding_pixels; -+ int i; -+ -+ dc_assert_fp_enabled(); -+ -+ bpp = ((float)drm_bpp / 16.0); -+ /* in native_422 or native_420 modes, the bits_per_pixel is double the -+ * target bpp (the latter is what calc_rc_params expects) -+ */ -+ if (is_navite_422_or_420) -+ bpp /= 2.0; -+ -+ rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -+ rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -+ -+ bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); -+ -+ switch (cm) { -+ case CM_420: -+ rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); -+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); -+ rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); -+ break; -+ case CM_422: -+ rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); -+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); -+ rc->second_line_bpg_offset = 0; -+ break; -+ case CM_444: -+ case CM_RGB: -+ rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); -+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); -+ rc->second_line_bpg_offset = 0; -+ break; -+ } -+ -+ initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; -+ rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); -+ -+ if (cm == CM_422 || cm == CM_420) -+ slice_width /= 2; -+ -+ padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; -+ if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { -+ if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) -+ rc->initial_xmit_delay++; -+ } -+ -+ rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -+ rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -+ rc->flatness_det_thresh = 2 << (bpc - 8); -+ -+ get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); -+ get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); -+ if (cm == CM_444 && minor_version == 1) { -+ for (i = 0; i < QP_SET_SIZE; ++i) { -+ rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; -+ rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; -+ } -+ } -+ get_ofs_set(rc->ofs, cm, bpp); -+ -+ /* fixed parameters */ -+ rc->rc_model_size = 8192; -+ rc->rc_edge_factor = 6; -+ rc->rc_tgt_offset_hi = 3; -+ rc->rc_tgt_offset_lo = 3; -+ -+ rc->rc_buf_thresh[0] = 896; -+ rc->rc_buf_thresh[1] = 1792; -+ rc->rc_buf_thresh[2] = 2688; -+ rc->rc_buf_thresh[3] = 3584; -+ rc->rc_buf_thresh[4] = 4480; -+ rc->rc_buf_thresh[5] = 5376; -+ rc->rc_buf_thresh[6] = 6272; -+ rc->rc_buf_thresh[7] = 6720; -+ rc->rc_buf_thresh[8] = 7168; -+ rc->rc_buf_thresh[9] = 7616; -+ rc->rc_buf_thresh[10] = 7744; -+ rc->rc_buf_thresh[11] = 7872; -+ rc->rc_buf_thresh[12] = 8000; -+ rc->rc_buf_thresh[13] = 8064; -+} -+ -+u32 _do_bytes_per_pixel_calc(int slice_width, -+ u16 drm_bpp, -+ bool is_navite_422_or_420) -+{ -+ float bpp; -+ u32 bytes_per_pixel; -+ double d_bytes_per_pixel; -+ -+ dc_assert_fp_enabled(); -+ -+ bpp = ((float)drm_bpp / 16.0); -+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; -+ // TODO: Make sure the formula for calculating this is precise (ceiling -+ // vs. 
floor, and at what point they should be applied) -+ if (is_navite_422_or_420) -+ d_bytes_per_pixel /= 2; -+ -+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); -+ -+ return bytes_per_pixel; -+} -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h -new file mode 100644 -index 0000000000000..b93b95409fbe2 ---- /dev/null -+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h -@@ -0,0 +1,94 @@ -+/* -+ * Copyright 2021 Advanced Micro Devices, Inc. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice shall be included in -+ * all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: AMD -+ * -+ */ -+ -+#ifndef __RC_CALC_FPU_H__ -+#define __RC_CALC_FPU_H__ -+ -+#include "os_types.h" -+#include -+ -+#define QP_SET_SIZE 15 -+ -+typedef int qp_set[QP_SET_SIZE]; -+ -+struct rc_params { -+ int rc_quant_incr_limit0; -+ int rc_quant_incr_limit1; -+ int initial_fullness_offset; -+ int initial_xmit_delay; -+ int first_line_bpg_offset; -+ int second_line_bpg_offset; -+ int flatness_min_qp; -+ int flatness_max_qp; -+ int flatness_det_thresh; -+ qp_set qp_min; -+ qp_set qp_max; -+ qp_set ofs; -+ int rc_model_size; -+ int rc_edge_factor; -+ int rc_tgt_offset_hi; -+ int rc_tgt_offset_lo; -+ int rc_buf_thresh[QP_SET_SIZE - 1]; -+}; -+ -+enum colour_mode { -+ CM_RGB, /* 444 RGB */ -+ CM_444, /* 444 YUV or simple 422 */ -+ CM_422, /* native 422 */ -+ CM_420 /* native 420 */ -+}; -+ -+enum bits_per_comp { -+ BPC_8 = 8, -+ BPC_10 = 10, -+ BPC_12 = 12 -+}; -+ -+enum max_min { -+ DAL_MM_MIN = 0, -+ DAL_MM_MAX = 1 -+}; -+ -+struct qp_entry { -+ float bpp; -+ const qp_set qps; -+}; -+ -+typedef struct qp_entry qp_table[]; -+ -+u32 _do_bytes_per_pixel_calc(int slice_width, -+ u16 drm_bpp, -+ bool is_navite_422_or_420); -+ -+void _do_calc_rc_params(struct rc_params *rc, -+ enum colour_mode cm, -+ enum bits_per_comp bpc, -+ u16 drm_bpp, -+ bool is_navite_422_or_420, -+ int slice_width, -+ int slice_height, -+ int minor_version); -+ -+#endif -diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile -index 8d31eb75c6a6e..a2537229ee88b 100644 ---- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile -+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile -@@ -1,35 +1,6 @@ - # SPDX-License-Identifier: MIT - # - # Makefile for the 'dsc' sub-component of DAL. 
-- --ifdef CONFIG_X86 --dsc_ccflags := -mhard-float -msse --endif -- --ifdef CONFIG_PPC64 --dsc_ccflags := -mhard-float -maltivec --endif -- --ifdef CONFIG_CC_IS_GCC --ifeq ($(call cc-ifversion, -lt, 0701, y), y) --IS_OLD_GCC = 1 --endif --endif -- --ifdef CONFIG_X86 --ifdef IS_OLD_GCC --# Stack alignment mismatch, proceed with caution. --# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 --# (8B stack alignment). --dsc_ccflags += -mpreferred-stack-boundary=4 --else --dsc_ccflags += -msse2 --endif --endif -- --CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) --CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) -- - DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o - - AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) -diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h -deleted file mode 100644 -index e5fac9f4181d8..0000000000000 ---- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h -+++ /dev/null -@@ -1,704 +0,0 @@ -- --/* -- * Copyright 2017 Advanced Micro Devices, Inc. -- * -- * Permission is hereby granted, free of charge, to any person obtaining a -- * copy of this software and associated documentation files (the "Software"), -- * to deal in the Software without restriction, including without limitation -- * the rights to use, copy, modify, merge, publish, distribute, sublicense, -- * and/or sell copies of the Software, and to permit persons to whom the -- * Software is furnished to do so, subject to the following conditions: -- * -- * The above copyright notice and this permission notice shall be included in -- * all copies or substantial portions of the Software. -- * -- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -- * OTHER DEALINGS IN THE SOFTWARE. 
-- * -- * Authors: AMD -- * -- */ -- -- --const qp_table qp_table_422_10bpc_min = { -- { 6, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, -- { 6.5, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, -- { 7, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 7, 9, 9, 9, 11, 15} }, -- { 7.5, { 0, 2, 4, 6, 6, 6, 6, 7, 7, 7, 8, 9, 9, 11, 15} }, -- { 8, { 0, 2, 3, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 11, 14} }, -- { 8.5, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 14} }, -- { 9, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, -- { 9.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, -- { 10, { 0, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -- {10.5, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -- { 11, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11} }, -- {11.5, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 10, 11} }, -- { 12, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10} }, -- {12.5, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -- { 13, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 6, 8, 8, 9} }, -- {13.5, { 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 6, 7, 8, 9} }, -- { 14, { 0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8} }, -- {14.5, { 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8} }, -- { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 6, 8} }, -- {15.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -- { 16, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} }, -- {16.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} }, -- { 17, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 6} }, -- {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -- { 18, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 5} }, -- {18.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -- { 19, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 4} }, -- {19.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} }, -- { 20, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 3} } --}; -- -- --const qp_table qp_table_444_8bpc_max = { -- { 6, { 4, 6, 8, 8, 9, 9, 9, 10, 11, 12, 12, 12, 12, 13, 15} }, -- { 6.5, { 4, 6, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 12, 13, 15} }, -- { 7, { 4, 5, 7, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 13, 14} }, -- { 7.5, { 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -- { 8, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -- { 8.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -- { 9, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, -- { 9.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, -- { 10, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -- {10.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 12} }, -- { 11, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11} }, -- {11.5, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -- { 12, { 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -- {12.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -- { 13, { 1, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, 8, 9, 10} }, -- {13.5, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} }, -- { 14, { 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -- {14.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9} }, -- { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -- {15.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -- { 16, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, -- {16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, -- { 17, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, -- {17.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, -- { 18, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, -- {18.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, -- { 19, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 
6} }, -- {19.5, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6} }, -- { 20, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, -- {20.5, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, -- { 21, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -- {21.5, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -- { 22, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }, -- {22.5, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }, -- { 23, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -- {23.5, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -- { 24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4} } --}; -- -- --const qp_table qp_table_420_12bpc_max = { -- { 4, {11, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 21, 22} }, -- { 4.5, {10, 11, 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -- { 5, { 9, 11, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 19, 20, 21} }, -- { 5.5, { 8, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 20} }, -- { 6, { 6, 9, 11, 12, 13, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -- { 6.5, { 6, 8, 10, 11, 11, 13, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -- { 7, { 5, 7, 9, 10, 10, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18} }, -- { 7.5, { 5, 7, 8, 9, 9, 11, 12, 13, 14, 14, 15, 15, 16, 16, 17} }, -- { 8, { 4, 6, 7, 8, 8, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -- { 8.5, { 3, 6, 6, 7, 7, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -- { 9, { 3, 5, 6, 7, 7, 10, 11, 12, 12, 13, 13, 14, 14, 14, 15} }, -- { 9.5, { 2, 5, 6, 6, 7, 9, 10, 11, 12, 12, 13, 13, 13, 14, 15} }, -- { 10, { 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 13, 13, 15} }, -- {10.5, { 2, 3, 5, 5, 6, 7, 8, 9, 11, 11, 12, 12, 12, 12, 14} }, -- { 11, { 1, 3, 4, 5, 6, 6, 7, 9, 10, 11, 11, 11, 12, 12, 13} }, -- {11.5, { 1, 2, 3, 4, 5, 6, 6, 8, 9, 10, 10, 11, 11, 11, 13} }, -- { 12, { 1, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 10, 12} }, -- {12.5, { 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} }, -- { 13, { 1, 1, 1, 2, 4, 4, 6, 6, 7, 8, 8, 9, 9, 9, 11} }, -- {13.5, { 1, 1, 1, 2, 3, 4, 5, 5, 6, 7, 8, 8, 8, 9, 11} }, -- { 14, { 1, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -- {14.5, { 0, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -- { 15, { 0, 1, 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 6, 7, 9} }, -- {15.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} }, -- { 16, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7} }, -- {16.5, { 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 7} }, -- { 17, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -- {17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -- { 18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} } --}; -- -- --const qp_table qp_table_444_10bpc_min = { -- { 6, { 0, 4, 7, 7, 9, 9, 9, 9, 9, 10, 10, 10, 10, 12, 18} }, -- { 6.5, { 0, 4, 6, 7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 12, 18} }, -- { 7, { 0, 4, 6, 6, 8, 8, 8, 8, 8, 9, 9, 10, 10, 12, 17} }, -- { 7.5, { 0, 4, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 10, 12, 17} }, -- { 8, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, -- { 8.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, -- { 9, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -- { 9.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -- { 10, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, -- {10.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, -- { 11, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -- {11.5, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -- { 12, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -- {12.5, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 8, 9, 9, 11, 14} }, -- { 13, { 0, 2, 4, 4, 5, 6, 7, 7, 7, 7, 8, 9, 9, 11, 13} }, -- {13.5, { 0, 2, 
3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 13} }, -- { 14, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 11, 13} }, -- {14.5, { 0, 2, 3, 4, 5, 5, 6, 6, 6, 7, 7, 8, 9, 11, 12} }, -- { 15, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, -- {15.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, -- { 16, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, -- {16.5, { 0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, -- { 17, { 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 9, 11} }, -- {17.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 11} }, -- { 18, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -- {18.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, -- { 19, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, -- {19.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, -- { 20, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 9} }, -- {20.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 9} }, -- { 21, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 9} }, -- {21.5, { 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8} }, -- { 22, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} }, -- {22.5, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -- { 23, { 0, 0, 1, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, -- {23.5, { 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, -- { 24, { 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 7} }, -- {24.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -- { 25, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -- {25.5, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -- { 26, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5} }, -- {26.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} }, -- { 27, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -- {27.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -- { 28, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} }, -- {28.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 4} }, -- { 29, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3} }, -- {29.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3} }, -- { 30, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } --}; -- -- --const qp_table qp_table_420_8bpc_max = { -- { 4, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 13, 14} }, -- { 4.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -- { 5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 12, 13} }, -- { 5.5, { 3, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} }, -- { 6, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -- { 6.5, { 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -- { 7, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10} }, -- { 7.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9} }, -- { 8, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -- { 8.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} }, -- { 9, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7} }, -- { 9.5, { 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -- { 10, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6} }, -- {10.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -- { 11, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5} }, -- {11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, -- { 12, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} } --}; -- -- --const qp_table qp_table_444_8bpc_min = { -- { 6, { 0, 1, 3, 3, 5, 5, 5, 5, 5, 6, 6, 6, 6, 9, 14} }, -- { 6.5, { 0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 9, 14} }, -- { 7, { 0, 0, 2, 2, 4, 4, 4, 4, 4, 5, 5, 6, 6, 9, 13} }, -- { 7.5, { 0, 0, 2, 2, 3, 4, 4, 4, 4, 4, 5, 5, 6, 9, 13} }, -- { 8, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, -- { 8.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, -- { 9, { 0, 0, 1, 1, 3, 3, 3, 3, 
3, 3, 5, 5, 5, 7, 12} }, -- { 9.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12} }, -- { 10, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -- {10.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -- { 11, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -- {11.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -- { 12, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -- {12.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 4, 5, 5, 7, 10} }, -- { 13, { 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 4, 5, 5, 7, 9} }, -- {13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -- { 14, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -- {14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} }, -- { 15, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -- {15.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -- { 16, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -- {16.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -- { 17, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -- {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -- { 18, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -- {18.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -- { 19, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, -- {19.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, -- { 20, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -- {20.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -- { 21, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -- {21.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -- { 22, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, -- {22.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }, -- { 23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, -- {23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, -- { 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3} } --}; -- -- --const qp_table qp_table_444_12bpc_min = { -- { 6, { 0, 5, 11, 11, 13, 13, 13, 13, 13, 14, 14, 14, 14, 17, 22} }, -- { 6.5, { 0, 5, 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 17, 22} }, -- { 7, { 0, 5, 10, 10, 12, 12, 12, 12, 12, 13, 13, 14, 14, 17, 21} }, -- { 7.5, { 0, 5, 9, 10, 11, 12, 12, 12, 12, 12, 13, 13, 14, 17, 21} }, -- { 8, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, -- { 8.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, -- { 9, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -- { 9.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -- { 10, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -- {10.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -- { 11, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- {11.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- { 12, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- {12.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- { 13, { 0, 4, 7, 8, 9, 11, 11, 11, 11, 11, 13, 13, 13, 15, 17} }, -- {13.5, { 0, 3, 6, 7, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 17} }, -- { 14, { 0, 3, 5, 6, 9, 9, 9, 10, 11, 11, 12, 13, 13, 15, 17} }, -- {14.5, { 0, 2, 5, 6, 8, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- { 15, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- {15.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- { 16, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, -- {16.5, { 0, 2, 3, 5, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, -- { 17, { 0, 2, 3, 5, 5, 6, 9, 9, 10, 10, 11, 11, 12, 13, 15} }, -- {17.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 15} }, -- { 18, { 0, 2, 
3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -- {18.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -- { 19, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- {19.5, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- { 20, { 0, 1, 2, 3, 4, 5, 7, 8, 8, 8, 9, 10, 10, 11, 13} }, -- {20.5, { 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 13} }, -- { 21, { 0, 1, 2, 3, 4, 5, 5, 7, 7, 8, 9, 10, 10, 11, 13} }, -- {21.5, { 0, 1, 2, 3, 3, 4, 5, 7, 7, 8, 9, 10, 10, 11, 12} }, -- { 22, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 12} }, -- {22.5, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 11} }, -- { 23, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, -- {23.5, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, -- { 24, { 0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 8, 9, 11} }, -- {24.5, { 0, 0, 1, 2, 3, 4, 4, 6, 6, 7, 8, 8, 8, 9, 11} }, -- { 25, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 8, 10} }, -- {25.5, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, -- { 26, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 7, 7, 9} }, -- {26.5, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 7, 9} }, -- { 27, { 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -- {27.5, { 0, 0, 1, 1, 2, 2, 4, 4, 4, 5, 6, 7, 7, 7, 9} }, -- { 28, { 0, 0, 0, 1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 7, 9} }, -- {28.5, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} }, -- { 29, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8} }, -- {29.5, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7} }, -- { 30, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 5, 5, 5, 5, 7} }, -- {30.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, -- { 31, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, -- {31.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -- { 32, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} }, -- {32.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 6} }, -- { 33, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -- {33.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, -- { 34, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 5} }, -- {34.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 5} }, -- { 35, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, -- {35.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4} }, -- { 36, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } --}; -- -- --const qp_table qp_table_420_12bpc_min = { -- { 4, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21} }, -- { 4.5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -- { 5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, -- { 5.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -- { 6, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- { 6.5, { 0, 4, 6, 8, 9, 10, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- { 7, { 0, 3, 5, 7, 9, 10, 10, 11, 11, 11, 13, 13, 13, 15, 17} }, -- { 7.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 16} }, -- { 8, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- { 8.5, { 0, 2, 4, 6, 6, 9, 9, 10, 11, 11, 12, 12, 13, 14, 15} }, -- { 9, { 0, 2, 4, 6, 6, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14} }, -- { 9.5, { 0, 2, 4, 5, 6, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14} }, -- { 10, { 0, 2, 3, 5, 6, 7, 8, 8, 9, 10, 10, 12, 12, 12, 14} }, -- {10.5, { 0, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 11, 11, 11, 13} }, -- { 11, { 0, 2, 3, 4, 5, 5, 6, 8, 8, 9, 9, 10, 11, 11, 12} }, -- {11.5, { 0, 1, 2, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 10, 12} }, -- { 12, { 0, 0, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} }, -- {12.5, { 0, 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 10} }, -- { 13, { 0, 0, 0, 1, 3, 3, 5, 5, 6, 7, 7, 8, 8, 8, 10} }, -- 
{13.5, { 0, 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 7, 8, 10} }, -- { 14, { 0, 0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 7, 9} }, -- {14.5, { 0, 0, 0, 0, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} }, -- { 15, { 0, 0, 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 8} }, -- {15.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, -- { 16, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 6} }, -- {16.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -- { 17, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -- {17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, -- { 18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} } --}; -- -- --const qp_table qp_table_422_12bpc_min = { -- { 6, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} }, -- { 6.5, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} }, -- { 7, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -- { 7.5, { 0, 4, 8, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, -- { 8, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, -- { 8.5, { 0, 3, 6, 8, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 18} }, -- { 9, { 0, 3, 5, 8, 9, 10, 10, 10, 11, 11, 12, 13, 13, 15, 17} }, -- { 9.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 17} }, -- { 10, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- {10.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, -- { 11, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 12, 13, 14, 15} }, -- {11.5, { 0, 2, 4, 6, 7, 7, 9, 9, 10, 11, 11, 12, 12, 14, 15} }, -- { 12, { 0, 2, 4, 6, 6, 6, 8, 8, 9, 9, 11, 11, 12, 13, 14} }, -- {12.5, { 0, 1, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 13, 14} }, -- { 13, { 0, 1, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 11, 12, 13} }, -- {13.5, { 0, 1, 3, 3, 4, 5, 7, 7, 8, 8, 10, 10, 10, 12, 13} }, -- { 14, { 0, 0, 2, 3, 4, 5, 6, 6, 7, 7, 9, 10, 10, 11, 12} }, -- {14.5, { 0, 0, 1, 3, 4, 4, 6, 6, 6, 7, 9, 9, 9, 11, 12} }, -- { 15, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 6, 8, 9, 9, 10, 12} }, -- {15.5, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 10, 11} }, -- { 16, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 9, 11} }, -- {16.5, { 0, 0, 0, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7, 9, 10} }, -- { 17, { 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 6, 6, 6, 8, 10} }, -- {17.5, { 0, 0, 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 8, 9} }, -- { 18, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 6, 7, 9} }, -- {18.5, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 5, 7, 9} }, -- { 19, { 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 8} }, -- {19.5, { 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 6, 8} }, -- { 20, { 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -- {20.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 7} }, -- { 21, { 0, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -- {21.5, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -- { 22, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 6} }, -- {22.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} }, -- { 23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 5} }, -- {23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} }, -- { 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} } --}; -- -- --const qp_table qp_table_422_12bpc_max = { -- { 6, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -- { 6.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -- { 7, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} }, -- { 7.5, { 9, 10, 12, 14, 15, 15, 15, 16, 16, 17, 17, 18, 18, 19, 20} }, -- { 8, { 6, 9, 10, 12, 14, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -- { 8.5, { 6, 8, 9, 11, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -- { 9, { 5, 7, 8, 10, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} }, 
-- { 9.5, { 5, 7, 7, 9, 10, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} }, -- { 10, { 4, 6, 6, 8, 9, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -- {10.5, { 4, 6, 6, 8, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -- { 11, { 4, 5, 6, 8, 9, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -- {11.5, { 3, 5, 6, 8, 9, 9, 11, 11, 12, 13, 13, 14, 14, 15, 16} }, -- { 12, { 3, 5, 6, 8, 8, 8, 10, 10, 11, 11, 13, 13, 14, 14, 15} }, -- {12.5, { 3, 4, 6, 7, 8, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15} }, -- { 13, { 2, 4, 5, 6, 7, 7, 9, 10, 10, 11, 12, 12, 13, 13, 14} }, -- {13.5, { 2, 4, 5, 5, 6, 7, 9, 9, 10, 10, 12, 12, 12, 13, 14} }, -- { 14, { 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 11, 12, 12, 12, 13} }, -- {14.5, { 2, 3, 3, 5, 6, 6, 8, 8, 8, 9, 11, 11, 11, 12, 13} }, -- { 15, { 2, 3, 3, 5, 5, 6, 7, 8, 8, 8, 10, 11, 11, 11, 13} }, -- {15.5, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 11, 12} }, -- { 16, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 10, 12} }, -- {16.5, { 1, 2, 2, 4, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 11} }, -- { 17, { 1, 1, 2, 3, 4, 4, 6, 6, 6, 7, 8, 8, 8, 9, 11} }, -- {17.5, { 1, 1, 2, 3, 4, 4, 5, 6, 6, 6, 7, 8, 8, 9, 10} }, -- { 18, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 7, 7, 8, 8, 10} }, -- {18.5, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 5, 7, 7, 7, 8, 10} }, -- { 19, { 1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 7, 9} }, -- {19.5, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 7, 9} }, -- { 20, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 6, 8} }, -- {20.5, { 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} }, -- { 21, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 7} }, -- {21.5, { 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, -- { 22, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 7} }, -- {22.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, -- { 23, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, -- {23.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, -- { 24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} } --}; -- -- --const qp_table qp_table_444_12bpc_max = { -- { 6, {12, 14, 16, 16, 17, 17, 17, 18, 19, 20, 20, 20, 20, 21, 23} }, -- { 6.5, {12, 14, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 20, 21, 23} }, -- { 7, {12, 13, 15, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 21, 22} }, -- { 7.5, {12, 13, 14, 15, 15, 16, 16, 17, 18, 18, 19, 19, 20, 21, 22} }, -- { 8, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -- { 8.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, -- { 9, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 21} }, -- { 9.5, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 21} }, -- { 10, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} }, -- {10.5, {10, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 18, 19, 20} }, -- { 11, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 18, 18, 19} }, -- {11.5, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -- { 12, { 6, 9, 12, 13, 14, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, -- {12.5, { 6, 9, 12, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, -- { 13, { 5, 9, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 17, 18} }, -- {13.5, { 5, 8, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} }, -- { 14, { 5, 8, 10, 11, 12, 12, 12, 13, 14, 14, 15, 16, 16, 16, 18} }, -- {14.5, { 4, 7, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 17} }, -- { 15, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -- {15.5, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, -- { 16, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} }, -- {16.5, { 4, 5, 7, 8, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} }, -- { 
17, { 4, 5, 7, 8, 8, 9, 11, 11, 12, 12, 12, 13, 13, 14, 16} }, -- {17.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 16} }, -- { 18, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} }, -- {18.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} }, -- { 19, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} }, -- {19.5, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} }, -- { 20, { 2, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11, 11, 12, 14} }, -- {20.5, { 2, 3, 5, 5, 7, 8, 8, 8, 9, 10, 10, 11, 11, 12, 14} }, -- { 21, { 2, 3, 5, 5, 7, 7, 7, 8, 8, 9, 10, 11, 11, 12, 14} }, -- {21.5, { 2, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 12, 13} }, -- { 22, { 2, 2, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10, 11, 13} }, -- {22.5, { 2, 2, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12} }, -- { 23, { 2, 2, 4, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} }, -- {23.5, { 2, 2, 3, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} }, -- { 24, { 2, 2, 3, 4, 4, 5, 7, 7, 7, 8, 9, 9, 9, 10, 12} }, -- {24.5, { 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 12} }, -- { 25, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 9, 9, 11} }, -- {25.5, { 1, 1, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 11} }, -- { 26, { 1, 1, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 8, 8, 10} }, -- {26.5, { 1, 1, 2, 3, 3, 4, 5, 6, 6, 6, 8, 8, 8, 8, 10} }, -- { 27, { 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 10} }, -- {27.5, { 1, 1, 2, 2, 3, 3, 5, 5, 5, 6, 7, 8, 8, 8, 10} }, -- { 28, { 0, 1, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 8, 10} }, -- {28.5, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -- { 29, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 9} }, -- {29.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8} }, -- { 30, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6, 6, 6, 8} }, -- {30.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} }, -- { 31, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} }, -- {31.5, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} }, -- { 32, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 7} }, -- {32.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 7} }, -- { 33, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -- {33.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -- { 34, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} }, -- {34.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 6} }, -- { 35, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }, -- {35.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 5} }, -- { 36, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} } --}; -- -- --const qp_table qp_table_420_8bpc_min = { -- { 4, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 13} }, -- { 4.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -- { 5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -- { 5.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -- { 6, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -- { 6.5, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 5, 7, 10} }, -- { 7, { 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -- { 7.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} }, -- { 8, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -- { 8.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -- { 9, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6} }, -- { 9.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -- { 10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5} }, -- {10.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} }, -- { 11, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} }, -- {11.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, -- { 12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3} } --}; -- -- --const qp_table qp_table_422_8bpc_min = { -- { 6, { 0, 0, 
1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -- { 6.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} }, -- { 7, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -- { 7.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, -- { 8, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, -- { 8.5, { 0, 0, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 7, 10} }, -- { 9, { 0, 0, 0, 1, 2, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, -- { 9.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 9} }, -- { 10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -- {10.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, -- { 11, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -- {11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, -- { 12, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6} }, -- {12.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, -- { 13, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5} }, -- {13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 5} }, -- { 14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4} }, -- {14.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} }, -- { 15, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 4} }, -- {15.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }, -- { 16, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} } --}; -- -- --const qp_table qp_table_422_10bpc_max = { -- { 6, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -- { 6.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -- { 7, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -- { 7.5, { 5, 6, 8, 10, 11, 11, 11, 12, 12, 13, 13, 14, 14, 15, 16} }, -- { 8, { 4, 6, 7, 9, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -- { 8.5, { 4, 5, 6, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15} }, -- { 9, { 3, 4, 5, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} }, -- { 9.5, { 3, 4, 4, 6, 6, 8, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, -- { 10, { 2, 3, 3, 5, 5, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- {10.5, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- { 11, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -- {11.5, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 9, 9, 10, 10, 11, 12} }, -- { 12, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 8, 9, 9, 10, 10, 11} }, -- {12.5, { 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -- { 13, { 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10} }, -- {13.5, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10} }, -- { 14, { 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 8, 9} }, -- {14.5, { 1, 2, 2, 3, 4, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9} }, -- { 15, { 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 7, 9} }, -- {15.5, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 8} }, -- { 16, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 6, 8} }, -- {16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7} }, -- { 17, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 7} }, -- {17.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} }, -- { 18, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 6} }, -- {18.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 6} }, -- { 19, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 5} }, -- {19.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 5} }, -- { 20, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 4} } --}; -- -- --const qp_table qp_table_420_10bpc_max = { -- { 4, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 17, 18} }, -- { 4.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -- { 5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 16, 17} }, -- { 5.5, { 6, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 16} }, -- { 6, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -- { 6.5, { 4, 5, 7, 8, 8, 
9, 10, 11, 11, 12, 12, 13, 13, 14, 15} }, -- { 7, { 3, 4, 6, 7, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14} }, -- { 7.5, { 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13} }, -- { 8, { 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- { 8.5, { 1, 3, 3, 4, 4, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -- { 9, { 1, 3, 3, 4, 4, 6, 7, 8, 8, 9, 9, 10, 10, 10, 11} }, -- { 9.5, { 1, 3, 3, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} }, -- { 10, { 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} }, -- {10.5, { 1, 1, 3, 3, 3, 4, 5, 5, 7, 7, 8, 8, 8, 8, 10} }, -- { 11, { 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 7, 7, 8, 8, 9} }, -- {11.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 7, 9} }, -- { 12, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 8} }, -- {12.5, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, -- { 13, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 7} }, -- {13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, -- { 14, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -- {14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 5} }, -- { 15, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} } --}; -- -- --const qp_table qp_table_420_10bpc_min = { -- { 4, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 17} }, -- { 4.5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -- { 5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, -- { 5.5, { 0, 3, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15} }, -- { 6, { 0, 2, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, -- { 6.5, { 0, 2, 3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 14} }, -- { 7, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 11, 13} }, -- { 7.5, { 0, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 11, 12} }, -- { 8, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, -- { 8.5, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11} }, -- { 9, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10} }, -- { 9.5, { 0, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10} }, -- { 10, { 0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 8, 8, 8, 10} }, -- {10.5, { 0, 0, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, -- { 11, { 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8} }, -- {11.5, { 0, 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 6, 8} }, -- { 12, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 5, 7} }, -- {12.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6} }, -- { 13, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -- {13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} }, -- { 14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -- {14.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, -- { 15, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} } --}; -- -- --const qp_table qp_table_444_10bpc_max = { -- { 6, { 8, 10, 12, 12, 13, 13, 13, 14, 15, 16, 16, 16, 16, 17, 19} }, -- { 6.5, { 8, 10, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 16, 17, 19} }, -- { 7, { 8, 9, 11, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 17, 18} }, -- { 7.5, { 8, 9, 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} }, -- { 8, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -- { 8.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} }, -- { 9, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} }, -- { 9.5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} }, -- { 10, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, -- {10.5, { 6, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 14, 15, 16} }, -- { 11, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15} }, -- {11.5, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -- { 12, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} }, -- {12.5, { 4, 6, 8, 9, 10, 10, 10, 11, 11, 12, 
12, 13, 13, 14, 15} }, -- { 13, { 3, 6, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 14} }, -- {13.5, { 3, 5, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} }, -- { 14, { 3, 5, 6, 7, 8, 8, 8, 9, 10, 10, 11, 12, 12, 12, 14} }, -- {14.5, { 2, 4, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13} }, -- { 15, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- {15.5, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, -- { 16, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} }, -- {16.5, { 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} }, -- { 17, { 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 12} }, -- {17.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 12} }, -- { 18, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} }, -- {18.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} }, -- { 19, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} }, -- {19.5, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} }, -- { 20, { 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8, 10} }, -- {20.5, { 1, 2, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 10} }, -- { 21, { 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 8, 10} }, -- {21.5, { 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 8, 9} }, -- { 22, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 9} }, -- {22.5, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8} }, -- { 23, { 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} }, -- {23.5, { 1, 1, 1, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} }, -- { 24, { 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, 6, 8} }, -- {24.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 8} }, -- { 25, { 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} }, -- {25.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 7} }, -- { 26, { 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 6} }, -- {26.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 6} }, -- { 27, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, -- {27.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} }, -- { 28, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} }, -- {28.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, -- { 29, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4} }, -- {29.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} }, -- { 30, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 4} } --}; -- -- --const qp_table qp_table_422_8bpc_max = { -- { 6, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -- { 6.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, -- { 7, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, -- { 7.5, { 3, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} }, -- { 8, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, -- { 8.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, -- { 9, { 1, 2, 3, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} }, -- { 9.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10} }, -- { 10, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -- {10.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, -- { 11, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} }, -- {11.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8} }, -- { 12, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7} }, -- {12.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7} }, -- { 13, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6} }, -- {13.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 6} }, -- { 14, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5} }, -- {14.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5} }, -- { 15, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 5} }, -- {15.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }, -- { 16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} } --}; -- -diff --git 
a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c -index 7b294f637881a..b19d3aeb5962c 100644 ---- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c -+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c -@@ -23,266 +23,7 @@ - * Authors: AMD - * - */ --#include -- --#include "os_types.h" - #include "rc_calc.h" --#include "qp_tables.h" -- --#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) -- --#define MODE_SELECT(val444, val422, val420) \ -- (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420)) -- -- --#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ -- table = qp_table_##mode##_##bpc##bpc_##max; \ -- table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ -- break -- -- --static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, -- enum max_min max_min, float bpp) --{ -- int mode = MODE_SELECT(444, 422, 420); -- int sel = table_hash(mode, bpc, max_min); -- int table_size = 0; -- int index; -- const struct qp_entry *table = 0L; -- -- // alias enum -- enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; -- switch (sel) { -- TABLE_CASE(444, 8, max); -- TABLE_CASE(444, 8, min); -- TABLE_CASE(444, 10, max); -- TABLE_CASE(444, 10, min); -- TABLE_CASE(444, 12, max); -- TABLE_CASE(444, 12, min); -- TABLE_CASE(422, 8, max); -- TABLE_CASE(422, 8, min); -- TABLE_CASE(422, 10, max); -- TABLE_CASE(422, 10, min); -- TABLE_CASE(422, 12, max); -- TABLE_CASE(422, 12, min); -- TABLE_CASE(420, 8, max); -- TABLE_CASE(420, 8, min); -- TABLE_CASE(420, 10, max); -- TABLE_CASE(420, 10, min); -- TABLE_CASE(420, 12, max); -- TABLE_CASE(420, 12, min); -- } -- -- if (table == 0) -- return; -- -- index = (bpp - table[0].bpp) * 2; -- -- /* requested size is bigger than the table */ -- if (index >= table_size) { -- dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); -- return; -- } -- -- memcpy(qps, table[index].qps, sizeof(qp_set)); --} -- --static double dsc_roundf(double num) --{ -- if (num < 0.0) -- num = num - 0.5; -- else -- num = num + 0.5; -- -- return (int)(num); --} -- --static double dsc_ceil(double num) --{ -- double retval = (int)num; -- -- if (retval != num && num > 0) -- retval = num + 1; -- -- return (int)retval; --} -- --static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) --{ -- int *p = ofs; -- -- if (mode == CM_444 || mode == CM_RGB) { -- *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); -- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); -- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -- *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -- *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? 
(-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); -- *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); -- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); -- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); -- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); -- *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); -- *p++ = -10; -- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -- *p++ = -12; -- *p++ = -12; -- *p++ = -12; -- } else if (mode == CM_422) { -- *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); -- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); -- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -- *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -- *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -- *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); -- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); -- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); -- *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); -- *p++ = -10; -- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); -- *p++ = -12; -- *p++ = -12; -- *p++ = -12; -- } else { -- *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); -- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); -- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -- *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -- *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -- *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); -- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); -- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -- *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); -- *p++ = -10; -- *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? 
(-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); -- *p++ = -12; -- *p++ = -12; -- *p++ = -12; -- } --} -- --static int median3(int a, int b, int c) --{ -- if (a > b) -- swap(a, b); -- if (b > c) -- swap(b, c); -- if (a > b) -- swap(b, c); -- -- return b; --} -- --static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, -- enum bits_per_comp bpc, u16 drm_bpp, -- bool is_navite_422_or_420, -- int slice_width, int slice_height, -- int minor_version) --{ -- float bpp; -- float bpp_group; -- float initial_xmit_delay_factor; -- int padding_pixels; -- int i; -- -- bpp = ((float)drm_bpp / 16.0); -- /* in native_422 or native_420 modes, the bits_per_pixel is double the -- * target bpp (the latter is what calc_rc_params expects) -- */ -- if (is_navite_422_or_420) -- bpp /= 2.0; -- -- rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -- rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -- -- bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); -- -- switch (cm) { -- case CM_420: -- rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); -- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); -- rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); -- break; -- case CM_422: -- rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); -- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); -- rc->second_line_bpg_offset = 0; -- break; -- case CM_444: -- case CM_RGB: -- rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); -- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); -- rc->second_line_bpg_offset = 0; -- break; -- } -- -- initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; -- rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); -- -- if (cm == CM_422 || cm == CM_420) -- slice_width /= 2; -- -- padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; -- if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { -- if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) -- rc->initial_xmit_delay++; -- } -- -- rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -- rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); -- rc->flatness_det_thresh = 2 << (bpc - 8); -- -- get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); -- get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); -- if (cm == CM_444 && minor_version == 1) { -- for (i = 0; i < QP_SET_SIZE; ++i) { -- rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; -- rc->qp_max[i] = rc->qp_max[i] > 0 ? 
rc->qp_max[i] - 1 : 0; -- } -- } -- get_ofs_set(rc->ofs, cm, bpp); -- -- /* fixed parameters */ -- rc->rc_model_size = 8192; -- rc->rc_edge_factor = 6; -- rc->rc_tgt_offset_hi = 3; -- rc->rc_tgt_offset_lo = 3; -- -- rc->rc_buf_thresh[0] = 896; -- rc->rc_buf_thresh[1] = 1792; -- rc->rc_buf_thresh[2] = 2688; -- rc->rc_buf_thresh[3] = 3584; -- rc->rc_buf_thresh[4] = 4480; -- rc->rc_buf_thresh[5] = 5376; -- rc->rc_buf_thresh[6] = 6272; -- rc->rc_buf_thresh[7] = 6720; -- rc->rc_buf_thresh[8] = 7168; -- rc->rc_buf_thresh[9] = 7616; -- rc->rc_buf_thresh[10] = 7744; -- rc->rc_buf_thresh[11] = 7872; -- rc->rc_buf_thresh[12] = 8000; -- rc->rc_buf_thresh[13] = 8064; --} -- --static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, -- bool is_navite_422_or_420) --{ -- float bpp; -- u32 bytes_per_pixel; -- double d_bytes_per_pixel; -- -- bpp = ((float)drm_bpp / 16.0); -- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; -- // TODO: Make sure the formula for calculating this is precise (ceiling -- // vs. floor, and at what point they should be applied) -- if (is_navite_422_or_420) -- d_bytes_per_pixel /= 2; -- -- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); -- -- return bytes_per_pixel; --} - - /** - * calc_rc_params - reads the user's cmdline mode -diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h -index 262f06afcbf95..c2340e001b578 100644 ---- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h -+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h -@@ -27,55 +27,7 @@ - #ifndef __RC_CALC_H__ - #define __RC_CALC_H__ - -- --#define QP_SET_SIZE 15 -- --typedef int qp_set[QP_SET_SIZE]; -- --struct rc_params { -- int rc_quant_incr_limit0; -- int rc_quant_incr_limit1; -- int initial_fullness_offset; -- int initial_xmit_delay; -- int first_line_bpg_offset; -- int second_line_bpg_offset; -- int flatness_min_qp; -- int flatness_max_qp; -- int flatness_det_thresh; -- qp_set qp_min; -- qp_set qp_max; -- qp_set ofs; -- int rc_model_size; -- int rc_edge_factor; -- int rc_tgt_offset_hi; -- int rc_tgt_offset_lo; -- int rc_buf_thresh[QP_SET_SIZE - 1]; --}; -- --enum colour_mode { -- CM_RGB, /* 444 RGB */ -- CM_444, /* 444 YUV or simple 422 */ -- CM_422, /* native 422 */ -- CM_420 /* native 420 */ --}; -- --enum bits_per_comp { -- BPC_8 = 8, -- BPC_10 = 10, -- BPC_12 = 12 --}; -- --enum max_min { -- DAL_MM_MIN = 0, -- DAL_MM_MAX = 1 --}; -- --struct qp_entry { -- float bpp; -- const qp_set qps; --}; -- --typedef struct qp_entry qp_table[]; -+#include "dml/dsc/rc_calc_fpu.h" - - void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); - u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); -diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c -index ef830aded5b1c..1e19dd674e5a2 100644 ---- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c -+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c -@@ -22,7 +22,6 @@ - * Authors: AMD - * - */ --#include "os_types.h" - #include - #include "dscc_types.h" - #include "rc_calc.h" -diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h -index 713f5558f5e17..9195dec294c2d 100644 ---- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h -+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h -@@ -154,6 +154,8 @@ struct hubbub_funcs { - bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); - void (*allow_self_refresh_control)(struct 
hubbub *hubbub, bool allow); - -+ bool (*verify_allow_pstate_change_high)(struct hubbub *hubbub); -+ - void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); - - void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub); -diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h -index ad5f2adcc40d5..c8427d738c87e 100644 ---- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h -+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h -@@ -32,11 +32,6 @@ - #include "inc/hw/link_encoder.h" - #include "core_status.h" - --enum vline_select { -- VLINE0, -- VLINE1 --}; -- - struct pipe_ctx; - struct dc_state; - struct dc_stream_status; -@@ -115,8 +110,7 @@ struct hw_sequencer_funcs { - int group_index, int group_size, - struct pipe_ctx *grouped_pipes[]); - void (*setup_periodic_interrupt)(struct dc *dc, -- struct pipe_ctx *pipe_ctx, -- enum vline_select vline); -+ struct pipe_ctx *pipe_ctx); - void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, - struct dc_crtc_timing_adjust adjust); - void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, -diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c -index ed54e1c819bed..a728087b3f3d6 100644 ---- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c -+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c -@@ -266,14 +266,6 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = { - .funcs = &pflip_irq_info_funcs\ - } - --#define vupdate_int_entry(reg_num)\ -- [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ -- IRQ_REG_ENTRY(OTG, reg_num,\ -- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ -- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ -- .funcs = &vblank_irq_info_funcs\ -- } -- - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic - * of DCE's DC_IRQ_SOURCE_VUPDATEx. - */ -@@ -402,12 +394,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { - dc_underflow_int_entry(6), - [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), - [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), -- vupdate_int_entry(0), -- vupdate_int_entry(1), -- vupdate_int_entry(2), -- vupdate_int_entry(3), -- vupdate_int_entry(4), -- vupdate_int_entry(5), - vupdate_no_lock_int_entry(0), - vupdate_no_lock_int_entry(1), - vupdate_no_lock_int_entry(2), -diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h -index caf961bb633f6..0fc4f90d9e3e9 100644 ---- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h -+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h -@@ -445,7 +445,7 @@ struct dmub_notification { - * of a firmware to know if feature or functionality is supported or present. - */ - #define DMUB_FW_VERSION(major, minor, revision) \ -- ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF)) -+ ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8)) - - /** - * dmub_srv_create() - creates the DMUB service. -diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h -index 571fcf23cea92..a3a9ea077f505 100644 ---- a/drivers/gpu/drm/amd/display/include/logger_types.h -+++ b/drivers/gpu/drm/amd/display/include/logger_types.h -@@ -72,6 +72,9 @@ - #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) - #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) - #define DC_LOG_DWB(...) 
DRM_DEBUG_KMS(__VA_ARGS__) -+#if defined(CONFIG_DRM_AMD_DC_DCN) -+#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__) -+#endif - - struct dal_logger; - -diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -index ef742d95ef057..c707c9bfed433 100644 ---- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -@@ -1597,6 +1597,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, - struct fixed31_32 lut2; - struct fixed31_32 delta_lut; - struct fixed31_32 delta_index; -+ const struct fixed31_32 one = dc_fixpt_from_int(1); - - i = 0; - /* fixed_pt library has problems handling too small values */ -@@ -1625,6 +1626,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, - } else - hw_x = coordinates_x[i].x; - -+ if (dc_fixpt_le(one, hw_x)) -+ hw_x = one; -+ - norm_x = dc_fixpt_mul(norm_factor, hw_x); - index = dc_fixpt_floor(norm_x); - if (index < 0 || index > 255) -diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c -index b99aa232bd8b1..6230861e78d10 100644 ---- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c -+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c -@@ -327,7 +327,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, - * - Delta for CEIL: delta_from_mid_point_in_us_1 - * - Delta for FLOOR: delta_from_mid_point_in_us_2 - */ -- if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) { -+ if (mid_point_frames_ceil && -+ (last_render_time_in_us / mid_point_frames_ceil) < -+ in_out_vrr->min_duration_in_us) { - /* Check for out of range. - * If using CEIL produces a value that is out of range, - * then we are forced to use FLOOR. -@@ -374,8 +376,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, - /* Either we've calculated the number of frames to insert, - * or we need to insert min duration frames - */ -- if (last_render_time_in_us / frames_to_insert < -- in_out_vrr->min_duration_in_us){ -+ if (frames_to_insert && -+ (last_render_time_in_us / frames_to_insert) < -+ in_out_vrr->min_duration_in_us){ - frames_to_insert -= (frames_to_insert > 1) ? - 1 : 0; - } -@@ -567,10 +570,6 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr, - * Note: We should never go above the field rate of the mode timing set. - */ - infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000); -- -- /* FreeSync HDR */ -- infopacket->sb[9] = 0; -- infopacket->sb[10] = 0; - } - - static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, -@@ -638,10 +637,6 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr, - - /* PB16 : Reserved bits 7:1, FixedRate bit 0 */ - infopacket->sb[16] = (vrr->state == VRR_STATE_ACTIVE_FIXED) ? 
1 : 0; -- -- //FreeSync HDR -- infopacket->sb[9] = 0; -- infopacket->sb[10] = 0; - } - - static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, -@@ -726,8 +721,7 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal, - /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */ - infopacket->hb2 = 0x09; - -- *payload_size = 0x0A; -- -+ *payload_size = 0x09; - } else if (dc_is_dp_signal(signal)) { - - /* HEADER */ -@@ -776,9 +770,9 @@ static void build_vrr_infopacket_header_v3(enum signal_type signal, - infopacket->hb1 = version; - - /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length] */ -- *payload_size = 0x10; -- infopacket->hb2 = *payload_size - 1; //-1 for checksum -+ infopacket->hb2 = 0x10; - -+ *payload_size = 0x10; - } else if (dc_is_dp_signal(signal)) { - - /* HEADER */ -diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c -index 57f198de5e2cb..4e075b01d48bb 100644 ---- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c -+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c -@@ -100,7 +100,8 @@ enum vsc_packet_revision { - //PB7 = MD0 - #define MASK_VTEM_MD0__VRR_EN 0x01 - #define MASK_VTEM_MD0__M_CONST 0x02 --#define MASK_VTEM_MD0__RESERVED2 0x0C -+#define MASK_VTEM_MD0__QMS_EN 0x04 -+#define MASK_VTEM_MD0__RESERVED2 0x08 - #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 - - //MD1 -@@ -109,7 +110,7 @@ enum vsc_packet_revision { - //MD2 - #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 - #define MASK_VTEM_MD2__RB 0x04 --#define MASK_VTEM_MD2__RESERVED3 0xF8 -+#define MASK_VTEM_MD2__NEXT_TFR 0xF8 - - //MD3 - #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF -diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h -index 257f280d3d53f..bd077ea224a40 100644 ---- a/drivers/gpu/drm/amd/include/amd_shared.h -+++ b/drivers/gpu/drm/amd/include/amd_shared.h -@@ -98,7 +98,8 @@ enum amd_ip_block_type { - AMD_IP_BLOCK_TYPE_ACP, - AMD_IP_BLOCK_TYPE_VCN, - AMD_IP_BLOCK_TYPE_MES, -- AMD_IP_BLOCK_TYPE_JPEG -+ AMD_IP_BLOCK_TYPE_JPEG, -+ AMD_IP_BLOCK_TYPE_NUM, - }; - - enum amd_clockgating_state { -diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h -index 7ec4331e67f26..a486769b66c6a 100644 ---- a/drivers/gpu/drm/amd/include/discovery.h -+++ b/drivers/gpu/drm/amd/include/discovery.h -@@ -143,6 +143,55 @@ struct gc_info_v1_0 { - uint32_t gc_num_gl2a; - }; - -+struct gc_info_v1_1 { -+ struct gpu_info_header header; -+ -+ uint32_t gc_num_se; -+ uint32_t gc_num_wgp0_per_sa; -+ uint32_t gc_num_wgp1_per_sa; -+ uint32_t gc_num_rb_per_se; -+ uint32_t gc_num_gl2c; -+ uint32_t gc_num_gprs; -+ uint32_t gc_num_max_gs_thds; -+ uint32_t gc_gs_table_depth; -+ uint32_t gc_gsprim_buff_depth; -+ uint32_t gc_parameter_cache_depth; -+ uint32_t gc_double_offchip_lds_buffer; -+ uint32_t gc_wave_size; -+ uint32_t gc_max_waves_per_simd; -+ uint32_t gc_max_scratch_slots_per_cu; -+ uint32_t gc_lds_size; -+ uint32_t gc_num_sc_per_se; -+ uint32_t gc_num_sa_per_se; -+ uint32_t gc_num_packer_per_sc; -+ uint32_t gc_num_gl2a; -+ uint32_t gc_num_tcp_per_sa; -+ uint32_t gc_num_sdp_interface; -+ uint32_t gc_num_tcps; -+}; -+ -+struct gc_info_v2_0 { -+ struct gpu_info_header header; -+ -+ uint32_t gc_num_se; -+ uint32_t gc_num_cu_per_sh; -+ uint32_t gc_num_sh_per_se; -+ uint32_t gc_num_rb_per_se; -+ uint32_t gc_num_tccs; -+ uint32_t gc_num_gprs; -+ uint32_t gc_num_max_gs_thds; -+ uint32_t gc_gs_table_depth; -+ uint32_t 
gc_gsprim_buff_depth; -+ uint32_t gc_parameter_cache_depth; -+ uint32_t gc_double_offchip_lds_buffer; -+ uint32_t gc_wave_size; -+ uint32_t gc_max_waves_per_simd; -+ uint32_t gc_max_scratch_slots_per_cu; -+ uint32_t gc_lds_size; -+ uint32_t gc_num_sc_per_se; -+ uint32_t gc_num_packer_per_sc; -+}; -+ - typedef struct harvest_info_header { - uint32_t signature; /* Table Signature */ - uint32_t version; /* Table Version */ -diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h -index bac15c466733d..6e27c8b16391f 100644 ---- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h -+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h -@@ -341,7 +341,8 @@ struct amd_pm_funcs { - int (*get_power_profile_mode)(void *handle, char *buf); - int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); - int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size); -- int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); -+ int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, -+ long *input, uint32_t size); - int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); - int (*smu_i2c_bus_access)(void *handle, bool acquire); - int (*gfx_state_change_set)(void *handle, uint32_t state); -diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c -index 03581d5b18360..a68496b3f9296 100644 ---- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c -+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c -@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block - { - int ret = 0; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; -+ enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; -+ -+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { -+ dev_dbg(adev->dev, "IP block%d already in the target %s state!", -+ block_type, gate ? "gate" : "ungate"); -+ return 0; -+ } - - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: -@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block - break; - } - -+ if (!ret) -+ atomic_set(&adev->pm.pwr_state[block_type], pwr_state); -+ - return ret; - } - -@@ -1035,6 +1045,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) - - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) - return false; -+ /* Don't use baco for reset in S3. -+ * This is a workaround for some platforms -+ * where entering BACO during suspend -+ * seems to cause reboots or hangs. -+ * This might be related to the fact that BACO controls -+ * power to the whole GPU including devices like audio and USB. -+ * Powering down/up everything may adversely affect these other -+ * devices. Needs more investigation. 
-+ */ -+ if (adev->in_s3) -+ return false; - - if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) - return false; -diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c -index 249cb0aeb5ae4..73794c1c12082 100644 ---- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c -+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c -@@ -2117,6 +2117,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ - } - } - -+ /* setting should not be allowed from VF if not in one VF mode */ -+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { -+ dev_attr->attr.mode &= ~S_IWUGO; -+ dev_attr->store = NULL; -+ } -+ - #undef DEVICE_ATTR_IS - - return 0; -@@ -2128,15 +2134,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev, - uint32_t mask, struct list_head *attr_list) - { - int ret = 0; -- struct device_attribute *dev_attr = &attr->dev_attr; -- const char *name = dev_attr->attr.name; - enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; - struct amdgpu_device_attr_entry *attr_entry; -+ struct device_attribute *dev_attr; -+ const char *name; - - int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, - uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; - -- BUG_ON(!attr); -+ if (!attr) -+ return -EINVAL; -+ -+ dev_attr = &attr->dev_attr; -+ name = dev_attr->attr.name; - - attr_update = attr->attr_update ? attr->attr_update : default_attr_update; - -@@ -3439,8 +3449,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, - attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || - attr == &sensor_dev_attr_power2_cap.dev_attr.attr || - attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || -- attr == &sensor_dev_attr_power2_label.dev_attr.attr || -- attr == &sensor_dev_attr_power1_label.dev_attr.attr)) -+ attr == &sensor_dev_attr_power2_label.dev_attr.attr)) - return 0; - - return effective_mode; -diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h -index 98f1b3d8c1d59..16e3f72d31b9f 100644 ---- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h -+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h -@@ -417,6 +417,12 @@ struct amdgpu_dpm { - enum amd_dpm_forced_level forced_level; - }; - -+enum ip_power_state { -+ POWER_STATE_UNKNOWN, -+ POWER_STATE_ON, -+ POWER_STATE_OFF, -+}; -+ - struct amdgpu_pm { - struct mutex mutex; - u32 current_sclk; -@@ -452,6 +458,8 @@ struct amdgpu_pm { - struct i2c_adapter smu_i2c; - struct mutex smu_i2c_mutex; - struct list_head pm_attr_list; -+ -+ atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; - }; - - #define R600_SSTU_DFLT 0 -diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c -index 321215003643b..0f5930e797bd5 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c -@@ -924,7 +924,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u - return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size); - } - --static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) -+static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, -+ long *input, uint32_t size) - { - struct pp_hwmgr *hwmgr = handle; - -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c -index 67d7da0b6fed5..1d829402cd2e2 100644 ---- 
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c -@@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) - for (i = 0; i < table_entries; i++) { - result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); - if (result) { -+ kfree(hwmgr->current_ps); - kfree(hwmgr->request_ps); - kfree(hwmgr->ps); -+ hwmgr->current_ps = NULL; - hwmgr->request_ps = NULL; - hwmgr->ps = NULL; - return -EINVAL; -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c -index 1de3ae77e03ed..cf74621f94a75 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c -@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetHardMinFclkByFreq, - hwmgr->display_config->num_display > 3 ? -- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : -+ (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : - min_mclk, - NULL); - - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetHardMinSocclkByFreq, -- data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, -+ data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, - NULL); - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetHardMinVcn, -@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, - NULL); - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxFclkByFreq, -- data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, -+ data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, - NULL); - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxSocclkByFreq, -- data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, -+ data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, - NULL); - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetSoftMaxVcn, -@@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, - else - i = 1; - -- size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", -+ size += sprintf(buf + size, "0: %uMhz %s\n", - data->gfx_min_freq_limit/100, - i == 0 ? "*" : ""); -- size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", -+ size += sprintf(buf + size, "1: %uMhz %s\n", - i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, - i == 1 ? "*" : ""); -- size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", -+ size += sprintf(buf + size, "2: %uMhz %s\n", - data->gfx_max_freq_limit/100, - i == 2 ? "*" : ""); - break; -@@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); - - for (i = 0; i < mclk_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, - mclk_table->entries[i].clk / 100, - ((mclk_table->entries[i].clk / 100) -@@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, - if (ret) - return ret; - -- size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); -- size += sysfs_emit_at(buf, size, "0: %10uMhz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_SCLK"); -+ size += sprintf(buf + size, "0: %10uMhz\n", - (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); -- size += sysfs_emit_at(buf, size, "1: %10uMhz\n", -+ size += sprintf(buf + size, "1: %10uMhz\n", - (data->gfx_actual_soft_max_freq > 0) ? 
data->gfx_actual_soft_max_freq : max_freq); - } - break; -@@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, - if (ret) - return ret; - -- size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); -- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_RANGE"); -+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", - min_freq, max_freq); - } - break; -@@ -1456,6 +1456,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) - if (!buf) - return -EINVAL; - -+ phm_get_sysfs_buf(&buf, &size); -+ - size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], - title[1], title[2], title[3], title[4], title[5]); - -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c -index e7803ce8f67aa..611969bf45207 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c -@@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, - now = i; - - for (i = 0; i < sclk_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; -@@ -4941,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, - now = i; - - for (i = 0; i < mclk_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; -@@ -4955,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, - now = i; - - for (i = 0; i < pcie_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, -+ size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : - (pcie_table->dpm_levels[i].value == 2) ? 
"8.0GT/s, x16" : "", -@@ -4963,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, - break; - case OD_SCLK: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); -+ size += sprintf(buf + size, "%s:\n", "OD_SCLK"); - for (i = 0; i < odn_sclk_table->num_of_pl; i++) -- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", -+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n", - i, odn_sclk_table->entries[i].clock/100, - odn_sclk_table->entries[i].vddc); - } - break; - case OD_MCLK: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); -+ size += sprintf(buf + size, "%s:\n", "OD_MCLK"); - for (i = 0; i < odn_mclk_table->num_of_pl; i++) -- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", -+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n", - i, odn_mclk_table->entries[i].clock/100, - odn_mclk_table->entries[i].vddc); - } - break; - case OD_RANGE: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); -- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_RANGE"); -+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", - data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, - hwmgr->platform_descriptor.overdriveLimit.engineClock/100); -- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", -+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", - data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, - hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); -- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", -+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n", - data->odn_dpm_table.min_vddc, - data->odn_dpm_table.max_vddc); - } -@@ -5518,6 +5518,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) - if (!buf) - return -EINVAL; - -+ phm_get_sysfs_buf(&buf, &size); -+ - size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", - title[0], title[1], title[2], title[3], - title[4], title[5], title[6], title[7]); -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c -index b94a77e4e7147..03bf8f0692228 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c -@@ -1559,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, - CURR_SCLK_INDEX); - - for (i = 0; i < sclk_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->entries[i].clk / 100, - (i == now) ? "*" : ""); - break; -@@ -1571,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, - CURR_MCLK_INDEX); - - for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, - (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); - break; -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h -index ad33983a8064e..2a75da1e9f035 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h -@@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry); - -+/* -+ * Helper function to make sysfs_emit_at() happy. 
Align buf to -+ * the current page boundary and record the offset. -+ */ -+static inline void phm_get_sysfs_buf(char **buf, int *offset) -+{ -+ if (!*buf || !offset) -+ return; -+ -+ *offset = offset_in_page(*buf); -+ *buf -= *offset; -+} -+ - int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); - - void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c -index c152a61ddd2c9..e6336654c5655 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c -@@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) - int ret = 0; - int size = 0; - -+ phm_get_sysfs_buf(&buf, &size); -+ - ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[EnableAllSmuFeatures] Failed to get enabled smc features!", -@@ -4650,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - else - count = sclk_table->count; - for (i = 0; i < count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; -@@ -4661,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); - - for (i = 0; i < mclk_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; -@@ -4672,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); - - for (i = 0; i < soc_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, soc_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; -@@ -4684,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); - - for (i = 0; i < dcef_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, dcef_table->dpm_levels[i].value / 100, - (dcef_table->dpm_levels[i].value / 100 == now) ? - "*" : ""); -@@ -4698,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - gen_speed = pptable->PcieGenSpeed[i]; - lane_width = pptable->PcieLaneCount[i]; - -- size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, -+ size += sprintf(buf + size, "%d: %s %s %s\n", i, - (gen_speed == 0) ? "2.5GT/s," : - (gen_speed == 1) ? "5.0GT/s," : - (gen_speed == 2) ? 
"8.0GT/s," : -@@ -4717,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, - - case OD_SCLK: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); -+ size += sprintf(buf + size, "%s:\n", "OD_SCLK"); - podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; - for (i = 0; i < podn_vdd_dep->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", -+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n", - i, podn_vdd_dep->entries[i].clk / 100, - podn_vdd_dep->entries[i].vddc); - } - break; - case OD_MCLK: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); -+ size += sprintf(buf + size, "%s:\n", "OD_MCLK"); - podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; - for (i = 0; i < podn_vdd_dep->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", -+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n", - i, podn_vdd_dep->entries[i].clk/100, - podn_vdd_dep->entries[i].vddc); - } - break; - case OD_RANGE: - if (hwmgr->od_enabled) { -- size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); -- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_RANGE"); -+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", - data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, - hwmgr->platform_descriptor.overdriveLimit.engineClock/100); -- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", -+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", - data->golden_dpm_table.mem_table.dpm_levels[0].value/100, - hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); -- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", -+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n", - data->odn_dpm_table.min_vddc, - data->odn_dpm_table.max_vddc); - } -@@ -5112,6 +5114,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) - if (!buf) - return -EINVAL; - -+ phm_get_sysfs_buf(&buf, &size); -+ - size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], - title[1], title[2], title[3], title[4], title[5]); - -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c -index dad3e3741a4e8..190af79f3236f 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c -@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, - int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, - uint32_t *speed) - { -- uint32_t current_rpm; -- uint32_t percent = 0; -- -- if (hwmgr->thermal_controller.fanInfo.bNoFan) -- return 0; -+ struct amdgpu_device *adev = hwmgr->adev; -+ uint32_t duty100, duty; -+ uint64_t tmp64; - -- if (vega10_get_current_rpm(hwmgr, ¤t_rpm)) -- return -1; -+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), -+ CG_FDO_CTRL1, FMAX_DUTY100); -+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), -+ CG_THERMAL_STATUS, FDO_PWM_DUTY); - -- if (hwmgr->thermal_controller. -- advanceFanControlParameters.usMaxFanRPM != 0) -- percent = current_rpm * 255 / -- hwmgr->thermal_controller. 
-- advanceFanControlParameters.usMaxFanRPM; -+ if (!duty100) -+ return -EINVAL; - -- *speed = MIN(percent, 255); -+ tmp64 = (uint64_t)duty * 255; -+ do_div(tmp64, duty100); -+ *speed = MIN((uint32_t)tmp64, 255); - - return 0; - } -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c -index 8558718e15a8f..a2f4d6773d458 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c -@@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) - int ret = 0; - int size = 0; - -+ phm_get_sysfs_buf(&buf, &size); -+ - ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[EnableAllSmuFeatures] Failed to get enabled smc features!", -@@ -2256,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, - "Attempt to get gfx clk levels Failed!", - return -1); - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); - break; -@@ -2272,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, - "Attempt to get memory clk levels Failed!", - return -1); - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); - break; -@@ -2290,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, - "Attempt to get soc clk levels Failed!", - return -1); - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); - break; -@@ -2308,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, - "Attempt to get dcef clk levels Failed!", - return -1); - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz / 1000 == now) ? 
"*" : ""); - break; -diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c -index 0cf39c1244b1c..299b5c838bf70 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c -@@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, - data->od8_settings.od8_settings_array; - OverDriveTable_t *od_table = - &(data->smc_state_table.overdrive_table); -- int32_t input_index, input_clk, input_vol, i; -+ int32_t input_clk, input_vol, i; -+ uint32_t input_index; - int od8_id; - int ret; - -@@ -3238,6 +3239,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) - int ret = 0; - int size = 0; - -+ phm_get_sysfs_buf(&buf, &size); -+ - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[EnableAllSmuFeatures] Failed to get enabled smc features!", -@@ -3372,13 +3375,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - return ret); - - if (vega20_get_sclks(hwmgr, &clocks)) { -- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", -+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", - now / 100); - break; - } - - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); - break; -@@ -3390,13 +3393,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - return ret); - - if (vega20_get_memclocks(hwmgr, &clocks)) { -- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", -+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", - now / 100); - break; - } - - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); - break; -@@ -3408,13 +3411,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - return ret); - - if (vega20_get_socclocks(hwmgr, &clocks)) { -- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", -+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", - now / 100); - break; - } - - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); - break; -@@ -3426,7 +3429,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - return ret); - - for (i = 0; i < fclk_dpm_table->count; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, fclk_dpm_table->dpm_levels[i].value, - fclk_dpm_table->dpm_levels[i].value == (now / 100) ? 
"*" : ""); - break; -@@ -3438,13 +3441,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - return ret); - - if (vega20_get_dcefclocks(hwmgr, &clocks)) { -- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", -+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", - now / 100); - break; - } - - for (i = 0; i < clocks.num_levels; i++) -- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", -+ size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); - break; -@@ -3458,7 +3461,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - gen_speed = pptable->PcieGenSpeed[i]; - lane_width = pptable->PcieLaneCount[i]; - -- size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, -+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, - (gen_speed == 0) ? "2.5GT/s," : - (gen_speed == 1) ? "5.0GT/s," : - (gen_speed == 2) ? "8.0GT/s," : -@@ -3479,18 +3482,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - case OD_SCLK: - if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && - od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { -- size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); -- size += sysfs_emit_at(buf, size, "0: %10uMhz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_SCLK"); -+ size += sprintf(buf + size, "0: %10uMhz\n", - od_table->GfxclkFmin); -- size += sysfs_emit_at(buf, size, "1: %10uMhz\n", -+ size += sprintf(buf + size, "1: %10uMhz\n", - od_table->GfxclkFmax); - } - break; - - case OD_MCLK: - if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { -- size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); -- size += sysfs_emit_at(buf, size, "1: %10uMhz\n", -+ size += sprintf(buf + size, "%s:\n", "OD_MCLK"); -+ size += sprintf(buf + size, "1: %10uMhz\n", - od_table->UclkFmax); - } - -@@ -3503,14 +3506,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { -- size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); -- size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", -+ size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); -+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n", - od_table->GfxclkFreq1, - od_table->GfxclkVolt1 / VOLTAGE_SCALE); -- size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", -+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n", - od_table->GfxclkFreq2, - od_table->GfxclkVolt2 / VOLTAGE_SCALE); -- size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", -+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n", - od_table->GfxclkFreq3, - od_table->GfxclkVolt3 / VOLTAGE_SCALE); - } -@@ -3518,17 +3521,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - break; - - case OD_RANGE: -- size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); -+ size += sprintf(buf + size, "%s:\n", "OD_RANGE"); - - if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && - od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { -- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", -+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", - od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, - od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); - } - - if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { -- size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", -+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - od8_settings[OD8_SETTING_UCLK_FMAX].min_value, - 
od8_settings[OD8_SETTING_UCLK_FMAX].max_value); - } -@@ -3539,22 +3542,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", -+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", - od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, - od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", -+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", -+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", - od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, - od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", -+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", -+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", - od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, - od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); -- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", -+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); - } -@@ -4003,6 +4006,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) - if (!buf) - return -EINVAL; - -+ phm_get_sysfs_buf(&buf, &size); -+ - size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", - title[0], title[1], title[2], title[3], title[4], title[5], - title[6], title[7], title[8], title[9], title[10]); -diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c -index bcae42cef3743..6ba4c2ae69a63 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c -@@ -1609,19 +1609,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) - - static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) - { -- u8 i; -- struct amdgpu_clock_voltage_dependency_table *table = -- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; -- -- for (i = 0; i < table->count; i++) { -- if (table->entries[i].clk >= 0) /* XXX */ -- break; -- } -- -- if (i >= table->count) -- i = table->count - 1; -- -- return i; -+ return 0; - } - - static void kv_update_acp_boot_level(struct amdgpu_device *adev) -diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c -index 81f82aa05ec28..66fc63f1f1c17 100644 ---- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c -+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c -@@ -7247,17 +7247,15 @@ static int si_parse_power_table(struct amdgpu_device *adev) - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; -- for (i = 0; i < state_array->ucNumEntries; i++) { -+ for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - 
power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); -- if (ps == NULL) { -- kfree(adev->pm.dpm.ps); -+ if (ps == NULL) - return -ENOMEM; -- } - adev->pm.dpm.ps[i].ps_priv = ps; - si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - non_clock_info, -@@ -7279,8 +7277,8 @@ static int si_parse_power_table(struct amdgpu_device *adev) - k++; - } - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; -+ adev->pm.dpm.num_ps++; - } -- adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { -diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -index 04863a7971155..952a8aa69b9ee 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -@@ -138,7 +138,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, - uint32_t *min, - uint32_t *max) - { -- int ret = 0; -+ int ret = -ENOTSUPP; - - if (!min && !max) - return -EINVAL; -@@ -1536,9 +1536,7 @@ static int smu_suspend(void *handle) - - smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); - -- /* skip CGPG when in S0ix */ -- if (smu->is_apu && !adev->in_s0ix) -- smu_set_gfx_cgpg(&adev->smu, false); -+ smu_set_gfx_cgpg(&adev->smu, false); - - return 0; - } -@@ -1569,8 +1567,7 @@ static int smu_resume(void *handle) - return ret; - } - -- if (smu->is_apu) -- smu_set_gfx_cgpg(&adev->smu, true); -+ smu_set_gfx_cgpg(&adev->smu, true); - - smu->disable_uclk_switch = 0; - -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c -index b1ad451af06bd..dfba0bc732073 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c -@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, - enum smu_clk_type clk_type, char *buf) - { - uint16_t *curve_settings; -- int i, size = 0, ret = 0; -+ int i, levels, size = 0, ret = 0; - uint32_t cur_value = 0, value = 0, count = 0; - uint32_t freq_values[3] = {0}; - uint32_t mark_index = 0; -@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, - freq_values[1] = cur_value; - mark_index = cur_value == freq_values[0] ? 0 : - cur_value == freq_values[2] ? 2 : 1; -- if (mark_index != 1) -- freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - -- for (i = 0; i < 3; i++) { -+ levels = 3; -+ if (mark_index != 1) { -+ levels = 2; -+ freq_values[1] = freq_values[2]; -+ } -+ -+ for (i = 0; i < levels; i++) { - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], - i == mark_index ? 
"*" : ""); - } -- - } - break; - case SMU_PCIE: -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -index ca57221e39629..d4fde146bd4c9 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -@@ -338,7 +338,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, - if (smu->dc_controlled_by_gpio) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT); - -- if (amdgpu_aspm) -+ if (amdgpu_device_should_use_aspm(adev)) - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT); - - return 0; -@@ -358,6 +358,23 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) - smu_baco->platform_support = - (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : - false; -+ -+ /* -+ * Disable BACO entry/exit completely on below SKUs to -+ * avoid hardware intermittent failures. -+ */ -+ if (((adev->pdev->device == 0x73A1) && -+ (adev->pdev->revision == 0x00)) || -+ ((adev->pdev->device == 0x73BF) && -+ (adev->pdev->revision == 0xCF)) || -+ ((adev->pdev->device == 0x7422) && -+ (adev->pdev->revision == 0x00)) || -+ ((adev->pdev->device == 0x73A3) && -+ (adev->pdev->revision == 0x00)) || -+ ((adev->pdev->device == 0x73E3) && -+ (adev->pdev->revision == 0x00))) -+ smu_baco->platform_support = false; -+ - } - } - -@@ -418,6 +435,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu) - return 0; - } - -+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu) -+{ -+ struct amdgpu_device *adev = smu->adev; -+ uint32_t *board_reserved; -+ uint16_t *freq_table_gfx; -+ uint32_t i; -+ -+ /* Fix some OEM SKU specific stability issues */ -+ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved); -+ if ((adev->pdev->device == 0x73DF) && -+ (adev->pdev->revision == 0XC3) && -+ (adev->pdev->subsystem_device == 0x16C2) && -+ (adev->pdev->subsystem_vendor == 0x1043)) -+ board_reserved[0] = 1387; -+ -+ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx); -+ if ((adev->pdev->device == 0x73DF) && -+ (adev->pdev->revision == 0XC3) && -+ ((adev->pdev->subsystem_device == 0x16C2) || -+ (adev->pdev->subsystem_device == 0x133C)) && -+ (adev->pdev->subsystem_vendor == 0x1043)) { -+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) { -+ if (freq_table_gfx[i] > 2500) -+ freq_table_gfx[i] = 2500; -+ } -+ } -+ -+ return 0; -+} -+ - static int sienna_cichlid_setup_pptable(struct smu_context *smu) - { - int ret = 0; -@@ -438,7 +485,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu) - if (ret) - return ret; - -- return ret; -+ return sienna_cichlid_patch_pptable_quirk(smu); - } - - static int sienna_cichlid_tables_init(struct smu_context *smu) -@@ -1278,21 +1325,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) - &dpm_context->dpm_tables.soc_table; - struct smu_umd_pstate_table *pstate_table = - &smu->pstate_table; -+ struct amdgpu_device *adev = smu->adev; - - pstate_table->gfxclk_pstate.min = gfx_table->min; - pstate_table->gfxclk_pstate.peak = gfx_table->max; -- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) -- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; - - pstate_table->uclk_pstate.min = mem_table->min; - pstate_table->uclk_pstate.peak = mem_table->max; -- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) -- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; - 
- pstate_table->socclk_pstate.min = soc_table->min; - pstate_table->socclk_pstate.peak = soc_table->max; -- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) -+ -+ switch (adev->asic_type) { -+ case CHIP_SIENNA_CICHLID: -+ case CHIP_NAVY_FLOUNDER: -+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; -+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; - pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; -+ break; -+ case CHIP_DIMGREY_CAVEFISH: -+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK; -+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK; -+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK; -+ break; -+ case CHIP_BEIGE_GOBY: -+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK; -+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK; -+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK; -+ break; -+ default: -+ break; -+ } - - return 0; - } -@@ -1865,33 +1928,94 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu, - return 0; - } - -+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu, -+ uint32_t *gen_speed_override, -+ uint32_t *lane_width_override) -+{ -+ struct amdgpu_device *adev = smu->adev; -+ -+ *gen_speed_override = 0xff; -+ *lane_width_override = 0xff; -+ -+ switch (adev->pdev->device) { -+ case 0x73A0: -+ case 0x73A1: -+ case 0x73A2: -+ case 0x73A3: -+ case 0x73AB: -+ case 0x73AE: -+ /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ -+ *lane_width_override = 6; -+ break; -+ case 0x73E0: -+ case 0x73E1: -+ case 0x73E3: -+ *lane_width_override = 4; -+ break; -+ case 0x7420: -+ case 0x7421: -+ case 0x7422: -+ case 0x7423: -+ case 0x7424: -+ *lane_width_override = 3; -+ break; -+ default: -+ break; -+ } -+} -+ -+#define MAX(a, b) ((a) > (b) ? (a) : (b)) -+ - static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, - uint32_t pcie_gen_cap, - uint32_t pcie_width_cap) - { - struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; -- -- uint32_t smu_pcie_arg; -+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; -+ uint32_t gen_speed_override, lane_width_override; - uint8_t *table_member1, *table_member2; -+ uint32_t min_gen_speed, max_gen_speed; -+ uint32_t min_lane_width, max_lane_width; -+ uint32_t smu_pcie_arg; - int ret, i; - - GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); - GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - -- /* lclk dpm table setup */ -- for (i = 0; i < MAX_PCIE_CONF; i++) { -- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i]; -- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i]; -+ sienna_cichlid_get_override_pcie_settings(smu, -+ &gen_speed_override, -+ &lane_width_override); -+ -+ /* PCIE gen speed override */ -+ if (gen_speed_override != 0xff) { -+ min_gen_speed = MIN(pcie_gen_cap, gen_speed_override); -+ max_gen_speed = MIN(pcie_gen_cap, gen_speed_override); -+ } else { -+ min_gen_speed = MAX(0, table_member1[0]); -+ max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); -+ min_gen_speed = min_gen_speed > max_gen_speed ? 
-+ max_gen_speed : min_gen_speed; -+ } -+ pcie_table->pcie_gen[0] = min_gen_speed; -+ pcie_table->pcie_gen[1] = max_gen_speed; -+ -+ /* PCIE lane width override */ -+ if (lane_width_override != 0xff) { -+ min_lane_width = MIN(pcie_width_cap, lane_width_override); -+ max_lane_width = MIN(pcie_width_cap, lane_width_override); -+ } else { -+ min_lane_width = MAX(1, table_member2[0]); -+ max_lane_width = MIN(pcie_width_cap, table_member2[1]); -+ min_lane_width = min_lane_width > max_lane_width ? -+ max_lane_width : min_lane_width; - } -+ pcie_table->pcie_lane[0] = min_lane_width; -+ pcie_table->pcie_lane[1] = max_lane_width; - - for (i = 0; i < NUM_LINK_LEVELS; i++) { -- smu_pcie_arg = (i << 16) | -- ((table_member1[i] <= pcie_gen_cap) ? -- (table_member1[i] << 8) : -- (pcie_gen_cap << 8)) | -- ((table_member2[i] <= pcie_width_cap) ? -- table_member2[i] : -- pcie_width_cap); -+ smu_pcie_arg = (i << 16 | -+ pcie_table->pcie_gen[i] << 8 | -+ pcie_table->pcie_lane[i]); - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, -@@ -1899,11 +2023,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, - NULL); - if (ret) - return ret; -- -- if (table_member1[i] > pcie_gen_cap) -- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; -- if (table_member2[i] > pcie_width_cap) -- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; - } - - return 0; -@@ -1950,16 +2069,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) - (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; - OverDriveTable_t *user_od_table = - (OverDriveTable_t *)smu->smu_table.user_overdrive_table; -+ OverDriveTable_t user_od_table_bak; - int ret = 0; - -- /* -- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as -- * - either they already have the default OD settings got during cold bootup -- * - or they have some user customized OD settings which cannot be overwritten -- */ -- if (smu->adev->in_suspend) -- return 0; -- - ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, - 0, (void *)boot_od_table, false); - if (ret) { -@@ -1970,7 +2082,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) - sienna_cichlid_dump_od_table(smu, boot_od_table); - - memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t)); -- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); -+ -+ /* -+ * For S3/S4/Runpm resume, we need to setup those overdrive tables again, -+ * but we have to preserve user defined values in "user_od_table". 
-+ */ -+ if (!smu->adev->in_suspend) { -+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); -+ smu->user_dpm_profile.user_od = false; -+ } else if (smu->user_dpm_profile.user_od) { -+ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t)); -+ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); -+ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin; -+ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax; -+ user_od_table->UclkFmin = user_od_table_bak.UclkFmin; -+ user_od_table->UclkFmax = user_od_table_bak.UclkFmax; -+ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset; -+ } - - return 0; - } -@@ -2180,6 +2308,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, - return ret; - } - -+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu) -+{ -+ struct smu_table_context *table_context = &smu->smu_table; -+ OverDriveTable_t *od_table = table_context->overdrive_table; -+ OverDriveTable_t *user_od_table = table_context->user_overdrive_table; -+ int res; -+ -+ res = smu_v11_0_restore_user_od_settings(smu); -+ if (res == 0) -+ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t)); -+ -+ return res; -+} -+ - static int sienna_cichlid_run_btc(struct smu_context *smu) - { - return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); -@@ -3728,14 +3870,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, - - static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) - { -- struct smu_table_context *table_context = &smu->smu_table; -- PPTable_t *smc_pptable = table_context->driver_pptable; -+ uint16_t *mgpu_fan_boost_limit_rpm; - -+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm); - /* - * Skip the MGpuFanBoost setting for those ASICs - * which do not support it - */ -- if (!smc_pptable->MGpuFanBoostLimitRpm) -+ if (*mgpu_fan_boost_limit_rpm == 0) - return 0; - - return smu_cmn_send_smc_msg_with_param(smu, -@@ -3869,6 +4011,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { - .dump_pptable = sienna_cichlid_dump_pptable, - .init_microcode = smu_v11_0_init_microcode, - .load_microcode = smu_v11_0_load_microcode, -+ .fini_microcode = smu_v11_0_fini_microcode, - .init_smc_tables = sienna_cichlid_init_smc_tables, - .fini_smc_tables = smu_v11_0_fini_smc_tables, - .init_power = smu_v11_0_init_power, -@@ -3916,7 +4059,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { - .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, - .set_default_od_settings = sienna_cichlid_set_default_od_settings, - .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table, -- .restore_user_od_settings = smu_v11_0_restore_user_od_settings, -+ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings, - .run_btc = sienna_cichlid_run_btc, - .set_power_source = smu_v11_0_set_power_source, - .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h -index 38cd0ece24f6b..42f705c7a36f8 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h -@@ -33,6 +33,14 @@ typedef enum { - #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 - #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 - -+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950 -+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960 -+#define 
DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676 -+ -+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200 -+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960 -+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000 -+ - extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); - - #endif -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c -index 87b055466a33f..83fa3d20a1d57 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c -@@ -772,7 +772,7 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) - goto failed; - } - -- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); -+ bitmap_to_arr32(feature_mask, feature->allowed, 64); - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, - feature_mask[1], NULL); -@@ -1235,6 +1235,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, - uint32_t crystal_clock_freq = 2500; - uint32_t tach_period; - -+ if (speed == 0) -+ return -EINVAL; - /* - * To prevent from possible overheat, some ASICs may have requirement - * for minimum fan speed: -@@ -1593,6 +1595,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) - if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; - -+ /* return true if ASIC is in BACO state already */ -+ if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) -+ return true; -+ - /* Arcturus does not support this bit mask */ - if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && - !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c -index f6ef0ce6e9e2c..5a9b47133db12 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c -@@ -579,7 +579,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, - DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_legacy_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); -- int i, size = 0, ret = 0; -+ int i, idx, size = 0, ret = 0; - uint32_t cur_value = 0, value = 0, count = 0; - bool cur_value_match_level = false; - -@@ -653,7 +653,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, - case SMU_MCLK: - case SMU_FCLK: - for (i = 0; i < count; i++) { -- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); -+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; -+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); - if (ret) - return ret; - if (!value) -@@ -680,7 +681,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, - DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); -- int i, size = 0, ret = 0; -+ int i, idx, size = 0, ret = 0; - uint32_t cur_value = 0, value = 0, count = 0; - bool cur_value_match_level = false; - -@@ -754,7 +755,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu, - case SMU_MCLK: - case SMU_FCLK: - for (i = 0; i < count; i++) { -- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); -+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; -+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); - if (ret) - return ret; - if (!value) -@@ -1386,52 +1388,38 @@ static int vangogh_set_performance_level(struct smu_context *smu, - uint32_t soc_mask, mclk_mask, fclk_mask; - uint32_t vclk_mask = 0, dclk_mask = 0; - -+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -+ - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: -- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; -+ smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = vangogh_force_dpm_limit_value(smu, true); -+ if (ret) -+ return ret; - break; - case AMD_DPM_FORCED_LEVEL_LOW: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; -- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; -- -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -+ smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; - - ret = vangogh_force_dpm_limit_value(smu, false); -+ if (ret) -+ return ret; - break; - case AMD_DPM_FORCED_LEVEL_AUTO: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -- - ret = vangogh_unforce_dpm_levels(smu); -- break; -- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: -- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; -- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; -- -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -- -- ret = smu_cmn_send_smc_msg_with_param(smu, -- SMU_MSG_SetHardMinGfxClk, -- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); -- if (ret) -- return ret; -- -- ret = smu_cmn_send_smc_msg_with_param(smu, -- SMU_MSG_SetSoftMaxGfxClk, -- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); - if (ret) - return ret; -+ break; -+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: -+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; -+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; - - ret = vangogh_get_profiling_clk_mask(smu, level, - &vclk_mask, -@@ -1446,32 +1434,15 @@ static int vangogh_set_performance_level(struct smu_context *smu, - vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); - vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); - vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); -- - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; -- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; -- -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -- -- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, -- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); -- if (ret) -- return ret; -- -- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, -- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); -- if (ret) -- return ret; -+ smu->gfx_actual_soft_max_freq = 
smu->gfx_default_hard_min_freq; - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -- - ret = vangogh_get_profiling_clk_mask(smu, level, - NULL, - NULL, -@@ -1484,29 +1455,29 @@ static int vangogh_set_performance_level(struct smu_context *smu, - vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: -- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; -- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; -- -- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; -- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; -- -- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, -- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); -- if (ret) -- return ret; -+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; -+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; - -- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, -- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); -+ ret = vangogh_set_peak_clock_by_device(smu); - if (ret) - return ret; -- -- ret = vangogh_set_peak_clock_by_device(smu); - break; - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: -- break; -+ return 0; - } -+ -+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, -+ smu->gfx_actual_hard_min_freq, NULL); -+ if (ret) -+ return ret; -+ -+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, -+ smu->gfx_actual_soft_max_freq, NULL); -+ if (ret) -+ return ret; -+ - return ret; - } - -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c -index 145f13b8c977d..9a2584b593531 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c -@@ -485,7 +485,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) - static int renoir_print_clk_levels(struct smu_context *smu, - enum smu_clk_type clk_type, char *buf) - { -- int i, size = 0, ret = 0; -+ int i, idx, size = 0, ret = 0; - uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; - SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); -@@ -585,7 +585,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, - case SMU_VCLK: - case SMU_DCLK: - for (i = 0; i < count; i++) { -- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); -+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; -+ ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); - if (ret) - return ret; - if (!value) -@@ -1127,6 +1128,39 @@ static int renoir_get_power_profile_mode(struct smu_context *smu, - return size; - } - -+static void renoir_get_ss_power_percent(SmuMetrics_t *metrics, -+ uint32_t *apu_percent, uint32_t *dgpu_percent) -+{ -+ uint32_t apu_boost = 0; -+ uint32_t dgpu_boost = 0; -+ uint16_t apu_limit = 0; -+ uint16_t dgpu_limit = 0; -+ uint16_t apu_power = 0; -+ uint16_t dgpu_power = 0; -+ -+ apu_power = metrics->ApuPower; -+ apu_limit = metrics->StapmOriginalLimit; -+ if (apu_power > apu_limit && apu_limit != 0) -+ apu_boost = ((apu_power - apu_limit) * 100) / apu_limit; -+ apu_boost = (apu_boost > 100) ? 
100 : apu_boost; -+ -+ dgpu_power = metrics->dGpuPower; -+ if (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit) -+ dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOriginalLimit; -+ if (dgpu_power > dgpu_limit && dgpu_limit != 0) -+ dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit; -+ dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost; -+ -+ if (dgpu_boost >= apu_boost) -+ apu_boost = 0; -+ else -+ dgpu_boost = 0; -+ -+ *apu_percent = apu_boost; -+ *dgpu_percent = dgpu_boost; -+} -+ -+ - static int renoir_get_smu_metrics_data(struct smu_context *smu, - MetricsMember_t member, - uint32_t *value) -@@ -1135,6 +1169,9 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, - - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; - int ret = 0; -+ uint32_t apu_percent = 0; -+ uint32_t dgpu_percent = 0; -+ - - mutex_lock(&smu->metrics_lock); - -@@ -1183,26 +1220,18 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, - *value = metrics->Voltage[1]; - break; - case METRICS_SS_APU_SHARE: -- /* return the percentage of APU power with respect to APU's power limit. -- * percentage is reported, this isn't boost value. Smartshift power -- * boost/shift is only when the percentage is more than 100. -+ /* return the percentage of APU power boost -+ * with respect to APU's power limit. - */ -- if (metrics->StapmOriginalLimit > 0) -- *value = (metrics->ApuPower * 100) / metrics->StapmOriginalLimit; -- else -- *value = 0; -+ renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); -+ *value = apu_percent; - break; - case METRICS_SS_DGPU_SHARE: -- /* return the percentage of dGPU power with respect to dGPU's power limit. -- * percentage is reported, this isn't boost value. Smartshift power -- * boost/shift is only when the percentage is more than 100. -+ /* return the percentage of dGPU power boost -+ * with respect to dGPU's power limit. - */ -- if ((metrics->dGpuPower > 0) && -- (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit)) -- *value = (metrics->dGpuPower * 100) / -- (metrics->StapmCurrentLimit - metrics->StapmOriginalLimit); -- else -- *value = 0; -+ renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); -+ *value = dgpu_percent; - break; - default: - *value = UINT_MAX; -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c -index d60b8c5e87157..9c91e79c955fb 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c -@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) - - int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) - { -- if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) -+ /* Until now the SMU12 only implemented for Renoir series so here neen't do APU check. 
*/ -+ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix) - return 0; - - return smu_cmn_send_smc_msg_with_param(smu, -@@ -191,6 +192,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu) - kfree(smu_table->watermarks_table); - smu_table->watermarks_table = NULL; - -+ kfree(smu_table->gpu_metrics_table); -+ smu_table->gpu_metrics_table = NULL; -+ - return 0; - } - -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c -index 5019903db492a..d0c6b864d00af 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c -@@ -1619,7 +1619,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) - { - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, -- en ? 1 : 0, -+ en ? 0 : 1, - NULL); - } - -@@ -1627,6 +1627,7 @@ static const struct throttling_logging_label { - uint32_t feature_mask; - const char *label; - } logging_label[] = { -+ {(1U << THROTTLER_TEMP_GPU_BIT), "GPU"}, - {(1U << THROTTLER_TEMP_MEM_BIT), "HBM"}, - {(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"}, - {(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"}, -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c -index a0e50f23b1dd7..a3723ba359231 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c -@@ -197,6 +197,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu) - - int smu_v13_0_check_fw_version(struct smu_context *smu) - { -+ struct amdgpu_device *adev = smu->adev; - uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; -@@ -209,6 +210,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) - smu_major = (smu_version >> 16) & 0xffff; - smu_minor = (smu_version >> 8) & 0xff; - smu_debug = (smu_version >> 0) & 0xff; -+ if (smu->is_apu) -+ adev->pm.fw_version = smu_version; - - switch (smu->adev->asic_type) { - case CHIP_ALDEBARAN: -@@ -453,11 +456,11 @@ int smu_v13_0_init_power(struct smu_context *smu) - if (smu_power->power_context || smu_power->power_context_size != 0) - return -EINVAL; - -- smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), -+ smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), - GFP_KERNEL); - if (!smu_power->power_context) - return -ENOMEM; -- smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); -+ smu_power->power_context_size = sizeof(struct smu_13_0_power_context); - - return 0; - } -@@ -718,7 +721,7 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu) - if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) - goto failed; - -- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); -+ bitmap_to_arr32(feature_mask, feature->allowed, 64); - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, - feature_mask[1], NULL); -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c -index a403657151ba1..81b1d4ea8a96c 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c -@@ -291,14 +291,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu) - - static int yellow_carp_mode_reset(struct smu_context *smu, int type) - { -- int ret = 0, index = 0; -- -- index = 
smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, -- SMU_MSG_GfxDeviceDriverReset); -- if (index < 0) -- return index == -EACCES ? 0 : index; -+ int ret = 0; - -- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); -+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL); - if (ret) - dev_err(smu->adev->dev, "Failed to mode reset!\n"); - -@@ -310,6 +305,42 @@ static int yellow_carp_mode2_reset(struct smu_context *smu) - return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2); - } - -+ -+static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics, -+ uint32_t *apu_percent, uint32_t *dgpu_percent) -+{ -+ uint32_t apu_boost = 0; -+ uint32_t dgpu_boost = 0; -+ uint16_t apu_limit = 0; -+ uint16_t dgpu_limit = 0; -+ uint16_t apu_power = 0; -+ uint16_t dgpu_power = 0; -+ -+ /* APU and dGPU power values are reported in milli Watts -+ * and STAPM power limits are in Watts */ -+ apu_power = metrics->ApuPower/1000; -+ apu_limit = metrics->StapmOpnLimit; -+ if (apu_power > apu_limit && apu_limit != 0) -+ apu_boost = ((apu_power - apu_limit) * 100) / apu_limit; -+ apu_boost = (apu_boost > 100) ? 100 : apu_boost; -+ -+ dgpu_power = metrics->dGpuPower/1000; -+ if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit) -+ dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit; -+ if (dgpu_power > dgpu_limit && dgpu_limit != 0) -+ dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit; -+ dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost; -+ -+ if (dgpu_boost >= apu_boost) -+ apu_boost = 0; -+ else -+ dgpu_boost = 0; -+ -+ *apu_percent = apu_boost; -+ *dgpu_percent = dgpu_boost; -+ -+} -+ - static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, - MetricsMember_t member, - uint32_t *value) -@@ -318,6 +349,8 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, - - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; - int ret = 0; -+ uint32_t apu_percent = 0; -+ uint32_t dgpu_percent = 0; - - mutex_lock(&smu->metrics_lock); - -@@ -370,26 +403,18 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, - *value = metrics->Voltage[1]; - break; - case METRICS_SS_APU_SHARE: -- /* return the percentage of APU power with respect to APU's power limit. -- * percentage is reported, this isn't boost value. Smartshift power -- * boost/shift is only when the percentage is more than 100. -+ /* return the percentage of APU power boost -+ * with respect to APU's power limit. - */ -- if (metrics->StapmOpnLimit > 0) -- *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; -- else -- *value = 0; -+ yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); -+ *value = apu_percent; - break; - case METRICS_SS_DGPU_SHARE: -- /* return the percentage of dGPU power with respect to dGPU's power limit. -- * percentage is reported, this isn't boost value. Smartshift power -- * boost/shift is only when the percentage is more than 100. -+ /* return the percentage of dGPU power boost -+ * with respect to dGPU's power limit. 
- */ -- if ((metrics->dGpuPower > 0) && -- (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) -- *value = (metrics->dGpuPower * 100) / -- (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); -- else -- *value = 0; -+ yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); -+ *value = dgpu_percent; - break; - default: - *value = UINT_MAX; -@@ -1049,7 +1074,7 @@ out: - static int yellow_carp_print_clk_levels(struct smu_context *smu, - enum smu_clk_type clk_type, char *buf) - { -- int i, size = 0, ret = 0; -+ int i, idx, size = 0, ret = 0; - uint32_t cur_value = 0, value = 0, count = 0; - - smu_cmn_get_sysfs_buf(&buf, &size); -@@ -1081,7 +1106,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, - goto print_clk_out; - - for (i = 0; i < count; i++) { -- ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); -+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; -+ ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); - if (ret) - goto print_clk_out; - -diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c -index 59172acb97380..292f533d8cf0d 100644 ---- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c -+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c -@@ -235,7 +235,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, - crtc->state->event = NULL; - drm_crtc_send_vblank_event(crtc, event); - } else { -- DRM_WARN("CRTC[%d]: FLIP happen but no pending commit.\n", -+ DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", - drm_crtc_index(&kcrtc->base)); - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -@@ -286,7 +286,7 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc, - komeda_crtc_do_flush(crtc, old); - } - --static void -+void - komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, - struct completion *input_flip_done) - { -diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c -index 93b7f09b96ca9..327051bba5b68 100644 ---- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c -+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c -@@ -69,6 +69,25 @@ static const struct drm_driver komeda_kms_driver = { - .minor = 1, - }; - -+static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state) -+{ -+ struct drm_device *dev = state->dev; -+ struct komeda_kms_dev *kms = to_kdev(dev); -+ int i; -+ -+ for (i = 0; i < kms->n_crtcs; i++) { -+ struct komeda_crtc *kcrtc = &kms->crtcs[i]; -+ -+ if (kcrtc->base.state->active) { -+ struct completion *flip_done = NULL; -+ if (kcrtc->base.state->event) -+ flip_done = kcrtc->base.state->event->base.completion; -+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done); -+ } -+ } -+ drm_atomic_helper_commit_hw_done(state); -+} -+ - static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) - { - struct drm_device *dev = old_state->dev; -@@ -81,7 +100,7 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) - - drm_atomic_helper_commit_modeset_enables(dev, old_state); - -- drm_atomic_helper_commit_hw_done(old_state); -+ komeda_kms_atomic_commit_hw_done(old_state); - - drm_atomic_helper_wait_for_flip_done(dev, old_state); - -diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h -index 456f3c4357193..bf6e8fba50613 100644 ---- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h -+++ 
b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h -@@ -182,6 +182,8 @@ void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms); - - void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, - struct komeda_events *evts); -+void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, -+ struct completion *input_flip_done); - - struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev); - void komeda_kms_detach(struct komeda_kms_dev *kms); -diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c -index d63d83800a8a3..517b94c3bcaf9 100644 ---- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c -+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c -@@ -265,6 +265,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, - - formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, - layer->layer_type, &n_formats); -+ if (!formats) { -+ kfree(kplane); -+ return -ENOMEM; -+ } - - err = drm_universal_plane_init(&kms->base, plane, - get_possible_crtcs(kms, c->pipeline), -@@ -275,8 +279,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms, - - komeda_put_fourcc_list(formats); - -- if (err) -- goto cleanup; -+ if (err) { -+ kfree(kplane); -+ return err; -+ } - - drm_plane_helper_add(plane, &komeda_plane_helper_funcs); - -diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c -index 494075ddbef68..b5928b52e2791 100644 ---- a/drivers/gpu/drm/arm/malidp_crtc.c -+++ b/drivers/gpu/drm/arm/malidp_crtc.c -@@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc) - if (crtc->state) - malidp_crtc_destroy_state(crtc, crtc->state); - -- __drm_atomic_helper_crtc_reset(crtc, &state->base); -+ if (state) -+ __drm_atomic_helper_crtc_reset(crtc, &state->base); -+ else -+ __drm_atomic_helper_crtc_reset(crtc, NULL); - } - - static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) -diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c -index 8c2ab3d653b70..f67c816050f22 100644 ---- a/drivers/gpu/drm/arm/malidp_planes.c -+++ b/drivers/gpu/drm/arm/malidp_planes.c -@@ -348,7 +348,7 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms, - else - sgt = obj->funcs->get_sg_table(obj); - -- if (!sgt) -+ if (IS_ERR(sgt)) - return false; - - sgl = sgt->sgl; -diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c -index 8e3e98f13db49..54168134d9b93 100644 ---- a/drivers/gpu/drm/armada/armada_drv.c -+++ b/drivers/gpu/drm/armada/armada_drv.c -@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev) - if (ret) { - dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", - __func__, ret); -- kfree(priv); - return ret; - } - -diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c -index 21909642ee4ca..8fbb25913327c 100644 ---- a/drivers/gpu/drm/armada/armada_gem.c -+++ b/drivers/gpu/drm/armada/armada_gem.c -@@ -336,7 +336,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_armada_gem_pwrite *args = data; - struct armada_gem_object *dobj; - char __user *ptr; -- int ret; -+ int ret = 0; - - DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n", - args->handle, args->offset, args->size, args->ptr); -@@ -349,9 +349,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, - if (!access_ok(ptr, args->size)) - return -EFAULT; - -- ret = fault_in_pages_readable(ptr, args->size); -- if (ret) -- return ret; -+ if 
(fault_in_readable(ptr, args->size)) -+ return -EFAULT; - - dobj = armada_gem_object_lookup(file, args->handle); - if (dobj == NULL) -diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c -index 424250535fed9..0383deb970bbb 100644 ---- a/drivers/gpu/drm/armada/armada_overlay.c -+++ b/drivers/gpu/drm/armada/armada_overlay.c -@@ -4,6 +4,8 @@ - * Rewritten from the dovefb driver, and Armada510 manuals. - */ - -+#include -+ - #include - #include - #include -@@ -451,8 +453,8 @@ static int armada_overlay_get_property(struct drm_plane *plane, - drm_to_overlay_state(state)->colorkey_ug, - drm_to_overlay_state(state)->colorkey_vb, 0); - } else if (property == priv->colorkey_mode_prop) { -- *val = (drm_to_overlay_state(state)->colorkey_mode & -- CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK); -+ *val = FIELD_GET(CFG_CKMODE_MASK, -+ drm_to_overlay_state(state)->colorkey_mode); - } else if (property == priv->brightness_prop) { - *val = drm_to_overlay_state(state)->brightness + 256; - } else if (property == priv->contrast_prop) { -diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c -index b53fee6f1c170..65f172807a0d5 100644 ---- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c -+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c -@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) - if (rc) - return rc; - -- return sprintf(buf, "%u\n", reg & 1); -+ return sprintf(buf, "%u\n", reg); - } - static DEVICE_ATTR_RO(vga_pw); - -diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c -index 79a3618679554..754a08c92d3d1 100644 ---- a/drivers/gpu/drm/ast/ast_main.c -+++ b/drivers/gpu/drm/ast/ast_main.c -@@ -423,11 +423,12 @@ struct ast_private *ast_device_create(const struct drm_driver *drv, - return ERR_PTR(-EIO); - - /* -- * If we don't have IO space at all, use MMIO now and -- * assume the chip has MMIO enabled by default (rev 0x20 -- * and higher). -+ * After AST2500, MMIO is enabled by default, and it should be adopted -+ * to be compatible with Arm. 
- */ -- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { -+ if (pdev->revision >= 0x40) { -+ ast->ioregs = ast->regs + AST_IO_MM_OFFSET; -+ } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { - drm_info(dev, "platform has no IO space, trying MMIO\n"); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } -diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c -index 1e30eaeb0e1b3..08ed0d08d03b8 100644 ---- a/drivers/gpu/drm/ast/ast_mode.c -+++ b/drivers/gpu/drm/ast/ast_mode.c -@@ -474,7 +474,10 @@ static void ast_set_color_reg(struct ast_private *ast, - static void ast_set_crtthd_reg(struct ast_private *ast) - { - /* Set Threshold */ -- if (ast->chip == AST2300 || ast->chip == AST2400 || -+ if (ast->chip == AST2600) { -+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0); -+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0); -+ } else if (ast->chip == AST2300 || ast->chip == AST2400 || - ast->chip == AST2500) { - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); -@@ -1121,7 +1124,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc) - if (crtc->state) - crtc->funcs->atomic_destroy_state(crtc, crtc->state); - -- __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); -+ if (ast_state) -+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); -+ else -+ __drm_atomic_helper_crtc_reset(crtc, NULL); - } - - static struct drm_crtc_state * -diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c -index b5d92f652fd85..1dac7f987a61d 100644 ---- a/drivers/gpu/drm/ast/ast_post.c -+++ b/drivers/gpu/drm/ast/ast_post.c -@@ -291,7 +291,7 @@ static void ast_init_dram_reg(struct drm_device *dev) - ; - } while (ast_read32(ast, 0x10100) != 0xa8); - } else {/* AST2100/1100 */ -- if (ast->chip == AST2100 || ast->chip == 2200) -+ if (ast->chip == AST2100 || ast->chip == AST2200) - dram_reg_info = ast2100_dram_table_data; - else - dram_reg_info = ast1100_dram_table_data; -diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h -index d9eb353a4bf09..dbe1cc620f6e6 100644 ---- a/drivers/gpu/drm/ast/ast_tables.h -+++ b/drivers/gpu/drm/ast/ast_tables.h -@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = { - }; - - static const struct ast_vbios_enhtable res_1600x900[] = { -- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */ -- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A }, - {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | - AST2500PreCatchCRT), 60, 1, 0x3A }, -diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig -index 431b6e12a81fe..68ec45abc1fbf 100644 ---- a/drivers/gpu/drm/bridge/Kconfig -+++ b/drivers/gpu/drm/bridge/Kconfig -@@ -8,7 +8,6 @@ config DRM_BRIDGE - config DRM_PANEL_BRIDGE - def_bool y - depends on DRM_BRIDGE -- depends on DRM_KMS_HELPER - select DRM_PANEL - help - DRM bridge wrapper of DRM panels -@@ -30,6 +29,7 @@ config DRM_CDNS_DSI - config DRM_CHIPONE_ICN6211 - tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge" - depends on OF -+ select DRM_KMS_HELPER - select DRM_MIPI_DSI - select DRM_PANEL_BRIDGE - help -diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h -index 05e3abb5a0c9a..fdd8e3d3232ec 100644 ---- a/drivers/gpu/drm/bridge/adv7511/adv7511.h -+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h -@@ -169,6 +169,7 @@ - 
#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) - #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) - -+#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6) - #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 - #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 - #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 -@@ -386,10 +387,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); - #else - static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) - { -- unsigned int offset = adv7511->type == ADV7533 ? -- ADV7533_REG_CEC_OFFSET : 0; -- -- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, -+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, - ADV7511_CEC_CTRL_POWER_DOWN); - return 0; - } -@@ -397,7 +395,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) - - void adv7533_dsi_power_on(struct adv7511 *adv); - void adv7533_dsi_power_off(struct adv7511 *adv); --void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); -+enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, -+ const struct drm_display_mode *mode); - int adv7533_patch_registers(struct adv7511 *adv); - int adv7533_patch_cec_registers(struct adv7511 *adv); - int adv7533_attach_dsi(struct adv7511 *adv); -diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c -index a20a45c0b353f..ddd1305b82b2c 100644 ---- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c -+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c -@@ -316,7 +316,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) - goto err_cec_alloc; - } - -- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); -+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0); - /* cec soft reset */ - regmap_write(adv7511->regmap_cec, - ADV7511_REG_CEC_SOFT_RESET + offset, 0x01); -@@ -343,7 +343,7 @@ err_cec_alloc: - dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", - ret); - err_cec_parse_dt: -- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, -+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, - ADV7511_CEC_CTRL_POWER_DOWN); - return ret == -EPROBE_DEFER ? ret : 0; - } -diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c -index 76555ae64e9ce..ce40cd1ae1670 100644 ---- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c -+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c -@@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511) - * from standby or are enabled. When the HPD goes low the adv7511 is - * reset and the outputs are disabled which might cause the monitor to - * go to standby again. To avoid this we ignore the HPD pin for the -- * first few seconds after enabling the output. -+ * first few seconds after enabling the output. On the other hand -+ * adv7535 require to enable HPD Override bit for proper HPD. 
- */ -- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -- ADV7511_REG_POWER2_HPD_SRC_MASK, -- ADV7511_REG_POWER2_HPD_SRC_NONE); -+ if (adv7511->type == ADV7535) -+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -+ ADV7535_REG_POWER2_HPD_OVERRIDE, -+ ADV7535_REG_POWER2_HPD_OVERRIDE); -+ else -+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -+ ADV7511_REG_POWER2_HPD_SRC_MASK, -+ ADV7511_REG_POWER2_HPD_SRC_NONE); - } - - static void adv7511_power_on(struct adv7511 *adv7511) -@@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511) - static void __adv7511_power_off(struct adv7511 *adv7511) - { - /* TODO: setup additional power down modes */ -+ if (adv7511->type == ADV7535) -+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -+ ADV7535_REG_POWER2_HPD_OVERRIDE, 0); -+ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, - ADV7511_POWER_POWER_DOWN); -@@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) - status = connector_status_disconnected; - } else { - /* Renable HPD sensing */ -- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -- ADV7511_REG_POWER2_HPD_SRC_MASK, -- ADV7511_REG_POWER2_HPD_SRC_BOTH); -+ if (adv7511->type == ADV7535) -+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -+ ADV7535_REG_POWER2_HPD_OVERRIDE, -+ ADV7535_REG_POWER2_HPD_OVERRIDE); -+ else -+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, -+ ADV7511_REG_POWER2_HPD_SRC_MASK, -+ ADV7511_REG_POWER2_HPD_SRC_BOTH); - } - - adv7511->status = status; -@@ -682,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) - } - - static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511, -- struct drm_display_mode *mode) -+ const struct drm_display_mode *mode) - { - if (mode->clock > 165000) - return MODE_CLOCK_HIGH; -@@ -771,14 +786,16 @@ static void adv7511_mode_set(struct adv7511 *adv7511, - else - low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; - -- regmap_update_bits(adv7511->regmap, 0xfb, -- 0x6, low_refresh_rate << 1); -+ if (adv7511->type == ADV7511) -+ regmap_update_bits(adv7511->regmap, 0xfb, -+ 0x6, low_refresh_rate << 1); -+ else -+ regmap_update_bits(adv7511->regmap, 0x4a, -+ 0xc, low_refresh_rate << 2); -+ - regmap_update_bits(adv7511->regmap, 0x17, - 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); - -- if (adv7511->type == ADV7533 || adv7511->type == ADV7535) -- adv7533_mode_set(adv7511, adj_mode); -- - drm_mode_copy(&adv7511->curr_mode, adj_mode); - - /* -@@ -898,6 +915,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge, - adv7511_mode_set(adv, mode, adj_mode); - } - -+static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, -+ const struct drm_display_info *info, -+ const struct drm_display_mode *mode) -+{ -+ struct adv7511 *adv = bridge_to_adv7511(bridge); -+ -+ if (adv->type == ADV7533 || adv->type == ADV7535) -+ return adv7533_mode_valid(adv, mode); -+ else -+ return adv7511_mode_valid(adv, mode); -+} -+ - static int adv7511_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) - { -@@ -948,6 +977,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { - .enable = adv7511_bridge_enable, - .disable = adv7511_bridge_disable, - .mode_set = adv7511_bridge_mode_set, -+ .mode_valid = adv7511_bridge_mode_valid, - .attach = adv7511_bridge_attach, - .detect = adv7511_bridge_detect, - .get_edid = adv7511_bridge_get_edid, -@@ -1048,6 +1078,10 @@ static 
int adv7511_init_cec_regmap(struct adv7511 *adv) - ADV7511_CEC_I2C_ADDR_DEFAULT); - if (IS_ERR(adv->i2c_cec)) - return PTR_ERR(adv->i2c_cec); -+ -+ regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR, -+ adv->i2c_cec->addr << 1); -+ - i2c_set_clientdata(adv->i2c_cec, adv); - - adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec, -@@ -1252,9 +1286,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) - if (ret) - goto err_i2c_unregister_packet; - -- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, -- adv7511->i2c_cec->addr << 1); -- - INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); - - if (i2c->irq) { -@@ -1291,6 +1322,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) - return 0; - - err_unregister_cec: -+ cec_unregister_adapter(adv7511->cec_adap); - i2c_unregister_device(adv7511->i2c_cec); - clk_disable_unprepare(adv7511->cec_clk); - err_i2c_unregister_packet: -@@ -1309,8 +1341,6 @@ static int adv7511_remove(struct i2c_client *i2c) - - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_detach_dsi(adv7511); -- i2c_unregister_device(adv7511->i2c_cec); -- clk_disable_unprepare(adv7511->cec_clk); - - adv7511_uninit_regulators(adv7511); - -@@ -1319,6 +1349,8 @@ static int adv7511_remove(struct i2c_client *i2c) - adv7511_audio_exit(adv7511); - - cec_unregister_adapter(adv7511->cec_adap); -+ i2c_unregister_device(adv7511->i2c_cec); -+ clk_disable_unprepare(adv7511->cec_clk); - - i2c_unregister_device(adv7511->i2c_packet); - i2c_unregister_device(adv7511->i2c_edid); -@@ -1362,10 +1394,21 @@ static struct i2c_driver adv7511_driver = { - - static int __init adv7511_init(void) - { -- if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) -- mipi_dsi_driver_register(&adv7533_dsi_driver); -+ int ret; -+ -+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { -+ ret = mipi_dsi_driver_register(&adv7533_dsi_driver); -+ if (ret) -+ return ret; -+ } - -- return i2c_add_driver(&adv7511_driver); -+ ret = i2c_add_driver(&adv7511_driver); -+ if (ret) { -+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) -+ mipi_dsi_driver_unregister(&adv7533_dsi_driver); -+ } -+ -+ return ret; - } - module_init(adv7511_init); - -diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c -index 59d718bde8c41..babc0be0bbb56 100644 ---- a/drivers/gpu/drm/bridge/adv7511/adv7533.c -+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c -@@ -100,26 +100,24 @@ void adv7533_dsi_power_off(struct adv7511 *adv) - regmap_write(adv->regmap_cec, 0x27, 0x0b); - } - --void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) -+enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, -+ const struct drm_display_mode *mode) - { -+ unsigned long max_lane_freq; - struct mipi_dsi_device *dsi = adv->dsi; -- int lanes, ret; -- -- if (adv->num_dsi_lanes != 4) -- return; -- -- if (mode->clock > 80000) -- lanes = 4; -- else -- lanes = 3; -- -- if (lanes != dsi->lanes) { -- mipi_dsi_detach(dsi); -- dsi->lanes = lanes; -- ret = mipi_dsi_attach(dsi); -- if (ret) -- dev_err(&dsi->dev, "failed to change host lanes\n"); -- } -+ u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); -+ -+ /* Check max clock for either 7533 or 7535 */ -+ if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500)) -+ return MODE_CLOCK_HIGH; -+ -+ /* Check max clock for each lane */ -+ max_lane_freq = (adv->type == ADV7533 ? 
800000 : 891000); -+ -+ if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes) -+ return MODE_CLOCK_HIGH; -+ -+ return MODE_OK; - } - - int adv7533_patch_registers(struct adv7511 *adv) -diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c -index b7d2e4449cfaa..f0305f833b6c0 100644 ---- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c -+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c -@@ -1268,6 +1268,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, - return 0; - } - -+static -+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, -+ struct drm_atomic_state *state) -+{ -+ struct drm_encoder *encoder = dp->encoder; -+ struct drm_connector *connector; -+ struct drm_connector_state *conn_state; -+ -+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder); -+ if (!connector) -+ return NULL; -+ -+ conn_state = drm_atomic_get_old_connector_state(state, connector); -+ if (!conn_state) -+ return NULL; -+ -+ return conn_state->crtc; -+} -+ - static - struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, - struct drm_atomic_state *state) -@@ -1448,14 +1467,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, - { - struct drm_atomic_state *old_state = old_bridge_state->base.state; - struct analogix_dp_device *dp = bridge->driver_private; -- struct drm_crtc *crtc; -+ struct drm_crtc *old_crtc, *new_crtc; -+ struct drm_crtc_state *old_crtc_state = NULL; - struct drm_crtc_state *new_crtc_state = NULL; -+ int ret; - -- crtc = analogix_dp_get_new_crtc(dp, old_state); -- if (!crtc) -+ new_crtc = analogix_dp_get_new_crtc(dp, old_state); -+ if (!new_crtc) - goto out; - -- new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); -+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); - if (!new_crtc_state) - goto out; - -@@ -1464,6 +1485,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, - return; - - out: -+ old_crtc = analogix_dp_get_old_crtc(dp, old_state); -+ if (old_crtc) { -+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, -+ old_crtc); -+ -+ /* When moving from PSR to fully disabled, exit PSR first. 
*/ -+ if (old_crtc_state && old_crtc_state->self_refresh_active) { -+ ret = analogix_dp_disable_psr(dp); -+ if (ret) -+ DRM_ERROR("Failed to disable psr (%d)\n", ret); -+ } -+ } -+ - analogix_dp_bridge_disable(bridge); - } - -@@ -1632,8 +1666,19 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, - struct drm_dp_aux_msg *msg) - { - struct analogix_dp_device *dp = to_dp(aux); -+ int ret; -+ -+ pm_runtime_get_sync(dp->dev); -+ -+ ret = analogix_dp_detect_hpd(dp); -+ if (ret) -+ goto out; -+ -+ ret = analogix_dp_transfer(dp, msg); -+out: -+ pm_runtime_put(dp->dev); - -- return analogix_dp_transfer(dp, msg); -+ return ret; - } - - struct analogix_dp_device * -@@ -1698,8 +1743,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - dp->reg_base = devm_ioremap_resource(&pdev->dev, res); -- if (IS_ERR(dp->reg_base)) -- return ERR_CAST(dp->reg_base); -+ if (IS_ERR(dp->reg_base)) { -+ ret = PTR_ERR(dp->reg_base); -+ goto err_disable_clk; -+ } - - dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd"); - -@@ -1711,7 +1758,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) - if (IS_ERR(dp->hpd_gpiod)) { - dev_err(dev, "error getting HDP GPIO: %ld\n", - PTR_ERR(dp->hpd_gpiod)); -- return ERR_CAST(dp->hpd_gpiod); -+ ret = PTR_ERR(dp->hpd_gpiod); -+ goto err_disable_clk; - } - - if (dp->hpd_gpiod) { -@@ -1731,7 +1779,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) - - if (dp->irq == -ENXIO) { - dev_err(&pdev->dev, "failed to get irq\n"); -- return ERR_PTR(-ENODEV); -+ ret = -ENODEV; -+ goto err_disable_clk; - } - - ret = devm_request_threaded_irq(&pdev->dev, dp->irq, -@@ -1740,11 +1789,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) - irq_flags, "analogix-dp", dp); - if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); -- return ERR_PTR(ret); -+ goto err_disable_clk; - } - disable_irq(dp->irq); - - return dp; -+ -+err_disable_clk: -+ clk_disable_unprepare(dp->clock); -+ return ERR_PTR(ret); - } - EXPORT_SYMBOL_GPL(analogix_dp_probe); - -@@ -1807,12 +1860,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove); - int analogix_dp_suspend(struct analogix_dp_device *dp) - { - clk_disable_unprepare(dp->clock); -- -- if (dp->plat_data->panel) { -- if (drm_panel_unprepare(dp->plat_data->panel)) -- DRM_ERROR("failed to turnoff the panel\n"); -- } -- - return 0; - } - EXPORT_SYMBOL_GPL(analogix_dp_suspend); -@@ -1827,13 +1874,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp) - return ret; - } - -- if (dp->plat_data->panel) { -- if (drm_panel_prepare(dp->plat_data->panel)) { -- DRM_ERROR("failed to setup the panel\n"); -- return -EBUSY; -- } -- } -- - return 0; - } - EXPORT_SYMBOL_GPL(analogix_dp_resume); -diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c -index cab6c8b92efd4..6a4f20fccf841 100644 ---- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c -+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c -@@ -998,11 +998,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, - if (!blocking) - return 0; - -+ /* -+ * db[1]!=0: entering PSR, wait for fully active remote frame buffer. 
-+ * db[1]==0: exiting PSR, wait for either -+ * (a) ACTIVE_RESYNC - the sink "must display the -+ * incoming active frames from the Source device with no visible -+ * glitches and/or artifacts", even though timings may still be -+ * re-synchronizing; or -+ * (b) INACTIVE - the transition is fully complete. -+ */ - ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, - psr_status >= 0 && - ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || -- (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, -- DP_TIMEOUT_PSR_LOOP_MS * 1000); -+ (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || -+ psr_status == DP_PSR_SINK_INACTIVE))), -+ 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); - if (ret) { - dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); - return ret; -diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c -index 14d73fb1dd15b..f895ef1939fa0 100644 ---- a/drivers/gpu/drm/bridge/analogix/anx7625.c -+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c -@@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx, - ret = sp_tx_aux_rd(ctx, 0xf1); - - if (ret) { -- sp_tx_rst_aux(ctx); -+ ret = sp_tx_rst_aux(ctx); - DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n"); - } else { - ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, -@@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx, - if (cnt > EDID_TRY_CNT) - return -EIO; - -- return 0; -+ return ret; - } - - static int segments_edid_read(struct anx7625_data *ctx, -@@ -785,17 +785,18 @@ static int segments_edid_read(struct anx7625_data *ctx, - if (cnt > EDID_TRY_CNT) - return -EIO; - -- return 0; -+ return ret; - } - - static int sp_tx_edid_read(struct anx7625_data *ctx, - u8 *pedid_blocks_buf) - { -- u8 offset, edid_pos; -+ u8 offset; -+ int edid_pos; - int count, blocks_num; - u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; - u8 i, j; -- u8 g_edid_break = 0; -+ int g_edid_break = 0; - int ret; - struct device *dev = &ctx->client->dev; - -@@ -826,7 +827,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, - g_edid_break = edid_read(ctx, offset, - pblock_buf); - -- if (g_edid_break) -+ if (g_edid_break < 0) - break; - - memcpy(&pedid_blocks_buf[offset], -@@ -887,7 +888,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, - } - - /* Reset aux channel */ -- sp_tx_rst_aux(ctx); -+ ret = sp_tx_rst_aux(ctx); -+ if (ret < 0) { -+ DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n"); -+ return ret; -+ } - - return (blocks_num + 1); - } -diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c -index e6e331071a00d..dd57b104aec3a 100644 ---- a/drivers/gpu/drm/bridge/cdns-dsi.c -+++ b/drivers/gpu/drm/bridge/cdns-dsi.c -@@ -1286,6 +1286,7 @@ static const struct of_device_id cdns_dsi_of_match[] = { - { .compatible = "cdns,dsi" }, - { }, - }; -+MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); - - static struct platform_driver cdns_dsi_platform_driver = { - .probe = cdns_dsi_drm_probe, -diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c -index a6151db955868..d7eedf35e8415 100644 ---- a/drivers/gpu/drm/bridge/chipone-icn6211.c -+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c -@@ -14,8 +14,19 @@ - #include - #include - --#include